| text | repo_name | path | language | license | size | keyword | text_hash |
|---|---|---|---|---|---|---|---|
| stringlengths 12 to 1.05M | stringlengths 5 to 86 | stringlengths 4 to 191 | stringclasses 1 value | stringclasses 15 values | int32 12 to 1.05M | listlengths 1 to 23 | stringlengths 64 to 64 |
from importlib import import_module
from itertools import chain
import json
from os import path, getcwd
import os
import subprocess
import inspect
from ..fileop import IOHelper
from ...util import Utility
from ..exechelper import func_exec_run
# import_module can't load the following modules when running under the NGINX
# server in 'immediate' mode, so they are imported eagerly here as a workaround.
# This should be fixed properly, otherwise dynamic loading will not work.
try:
import app.biowl.libraries.galaxy.adapter
except:
pass
try:
import app.biowl.libraries.seqtk.adapter
import app.biowl.libraries.bowtie2.adapter
import app.biowl.libraries.bwa.adapter
import app.biowl.libraries.pysam.adapter
import app.biowl.libraries.fastqc.adapter
import app.biowl.libraries.flash.adapter
import app.biowl.libraries.hadoop.adapter
import app.biowl.libraries.pear.adapter
import app.biowl.libraries.usearch.adapter
import app.biowl.libraries.vsearch.adapter
except:
pass
def load_module(modulename):
'''
Load a module dynamically from a string module name.
It was first implemented with __import__, but later
replaced by importlib.import_module.
:param modulename:
'''
#if modulename not in sys.modules:
#name = "package." + modulename
#return __import__(modulename, fromlist=[''])
return import_module(modulename)
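# Illustrative usage (hypothetical module and function names): load_module pairs
# with getattr to resolve a callable at run time, which is how call_func below uses it.
#   module_obj = load_module('app.biowl.libraries.fastqc.adapter')
#   run_fastqc = getattr(module_obj, 'run_fastqc')  # 'run_fastqc' is an assumed internal name
#   result = run_fastqc('/path/to/reads.fastq')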
class Function():
def __init__(self, name, internal, package = None, module = None, params = [], example = None, desc = None, runmode = None, level = 0, group = None):
self.name = name
self.internal = internal
self.package = package
self.module = module
self.params = params
self.example = example
self.desc = desc
self.runmode = runmode
self.level = level
self.group = group
class Library():
def __init__(self, funcs = {}):
self.funcs = funcs
self.tasks = {}
self.localdir = path.join(path.abspath(path.dirname(__file__)), 'storage')
def add_task(self, name, expr):
self.tasks[name] = expr
def run_task(self, name, args, dotaskstmt):
if name in self.tasks:
return dotaskstmt(self.tasks[name][1:], args)
def code_run_task(self, name, args, dotaskstmt):
if name in self.tasks:
return dotaskstmt(self.tasks[name], args), set()
@staticmethod
def load(library_def_file):
library = Library()
library.funcs = Library.load_funcs_recursive(library_def_file)
return library
@staticmethod
def load_funcs_recursive(library_def_file):
if os.path.isfile(library_def_file):
return Library.load_funcs(library_def_file)
all_funcs = {}
for f in os.listdir(library_def_file):
funcs = Library.load_funcs_recursive(os.path.join(library_def_file, f))
for k,v in funcs.items():
if k in all_funcs:
all_funcs[k].extend(v)
else:
all_funcs[k] = v if isinstance(v, list) else [v]
return all_funcs
@staticmethod
def load_funcs(library_def_file):
funcs = {}
try:
if not os.path.isfile(library_def_file) or not library_def_file.endswith(".json"):
return funcs
with open(library_def_file, 'r') as json_data:
d = json.load(json_data)
libraries = d["functions"]
libraries = sorted(libraries, key = lambda k : k['package'].lower())
for f in libraries:
name = f["name"] if f.get("name") else f["internal"]
internal = f["internal"] if f.get("internal") else f["name"]
module = f["module"] if f.get("module") else None
package = f["package"] if f.get("package") else ""
example = f["example"] if f.get("example") else ""
desc = f["desc"] if f.get("desc") else ""
runmode = f["runmode"] if f.get("runmode") else ""
level = int(f["level"]) if f.get("level") else 0
group = f["group"] if f.get("group") else ""
params = []
if f.get("params"):
for param in f["params"]:
params.append(param)
func = Function(name, internal, package, module, params, example, desc, runmode, level, group)
if name.lower() in funcs:
funcs[name.lower()].extend([func])
else:
funcs[name.lower()] = [func]
finally:
return funcs
def func_to_internal_name(self, funcname):
# self.funcs maps lower-cased names to lists of Function objects
for funcs in self.funcs.values():
for f in funcs:
if f.name and self.iequal(f.name, funcname):
return f.internal
def get_function(self, name, package = None):
if package is not None:
for func in self.funcs[name.lower()]:
if func.package == package:
return [func]
else:
return self.funcs[name.lower()]
def check_function(self, name, package = None):
functions = self.get_function(name, package)
return functions is not None and len(functions) > 0
def funcs_flat(self):
funcs = []
for v in self.funcs.values():
funcs.extend(v)
return funcs
@staticmethod
def split_args(arguments):
args = []
kwargs = {}
for arg in arguments:
if isinstance(arg, tuple):
kwargs[arg[0]] = arg[1]
else:
args.append(arg)
return args, kwargs
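# Example of split_args: tuple arguments become keyword arguments keyed by their
# first element, everything else stays positional.
#   Library.split_args(['input.fastq', ('history_id', 'abc123')])
#   -> (['input.fastq'], {'history_id': 'abc123'})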
def call_func(self, context, package, function, args):
'''
Call a function from a module.
:param context: The context for output and error
:param package: The name of the package. If it's empty, local function is called
:param function: Name of the function
:param args: The arguments for the function
'''
arguments, kwargs = Library.split_args(args)
if not package or package == "None":
if function.lower() == "print":
return context.write(*arguments)
elif function.lower() == "range":
return range(*arguments)
elif function.lower() == "read":
if not arguments:
raise ValueError("Read must have one argument.")
fs = Utility.fs_by_prefix(arguments[0])
return fs.read(arguments[0])
elif function.lower() == "write":
if len(arguments) < 2:
raise ValueError("Write must have two arguments.")
fs = Utility.fs_by_prefix(arguments[0])
return fs.write(arguments[0], arguments[1])
elif function.lower() == "getfiles":
fs = Utility.fs_by_prefix(arguments[0])
return fs.get_files(arguments[0])
elif function.lower() == "getfolders":
fs = Utility.fs_by_prefix(arguments[0])
return fs.get_folders(arguments[0])
elif function.lower() == "createfolder":
fs = Utility.fs_by_prefix(arguments[0])
return fs.create_folder(arguments[0])
elif function.lower() == "remove":
fs = Utility.fs_by_prefix(arguments[0])
return fs.remove(arguments[0])
elif function.lower() == "makedirs":
fs = Utility.fs_by_prefix(arguments[0])
return fs.makedirs(arguments[0])
elif function.lower() == "getcwd":
return getcwd()
elif function.lower() == "len":
return len(arguments[0])
elif function.lower() == "exec":
return func_exec_run(arguments[0], *arguments[1:])
# return func_exec(arguments[0], *arguments[1:])
# else:
# raise ValueError("{0} function not implemented".format(function))
# possibles = globals().copy()
# possibles.update(locals())
# function = possibles.get(function)
# return function(*arguments)
func = self.get_function(function, package)
module_obj = load_module(func[0].module)
function = getattr(module_obj, func[0].internal)
if func[0].runmode == 'dist':
arguments = context.get_activedci() + arguments
# special handling for galaxy if history_id is not given, use history id from symbol table
if (func[0].module == 'app.biowl.libraries.galaxy.adapter'):
if not 'history_id' in kwargs and context.var_exists('history_id'):
fullargspec = inspect.getfullargspec(function)
if fullargspec.varkw:
kwargs['history_id'] = context.get_var('history_id')
return function(*arguments, **kwargs)
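# Sketch of call_func usage (the context object, package name and function names are assumed):
#   lib = Library.load('libraries')                                       # load function definitions
#   files = lib.call_func(ctx, None, 'GetFiles', ['/data'])               # built-in, no package
#   out = lib.call_func(ctx, 'fastqc', 'FastQC', ['/data/reads.fastq'])   # dispatched via load_module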
def code_func(self, context, package, function, arguments):
'''
Call a function from a module.
:param context: The context for output and error
:param package: The name of the package. If it's empty, local function is called
:param function: Name of the function
:param arguments: The arguments for the function
'''
imports = set()
args = ','.join(arguments)
code = ''
if not package or package == "None":
if function.lower() == "print":
code = "print({0})".format(args)
elif function.lower() == "range":
code = "range({0})".format(args)
elif function.lower() == "read":
imports.add("from fileop import IOHelper")
code = "IOHelper.read({0})".format(args)
elif function.lower() == "write":
imports.add("from fileop import IOHelper")
code = "IOHelper.write({0})".format(args)
elif function.lower() == "getfiles":
imports.add("from fileop import IOHelper")
code = "IOHelper.getfiles({0})".format(args)
elif function.lower() == "getfolders":
imports.add("from fileop import IOHelper")
code = "IOHelper.getfolders({0})".format(args)
elif function.lower() == "remove":
imports.add("from fileop import IOHelper")
code = "IOHelper.remove({0})".format(args)
elif function.lower() == "createfolder":
imports.add("from fileop import IOHelper")
code = "IOHelper.makedirs({0})".format(args)
elif function.lower() == "getcwd":
imports.add("import os")
code = "os.getcwd()"
elif function.lower() == "len":
code = "len({0})".format(arguments[0])
elif function.lower() == "exec":
imports.add("import subprocess")
code = "func_exec_run({0}, {1})".format(arguments[0], arguments[1])
if code:
return code, imports
imports.add("from importlib import import_module")
func = self.get_function(function, package)
code = "module_obj = load_module({0})\n".format(func[0].module)
code += "function = getattr(module_obj, {0})\n".format(func[0].internal)
if context.dci and context.dci[-1] and func.runmode == 'distibuted':
args = [context.dci[-1]] + args
code += "function({0})".format(args)
return code, imports
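# Example of code_func output for a built-in (values illustrative): for function
# 'getfiles' with arguments ["'/data'"] it returns the code string
# "IOHelper.getfiles('/data')" together with the import set
# {"from fileop import IOHelper"}, which the caller is expected to emit.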
def __repr__(self):
return "Library: " + repr(self.funcs)
def __getitem__(self, key):
return self.funcs[key]
def __setitem__(self, key, val):
self.funcs[key] = val
def __delitem__(self, key):
del self.funcs[key]
def __contains__(self, key):
return key in self.funcs
def __iter__(self):
return iter(self.funcs.keys())
def __str__(self):
funcs = self.funcs_flat();
#funcs = [num for elem in funcs for num in elem]
if len(funcs) > 0:
mod_name = "Module name"
mod_len = max(max(len(i.module) if i.module is not None else 0 for i in funcs), len(mod_name))
internal_name = "Internal Name"
internal_len = max(max(len(i.internal) for i in funcs), len(internal_name))
func_name = "Function Name"
func_len = max(max(len(i.name) for i in funcs), len(func_name))
param_names = "Parameters"
param_len = len(param_names)
l = 0
for a in funcs:
for v in a.params:
l += len(v)
param_len = max(param_len, l)
# print table header for vars
display = "\n\n{0:3s} | {1:^{2}s} | {3:^{4}s} | {5:^{6}s} | {7:^{8}s}".format(" No", mod_name, mod_len, internal_name, internal_len, func_name, func_len, param_names, param_len)
display += ("\n-------------------" + "-" * (mod_len + internal_len + func_len + param_len))
# print symbol table
i = 1
for v in funcs:
module = v.module if v.module is not None else "None"
parameters = ""
for p in v.params:
if parameters == "":
parameters = "{0}".format(p)
else:
parameters += ", {0}".format(p)
display += "\n{0:3d} | {1:^{2}s} | {3:^{4}s} | {5:^{6}s} | {7:^{8}s}".format(i, module, mod_len, v.internal, internal_len, v.name, func_len, parameters, param_len)
i += 1
return display
| repo_name: mainulhossain/phenoproc | path: app/biowl/dsl/func_resolver.py | language: Python | license: mit | size: 14,095 | keyword: ["BWA", "Galaxy", "pysam"] | text_hash: fb7e008b6f77ce6ba8ba3c1402e7926ce6cd37a9d848020a302170cdd0534a30 |
from rdkit import Chem
from rdkit.Chem import AllChem
from rdkit import DataStructs
from sklearn.naive_bayes import BernoulliNB
import cPickle
import glob
import os
import sys
from pylab import save
import numpy as np
import pymysql
import math
import getpass
from multiprocessing import Pool
import multiprocessing
multiprocessing.freeze_support()
N_cores = 25
def introMessage():
print '=============================================================================================='
print ' Author: Lewis Mervin\n Email: lhm30@cam.ac.uk\n Supervisor: Dr. A. Bender'
print ' Address: Centre For Molecular Informatics, Dept. Chemistry, Lensfield Road, Cambridge CB2 1EW'
print '==============================================================================================\n'
return
def login():
user = raw_input("Enter Username for PIDGIN DB [%s]: " % getpass.getuser())
if not user:
user = getpass.getuser()
pprompt = lambda: (getpass.getpass(), getpass.getpass('Retype password: '))
p1, p2 = pprompt()
while p1 != p2:
print('Passwords do not match. Try again')
p1, p2 = pprompt()
return user, p1
def calcNormalFingerprints(smiles):
m1 = Chem.MolFromSmiles(smiles)
fp = AllChem.GetMorganFingerprintAsBitVect(m1,2, nBits=2048)
return fp
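# Example (aspirin SMILES; exact bits depend on the installed RDKit version):
#   fp = calcNormalFingerprints('CC(=O)Oc1ccccc1C(=O)O')
#   fp.GetNumOnBits()   # number of set bits in the 2048-bit Morgan (radius 2) fingerprint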
def fp_array(x):
mod, sm = x
ret = []
for s in sm:
ret.append(calcNormalFingerprints(s))
return [mod,ret]
def importQuery(name):
smis = []
query = open(name).read().splitlines()
matrix = []
for q in query:
try:
matrix.append(calcNormalFingerprints(q))
smis.append(q)
except:
print 'err ' + q
pass
return matrix, smis
def getUpName():
global u_name
t_file = open('classes_in_model.txt').read().splitlines()
t_file.pop(0)
for t in t_file:
t = t.split('\t')
u_name[t[1]] = t[0]
return
def processtarget(x):
filename,fps = x
ret = [u_name[filename[7:-4]],filename[7:-4]]
for i,fp in enumerate(fps):
sim = round(np.average(sorted(DataStructs.BulkTanimotoSimilarity(fp,modfps[filename]),reverse=True)[:req]),3)
ret.append(sim)
return ret
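# What processtarget computes for one target, sketched with assumed toy inputs:
#   sims = DataStructs.BulkTanimotoSimilarity(query_fp, active_fps)   # one similarity per active compound
#   nn_score = round(np.average(sorted(sims, reverse=True)[:req]), 3) # mean of the top-`req` near neighbours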
#main
introMessage()
mods = glob.glob('models/*.pkl')
print 'Total Number of Classes : ' + str(len(mods))
u_name = dict()
getUpName()
fps,smis = importQuery(sys.argv[1])
req = int(sys.argv[2])
of = open(sys.argv[1] + '_out_ad_' + str(req) + '_nn.txt', 'w')
of.write('Name\tTarget\t' + '\t'.join(map(str,smis)) + '\n')
print 'Total Number of Query Molecules : ' + str(len(fps))
usr, pw = login()
conn = pymysql.connect(db='pidgin', user=usr, passwd=pw, host='localhost', port=3306)
s_dict = dict()
print 'Gathering active compounds for all targets'
for j, mod in enumerate(mods):
cur = conn.cursor()
cur.execute("SELECT stdsmiles FROM actives WHERE UNIPROT = '"+mod[7:-4]+"';")
s_dict[mod] = np.array(cur.fetchall())[:,0]
print 'Calculating fingerprints for all actives'
modfps = dict()
pool = Pool(processes=N_cores) # set up resources
jobs = pool.imap_unordered(fp_array, [[mod,smiles] for mod, smiles in s_dict.iteritems()])
for i, result in enumerate(jobs):
modfps[result[0]] = result[1]
pool.close()
pool.join()
print 'Calculating near-neighbours for all input compounds'
ad_tasks = [[mod,fps] for mod in sorted(mods)]
pool = Pool(processes=N_cores) # set up resources
jobs = pool.imap(processtarget, ad_tasks)
for i, result in enumerate(jobs):
of.write('\t'.join(map(str,result)) + '\n')
of.close()
| repo_name: lhm30/PIDGIN | path: ad_analysis_all.py | language: Python | license: mit | size: 3,415 | keyword: ["RDKit"] | text_hash: bb9cad0a2c715473d15ac08b0e1e9e23c3bc4d709eb3b24cd81ffc7618bfc847 |
#!/usr/bin/env python
##########################################################################
#
# Generation of boundary representation from arbitrary geophysical
# fields and initialisation for anisotropic, unstructured meshing.
#
# Copyright (C) 2011-2013 Dr Adam S. Candy, adam.candy@imperial.ac.uk
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##########################################################################
import sys
import shutil
import math
from Scientific.IO import NetCDF
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot
from pylab import contour
#import matplotlib
#matplotlib._cntr.Cntr
#from matplotlib import contour
#matplotlib.use('Agg')
from numpy import zeros, array, append, exp
import gmsh
#contour = matplotlib.pyplot.contour
# TODO
# Calculate area in right projection
# Add region selection function
# Ensure all islands selected
# Identify Open boundaries differently
# Export command line to geo file
# If nearby, don't close with parallel
def printv(text):
if (arguments.verbose):
print text
gmsh.gmsh_geo_comment(output, text)
def printvv(text):
if (arguments.debug):
print text
def expand_boxes(region, boxes):
def error():
print 'Error in argument for -b.'
sys.exit(1)
def build_function(function, requireand, axis, comparison, number):
if (len(number) > 0):
function = '%s%s(%s %s %s)' % (function, requireand, axis, comparison, number)
requireand = ' and '
return [function, requireand]
#re.sub(pattern, repl, string,
#((latitude >= -89.0) and (latitude <=-65.0) and (longitude >= -64.0) and (longitude <= -20.0))'
if (len(boxes) > 0):
function = ''
requireor = ''
for box in boxes:
longlat = box.split(',')
if (len(longlat) != 2): error()
long = longlat[0].split(':')
lat = longlat[1].split(':')
if ((len(long) != 2) and (len(lat) != 2)): error()
function_box = ''
requireand = ''
if (len(long) == 2):
[function_box, requireand] = build_function(function_box, requireand, 'longitude', '>=', long[0])
[function_box, requireand] = build_function(function_box, requireand, 'longitude', '<=', long[1])
if (len(lat) == 2):
[function_box, requireand] = build_function(function_box, requireand, 'latitude', '>=', lat[0])
[function_box, requireand] = build_function(function_box, requireand, 'latitude', '<=', lat[1])
if (len(function_box) > 0):
function = '%s%s(%s)' % (function, requireor, function_box)
requireor = ' or '
if (len(function) > 0):
if (region != 'True'):
region = '((%s) and (%s))' % (region, function)
else:
region = function
return region
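# Example: a single box '-85.0:-20.0,-89.0:-75.0' with region 'True' expands to
#   '((longitude >= -85.0) and (longitude <= -20.0) and (latitude >= -89.0) and (latitude <= -75.0))'
# which is later evaluated per point by check_point_required below.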
def usage():
print '''
-n filename | Input netCDF file
-f filename | Output Gmsh file
-p path1 (path2).. | Specify paths to include
-r function | Function specifying region of interest
-b box1 (box2).. | Boxes with regions of interest
-a minarea | Minimum area of islands
-dx dist | Distance of steps when drawing parallels and meridians (currently in degrees - need to project)
-bounding_latitude latitude | Latitude of boundary to close the domain
-bl latitude | Short form of -bounding_latitude
-exclude_ice_shelves | Excludes iceshelf ocean cavities from mesh (default behaviour includes region)
-smooth_data degree | Smoothes boundaries
-no | Do not include open boundaries
-lat latitude | Latitude to extend open domain to
-s scenario | Select scenario (in development)
-v | Verbose
-vv | Very verbose (debugging)
-q | Quiet
-h | Help
------------------------------------------------------------
Example usage:
Include only the main Antarctic mass (path 1), and only parts which lie below 60S
./rtopo_mask_to_stereographic.py RTopo105b_50S.nc -r 'latitude <= -60.0' -p 1
Filchner-Ronne extended out to the 65S parallel
./rtopo_mask_to_stereographic.py RTopo105b_50S.nc -no -b -85.0:-20.0,-89.0:-75.0 -64.0:-30.0,-89.0:-70.0 -30.0:-20.0,-89.0:-75.0 -lat '-65.0'
Antarctica, everything below the 60S parallel, coarse approximation to open boundary
./rtopo_mask_to_stereographic.py RTopo105b_50S.nc -dx 2 -r 'latitude <= -60'
Small region close to the Filchner-Ronne ice shelf
./rtopo_mask_to_stereographic.py RTopo105b_50S.nc -no -b -85.0:-20.0,-89.0:-75.0 -64.0:-30.0,-89.0:-70.0 -30.0:-20.0,-89.0:-75.0 -p 1 -r 'latitude <= -83'
Amundsen Sea
./rtopo_mask_to_stereographic.py RTopo105b_50S.nc -no -b -130.0:-85.0,-85.0:-60.0 -lat -64.0
Small islands, single out, or group with -p
312, 314
79 - an island on 90W 68S
'''
sys.exit(0)
#def scenario(name):
# filcher_ronne = argument
argv = sys.argv[1:]
dx_default = 0.1
class arguments:
input = './RTopo105b_50S.nc'
#output = './stereographic_projection.geo'
output = './shorelines.geo'
boundaries = []
region = 'True'
box = []
minarea = 0
dx = dx_default
extendtolatitude = None
open = True
verbose = True
debug = False
call = ' '.join(argv)
bounding_lat = -50.0
smooth_data = False
smooth_degree = 100
include_iceshelf_ocean_cavities = True
while (len(argv) > 0):
argument = argv.pop(0).rstrip()
if (argument == '-h'): usage()
elif (argument == '-s'): arguments.scenario = str(argv.pop(0).rstrip()); arguments=scenario(arguments.scenario)
elif (argument == '-n'): arguments.input = argv.pop(0).rstrip()
elif (argument == '-f'): arguments.output = argv.pop(0).rstrip()
elif (argument == '-r'): arguments.region = argv.pop(0).rstrip()
elif (argument == '-dx'): arguments.dx = float(argv.pop(0).rstrip())
elif (argument == '-lat'): arguments.extendtolatitude = float(argv.pop(0).rstrip())
elif (argument == '-a'): arguments.minarea = float(argv.pop(0).rstrip())
elif (argument == '-bounding_latitude'): arguments.bounding_lat =float(argv.pop(0).rstrip())
elif (argument == '-bl'): arguments.bounding_lat = float(argv.pop(0).rstrip())
elif (argument == '-smooth_data'):
arguments.smooth_degree = int(argv.pop(0).rstrip())
arguments.smooth_data = True
elif (argument == '-no'): arguments.open = False
elif (argument == '-exclude_ice_shelves'): arguments.include_iceshelf_ocean_cavities = False
elif (argument == '-v'): arguments.verbose = True
elif (argument == '-vv'): arguments.verbose = True; arguments.debug = True;
elif (argument == '-q'): arguments.verbose = False
elif (argument == '-p'):
while ((len(argv) > 0) and (argv[0][0] != '-')):
arguments.boundaries.append(int(argv.pop(0).rstrip()))
elif (argument == '-b'):
while ((len(argv) > 0) and ((argv[0][0] != '-') or ( (argv[0][0] == '-') and (argv[0][1].isdigit()) ))):
arguments.box.append(argv.pop(0).rstrip())
arguments.region = expand_boxes(arguments.region, arguments.box)
source = file(arguments.input,'r')
output = file(arguments.output,'w')
gmsh.gmsh_geo_comment(output, 'Arguments: ' + arguments.call)
printv('Source netCDF located at ' + arguments.input)
printv('Output to ' + arguments.output)
if (len(arguments.boundaries) > 0):
printv('Boundaries restricted to ' + str(arguments.boundaries))
if (arguments.region != 'True'):
printv('Region defined by ' + str(arguments.region))
if (arguments.dx != dx_default):
printv('Open contours closed with a line formed by points spaced %g degrees apart' % (arguments.dx))
if (arguments.extendtolatitude is not None):
printv('Extending region to meet parallel on latitude ' + str(arguments.extendtolatitude))
gmsh.gmsh_geo_comment(output, '')
def smoothGaussian(list,degree,strippedXs=False):
list = list.tolist()
window=degree*2-1
weight=array([1.0]*window)
weightGauss=[]
for i in range(window):
i=i-degree+1
frac=i/float(window)
gauss=1/(exp((4*(frac))**2))
weightGauss.append(gauss)
weight=array(weightGauss)*weight
smoothed=[0.0]*(len(list)-window)
for i in range(len(smoothed)):
smoothed[i]=sum(array(list[i:i+window])*weight)/sum(weight)
return array(smoothed)
def project(location):
longitude = location[0]
latitude = location[1]
cos = math.cos
sin = math.sin
#pi = math.pi
#longitude_rad2 = longitude * ( pi / 180 )
#latitude_rad2 = latitude * ( pi / 180 )
longitude_rad = math.radians(- longitude - 90)
latitude_rad = math.radians(latitude)
# Changed sign in x formulae - need to check
x = sin( longitude_rad ) * cos( latitude_rad ) / ( 1 + sin( latitude_rad ) );
y = cos( longitude_rad ) * cos( latitude_rad ) / ( 1 + sin( latitude_rad ) );
return [ x, y ]
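# Example of the stereographic projection above (values rounded):
#   project([0.0, -60.0])  ->  [-3.73, 0.0]
# Note the formula is singular at latitude -90, where 1 + sin(latitude) becomes zero.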
def read_rtopo(filename):
file = NetCDF.NetCDFFile(filename, 'r')
#variableNames = fileN.variables.keys()
lon = file.variables['lon'][:]
lat = file.variables['lat'][:]
field = file.variables['z'][:, :]
# % 2
# 0 ocean 1
# 1 ice 0
# 2 shelf 1
# 3 rock 0
if arguments.include_iceshelf_ocean_cavities == True:
printv('Including iceshelf ocean cavities')
field = field % 2
else:
printv('Excluding iceshelf ocean cavities')
field[field>0.5]=1
paths = contour(lon,lat,field,levels=[0.5]).collections[0].get_paths()
return paths
def area_enclosed(p):
return 0.5 * abs(sum(x0*y1 - x1*y0 for ((x0, y0), (x1, y1)) in segments(p)))
def segments(p):
# accepts either a list of points or a numpy array of shape (N, 2);
# list(p[1:]) avoids elementwise numpy addition when p is an array
return zip(p, list(p[1:]) + [p[0]])
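# area_enclosed implements the shoelace formula. Worked example (unit square):
#   area_enclosed([[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0]])  ->  1.0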
def check_point_required(region, location):
# make all definitions of the math module available to the function
globals = dict(math.__dict__)  # copy, so the math module namespace is not mutated
globals['longitude'] = location[0]
globals['latitude'] = location[1]
return eval(region, globals)
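# Example: the region string is evaluated with longitude/latitude bound for each point,
#   check_point_required('latitude <= -60.0', [-70.0, -65.0])  ->  True   (longitude -70, latitude -65)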
def array_to_gmsh_points(num, index, location, minarea, region, dx, latitude_max):
gmsh.gmsh_geo_comment(output, 'Ice-Land mass number %s' % (num))
count = 0
pointnumber = len(location[:,0])
valid = [False]*pointnumber
validnumber = 0
loopstart = None
loopend = None
flag = 0
#location[:, 0] = - location[:, 0] - 90.0
for point in range(pointnumber):
longitude = location[point, 0]
latitude = location[point, 1]
if ( check_point_required(region, location[point, :]) ):
valid[point] = True
validnumber += 1
if (flag == 0):
loopstart = point
flag = 1
elif (flag == 1):
loopend = point
#print latitude, valid[point]
if (loopend is None):
printvv('Path %i skipped (no points found in region)' % ( num ))
gmsh.gmsh_geo_comment(output, ' Skipped (no points found in region)\n')
return index
closelast=False
if (compare_points(location[loopstart,:], location[loopend,:], dx)):
# Remove duplicate line at end
# Note loopend no longer valid
valid[loopend] = False
validnumber -= 1
closelast=True
validlocation = zeros( (validnumber, 2) )
close = [False]*validnumber
count = 0
closingrequired = False
closingrequirednumber = 0
for point in range(pointnumber):
if (valid[point]):
validlocation[count,:] = location[point,:]
if ((closingrequired) and (count > 0)):
if (compare_points(validlocation[count-1,:], validlocation[count,:], dx)):
closingrequired = False
close[count] = closingrequired
count += 1
closingrequired = False
else:
if (not closingrequired):
closingrequired = True
closingrequirednumber += 1
if (closelast):
close[-1] = True
closingrequirednumber += 1
if (closingrequirednumber == 0):
closingtext = ''
elif (closingrequirednumber == 1):
closingtext = ' (required closing in %i part of the path)' % (closingrequirednumber)
else:
closingtext = ' (required closing in %i parts of the path)' % (closingrequirednumber)
area = area_enclosed(validlocation)
if (area < minarea):
printvv('Path %i skipped (area too small)' % ( num ))
gmsh.gmsh_geo_comment(output, ' Skipped (area too small)\n')
return index
printv('Path %i points %i/%i area %g%s' % ( num, validnumber, pointnumber, area_enclosed(validlocation), closingtext ))
# if (closingrequired and closewithparallel):
# latitude_max = None
# index_start = index + 1
# for point in range(validnumber - 1):
# longitude = validlocation[point,0]
# latitude = validlocation[point,1]
# index += 1
# loc = project(longitude, latitude)
# gmsh.gmsh_geo_draw_point(output, index, loc, 0) )
# if (latitude_max is None):
# latitude_max = latitude
# else:
# latitude_max = max(latitude_max, latitude)
# gmsh.gmsh_geo_draw_parallel(output, index, index_start, [ validlocation[point,0], max(latitude_max, validlocation[point,1]) ], [ validlocation[0,0], max(latitude_max, validlocation[0,1]) ], points=200)
# index += 200
#
# index += 1
# gmsh.gmsh_geo_draw_point(output, index, project(validlocation[0,0], validlocation[0,1]), 0) )
#
# else:
if (close[0]):
close[-1] = close[0]
index.start = index.point + 1
loopstartpoint = index.start
for point in range(validnumber):
#longitude = validlocation[point,0]
#latitude = validlocation[point,1]
if ((close[point]) and (point == validnumber - 1) and (not (compare_points(validlocation[point], validlocation[0], dx)))):
gmsh.gmsh_geo_comment(output, '**** END ' + str(point) + '/' + str(validnumber-1) + str(close[point]))
index = gmsh.gmsh_geo_draw_loop(output, boundary, index, loopstartpoint, False, False)
index = draw_parallel_explicit(validlocation[point], validlocation[0], index, latitude_max, dx)
index = gmsh.gmsh_geo_draw_loop(output, boundary, index, loopstartpoint, True, True)
gmsh.gmsh_geo_comment(output, '**** END end of loop ' + str(closelast) + str(point) + '/' + str(validnumber-1) + str(close[point]))
elif ((close[point]) and (point > 0) and (not (compare_points(validlocation[point], validlocation[0], dx)))):
gmsh.gmsh_geo_comment(output, '**** NOT END ' + str(point) + '/' + str(validnumber-1) + str(close[point]))
gmsh.gmsh_geo_comment(output, str(validlocation[point,:]) + str(validlocation[point,:]))
index = gmsh.gmsh_geo_draw_loop(output, boundary, index, loopstartpoint, False, False)
index = draw_parallel_explicit(validlocation[point - 1], validlocation[point], index, latitude_max, dx)
index = gmsh.gmsh_geo_draw_loop(output, boundary, index, loopstartpoint, False, True)
gmsh.gmsh_geo_comment(output, '**** NOT END end of loop ' + str(point) + '/' + str(validnumber-1) + str(close[point]))
else:
index.point += 1
gmsh.gmsh_geo_draw_point(output, index.point, project(validlocation[point,:]), 0)
index.contournodes.append(index.point)
index = gmsh.gmsh_geo_draw_loop(output, boundary, index, loopstartpoint, (closelast and (point == validnumber - 1)), False)
return index
#LoopStart1 = IP + 20;
#LoopEnd1 = IP + 3157;
#BSpline ( IL + 1 ) = { IP + 20 : IP + 3157 };
#Line Loop( ILL + 10 ) = { IL + 1 };
#
#LoopStart1 = IP + 3157;
#LoopEnd1 = IP + 3231;
#BSpline ( IL + 2 ) = { IP + 3157 : IP + 3231, IP + 20 };
#Line Loop( ILL + 20 ) = { IL + 2 };
def output_boundaries(index, filename, paths=None, minarea=0, region='True', dx=0.1, latitude_max=None):
pathall = read_rtopo(filename)
printv('Paths found: ' + str(len(pathall)))
gmsh.gmsh_geo_header(output)
splinenumber = 0
indexbase = 1
index.point = indexbase
if ((paths is not None) and (len(paths) > 0)):
pathids=paths
else:
pathids=range(len(pathall)+1)[1:]
for num in pathids:
xy=pathall[num-1].vertices
if arguments.smooth_data:
x = smoothGaussian(xy[:,0], degree=arguments.smooth_degree)
y = smoothGaussian(xy[:,1], degree=arguments.smooth_degree)
xy = zeros([len(x),2])
xy[:,0] = x
xy[:,1] = y
index = array_to_gmsh_points(num, index, xy, minarea, region, dx, latitude_max)
#for i in range(-85, 0, 5):
# indexend += 1
# gmsh.gmsh_geo_draw_point(output, indexend, project(0, i), 0) )
#for i in range(-85, 0, 5):
# indexend += 1
# gmsh.gmsh_geo_draw_point(output, indexend, project(45, i), 0) )
gmsh.gmsh_geo_remove_projection_points(output)
return index
def compare_points(a, b, dx):
tolerance = dx * 0.6
if ( not (abs(a[1] - b[1]) < tolerance) ):
#gmsh.gmsh_geo_comment(output, 'lat differ')
return False
elif (abs(a[0] - b[0]) < tolerance):
#gmsh.gmsh_geo_comment(output, 'long same')
return True
elif ((abs(abs(a[0]) - 180) < tolerance) and (abs(abs(b[0]) - 180) < tolerance)):
#gmsh.gmsh_geo_comment(output, 'long +/-180')
return True
else:
#gmsh.gmsh_geo_comment(output, 'not same %g %g' % (abs(abs(a[0]) - 180), abs(abs(b[0]) - 180) ) )
return False
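# Example: points are treated as coincident within 0.6*dx, including across the +/-180 degree seam:
#   compare_points([179.9, -65.0], [-179.95, -65.0], 0.5)  ->  True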
def output_open_boundaries(index, boundary, dx):
parallel = arguments.bounding_lat
index.start = index.point + 1
loopstartpoint = index.start
index = draw_parallel_explicit([ -1.0, parallel], [ 179.0, parallel], index, None, dx)
index = draw_parallel_explicit([-179.0, parallel], [ 1.0, parallel], index, None, dx)
index = gmsh.gmsh_geo_draw_loop(output, boundary, index, loopstartpoint, True, True)
return index
def draw_parallel_explicit(start, end, index, latitude_max, dx):
#print start, end, index.point
# Note start is actual start - 1
if (latitude_max is None):
latitude_max = max(start[1], end[1])
else:
latitude_max = max(latitude_max, start[1], end[1])
current = start
tolerance = dx * 0.6
gmsh.gmsh_geo_comment(output, 'Closing path with parallels and meridians, from (%.8f, %.8f) to (%.8f, %.8f)' % ( start[0], start[1], end[0], end[1] ) )
if (compare_points(current, end, dx)):
gmsh.gmsh_geo_comment(output, 'Points already close enough, no need to draw parallels and meridians after all')
return index
gmsh.gmsh_geo_comment(output, 'Drawing meridian to max latitude index %s at %.2f, %.2f (to match %.2f)' % (index.point, current[0], current[1], latitude_max))
while (current[1] != latitude_max):
if (current[1] < latitude_max):
current[1] = current[1] + dx
else:
current[1] = current[1] - dx
if (abs(current[1] - latitude_max) < tolerance): current[1] = latitude_max
if (compare_points(current, end, dx)): return index
index.point += 1
printvv('Drawing meridian to max latitude index %s at %.2f, %.2f (to match %.2f)' % (index.point, current[0], current[1], latitude_max))
loc = project(current)
gmsh.gmsh_geo_draw_point(output, index.point, loc, 0.0)
gmsh.gmsh_geo_comment(output, 'Drawing parallel index %s at %.2f (to match %.2f), %.2f' % (index.point, current[0], end[0], current[1]))
while (current[0] != end[0]):
if (current[0] < end[0]):
current[0] = current[0] + dx
else:
current[0] = current[0] - dx
if (abs(current[0] - end[0]) < tolerance): current[0] = end[0]
if (compare_points(current, end, dx)): return index
index.point += 1
printvv('Drawing parallel index %s at %.2f (to match %.2f), %.2f' % (index.point, current[0], end[0], current[1]))
loc = project(current)
gmsh.gmsh_geo_draw_point(output, index.point, loc, 0.0)
gmsh.gmsh_geo_comment(output, 'Drawing meridian to end index %s at %.2f, %.2f (to match %.2f)' % (index.point, current[0], current[1], end[1]))
while (current[1] != end[1]):
if (current[1] < end[1]):
current[1] = current[1] + dx
else:
current[1] = current[1] - dx
if (abs(current[1] - end[1]) < tolerance): current[1] = end[1]
if (compare_points(current, end, dx)): return index
index.point += 1
printvv('Drawing meridian to end index %s at %.2f, %.2f (to match %.2f)' % (index.point, current[0], current[1], end[1]))
loc = project(current)
gmsh.gmsh_geo_draw_point(output, index.point, loc, 0.0)
gmsh.gmsh_geo_comment(output, 'Closed path with parallels and meridians, from (%.8f, %.8f) to (%.8f, %.8f)' % ( start[0], start[1], end[0], end[1] ) )
return index
def acc_array():
acc = array([[ 1.0, -53.0 ],
[ 10.0, -53.0 ],
[ 20.0, -52.0 ],
[ 30.0, -56.0 ],
[ 40.0, -60.0 ],
[ 50.0, -63.0 ],
[ 60.0, -64.0 ],
[ 70.0, -65.0 ],
[ 80.0, -67.0 ],
[ 90.0, -60.0 ],
[ 100.0, -58.0 ],
[ 110.0, -62.0 ],
[ 120.0, -63.0 ],
[ 130.0, -65.0 ],
[ 140.0, -65.0 ],
[ 150.0, -64.0 ],
[ 160.0, -61.0 ],
[ 170.0, -64.0 ],
[ 179.0, -65.0 ],
[-179.0, -65.0 ],
[-170.0, -64.0 ],
[-160.0, -62.0 ],
[-150.0, -66.0 ],
[-140.0, -58.0 ],
[-130.0, -60.0 ],
[-120.0, -65.0 ],
[-110.0, -66.0 ],
[-100.0, -70.0 ],
[ -90.0, -70.0 ],
[ -80.0, -77.0 ],
[ -70.0, -72.0 ],
[ -60.0, -60.0 ],
[ -50.0, -57.0 ],
[ -40.0, -51.0 ],
[ -30.0, -50.0 ],
[ -20.0, -60.0 ],
[ -10.0, -56.0 ],
[ -1.0, -53.0 ]])
return acc
def draw_acc_old(index, boundary, dx):
acc = acc_array()
gmsh.gmsh_geo_comment(output, 'ACC')
index.start = index.point + 1
loopstartpoint = index.start
for i in range(len(acc[:,0])):
index.point += 1
location = project(acc[i,:])
gmsh.gmsh_geo_draw_point(output, index.point, location, 0.0)
for i in range(len(acc[:,0])):
a = index.start + i
b = a + 1
if (a == index.point):
b = index.start
output.write('Line(%i) = {%i,%i};\n' % (i + 100000, a, b ))
output.write('Line Loop(999999) = { %i : %i};\n' % ( index.start, index.point ))
return index
def draw_acc(index, boundary, dx):
acc = acc_array()
acc1 = acc[0:18,:]
acc2 = acc[19:,:]
print acc1
print acc2
gmsh.gmsh_geo_comment(output, 'ACC')
index.start = index.point + 1
loopstartpoint = index.start
for i in range(len(acc1[:,0])):
index.point += 1
location = project(acc1[i,:])
gmsh.gmsh_geo_draw_point(output, index.point, location, 0.0)
index = gmsh.gmsh_geo_draw_loop(output, boundary, index, loopstartpoint, False, True)
#index.start = index.point + 1
#loopstartpoint = index.start
for i in range(len(acc2[:,0])):
index.point += 1
location = project(acc2[i,:])
gmsh.gmsh_geo_draw_point(output, index.point, location, 0.0)
index = gmsh.gmsh_geo_draw_loop(output, boundary, index, loopstartpoint, True, True)
return index
class index:
point = 0
path = 0
contour = []
contournodes= []
open = []
skipped = []
start = 0
pathsinloop = []
loop = 0
loops = []
class boundary:
contour = 3
open = 4
surface = 9
index = output_boundaries(index, filename=arguments.input, paths=arguments.boundaries, minarea=arguments.minarea, region=arguments.region, dx=arguments.dx, latitude_max=arguments.extendtolatitude)
if (arguments.open): index = output_open_boundaries(index, boundary, arguments.dx)
printv('Open boundaries (id %i): %s' % (boundary.open, gmsh.list_to_space_separated(index.open, add=1)))
printv('Closed boundaries (id %i): %s' % (boundary.contour, gmsh.list_to_space_separated(index.contour, add=1)))
gmsh.gmsh_geo_define_surfaces(output, index, boundary)
#index = draw_acc(index, boundary, arguments.dx)
gmsh.gmsh_geo_output_fields(output, index,boundary)
if (len(index.skipped) > 0):
printv('Skipped (because no point on the boundary appeared in the required region, or area enclosed by the boundary was too small):\n'+' '.join(index.skipped))
output.close()
| repo_name: adamcandy/QGIS-Meshing | path: extras/contouring/rtopo_mask_to_stereographic.py | language: Python | license: lgpl-2.1 | size: 23,934 | keyword: ["NetCDF"] | text_hash: 75bf38902fd1bdddee877092a9a131239a1ef88faeb441e78b8624d5166f3d5b |
__RCSID__ = "$Id$"
from DIRAC import gLogger, gConfig, S_OK
from DIRAC.Core.Base.AgentModule import AgentModule
from DIRAC.StorageManagementSystem.Client.StorageManagerClient import StorageManagerClient
from DIRAC.Resources.Storage.StorageElement import StorageElement
from DIRAC.StorageManagementSystem.DB.StorageManagementDB import THROTTLING_STEPS, THROTTLING_TIME
import re
AGENT_NAME = 'StorageManagement/StageRequestAgent'
class StageRequestAgent( AgentModule ):
def initialize( self ):
self.stagerClient = StorageManagerClient()
# self.storageDB = StorageManagementDB()
# pin lifetime = 1 day
self.pinLifetime = self.am_getOption( 'PinLifetime', THROTTLING_TIME )
# This sets the Default Proxy to used as that defined under
# /Operations/Shifter/DataManager
# the shifterProxy option in the Configuration can be used to change this default.
self.am_setOption( 'shifterProxy', 'DataManager' )
return S_OK()
def execute( self ):
# Get the current submitted stage space and the amount of pinned space for each storage element
res = self.getStorageUsage()
if not res['OK']:
return res
return self.submitStageRequests()
def getStorageUsage( self ):
""" Fill the current Status of the SE Caches from the DB
"""
self.storageElementCache = {}
res = self.stagerClient.getSubmittedStagePins()
if not res['OK']:
gLogger.fatal( "StageRequest.getStorageUsage: Failed to obtain submitted requests from StorageManagementDB.", res['Message'] )
return res
self.storageElementUsage = res['Value']
if self.storageElementUsage:
gLogger.info( "StageRequest.getStorageUsage: Active stage/pin requests found at the following sites:" )
for storageElement in sorted( self.storageElementUsage.keys() ):
seDict = self.storageElementUsage[storageElement]
# Convert to GB for printout
seDict['TotalSize'] = seDict['TotalSize'] / ( 1000 * 1000 * 1000.0 )
gLogger.info( "StageRequest.getStorageUsage: %s: %s replicas with a size of %.3f GB." %
( storageElement.ljust( 15 ), str( seDict['Replicas'] ).rjust( 6 ), seDict['TotalSize'] ) )
if not self.storageElementUsage:
gLogger.info( "StageRequest.getStorageUsage: No active stage/pin requests found." )
return S_OK()
def submitStageRequests( self ):
""" This manages the following transitions of the Replicas
* Waiting -> Offline (if the file is not found Cached)
* Waiting -> StageSubmitted (if the file is found Cached)
* Offline -> StageSubmitted (if there are not more Waiting replicas)
"""
# Retry Replicas that have not been Staged in a previous attempt
res = self._getMissingReplicas()
if not res['OK']:
gLogger.fatal( "StageRequest.submitStageRequests: Failed to get replicas from StorageManagementDB.", res['Message'] )
return res
seReplicas = res['Value']['SEReplicas']
allReplicaInfo = res['Value']['AllReplicaInfo']
if seReplicas:
gLogger.info( "StageRequest.submitStageRequests: Completing partially Staged Tasks" )
for storageElement, seReplicaIDs in seReplicas.iteritems():
gLogger.debug( 'Staging at %s:' % storageElement, seReplicaIDs )
self._issuePrestageRequests( storageElement, seReplicaIDs, allReplicaInfo )
# Check Waiting Replicas and select those found Online and all other Replicas from the same Tasks
res = self._getOnlineReplicas()
if not res['OK']:
gLogger.fatal( "StageRequest.submitStageRequests: Failed to get replicas from StorageManagementDB.", res['Message'] )
return res
seReplicas = res['Value']['SEReplicas']
allReplicaInfo = res['Value']['AllReplicaInfo']
# Check Offline Replicas that fit in the Cache and all other Replicas from the same Tasks
res = self._getOfflineReplicas()
if not res['OK']:
gLogger.fatal( "StageRequest.submitStageRequests: Failed to get replicas from StorageManagementDB.", res['Message'] )
return res
# Merge info from both results
for storageElement, seReplicaIDs in res['Value']['SEReplicas'].iteritems():
seReplicas.setdefault( storageElement, [] ).extend( seReplicaIDs )
allReplicaInfo.update( res['Value']['AllReplicaInfo'] )
gLogger.info( "StageRequest.submitStageRequests: Obtained %s replicas for staging." % len( allReplicaInfo ) )
for storageElement, seReplicaIDs in seReplicas.iteritems():
gLogger.debug( 'Staging at %s:' % storageElement, seReplicaIDs )
self._issuePrestageRequests( storageElement, seReplicaIDs, allReplicaInfo )
return S_OK()
def _getMissingReplicas( self ):
""" This recovers Replicas that were not Staged on a previous attempt (the stage request failed or timed out),
while other Replicas of the same task are already Staged. If left behind they can produce a deadlock.
All SEs are considered, even if their Cache is full
"""
# Get Replicas that are in Staged/StageSubmitted
gLogger.info( 'StageRequest._getMissingReplicas: Checking Staged Replicas' )
res = self.__getStagedReplicas()
if not res['OK']:
gLogger.fatal( "StageRequest._getMissingReplicas: Failed to get replicas from StorageManagementDB.", res['Message'] )
return res
seReplicas = {}
allReplicaInfo = res['Value']['AllReplicaInfo']
replicasToStage = []
for seReplicaIDs in res['Value']['SEReplicas'].itervalues():
# Consider all SEs
replicasToStage += seReplicaIDs
# Get Replicas from the same Tasks as those selected
res = self.__addAssociatedReplicas( replicasToStage, seReplicas, allReplicaInfo )
if not res['OK']:
gLogger.fatal( "StageRequest._getMissingReplicas: Failed to get associated Replicas.", res['Message'] )
return res
def _getOnlineReplicas( self ):
""" This manages the transition
* Waiting -> Offline (if the file is not found Cached)
and returns the list of Cached Replicas for which the pin time has to be extended
SEs for which the cache is currently full are not considered
"""
# Get all Replicas in Waiting Status associated to Staging Tasks
gLogger.verbose( 'StageRequest._getOnlineReplicas: Checking Online Replicas to be handled' )
res = self.__getWaitingReplicas()
if not res['OK']:
gLogger.fatal( "StageRequest._getOnlineReplicas: Failed to get replicas from StorageManagementDB.", res['Message'] )
return res
seReplicas = {}
allReplicaInfo = res['Value']['AllReplicaInfo']
if not len( allReplicaInfo ):
gLogger.info( "StageRequest._getOnlineReplicas: There were no Waiting replicas found" )
return res
gLogger.info( "StageRequest._getOnlineReplicas: Obtained %s replicas Waiting for staging." % len( allReplicaInfo ) )
replicasToStage = []
for storageElement, seReplicaIDs in res['Value']['SEReplicas'].iteritems():
if not self.__usage( storageElement ) < self.__cache( storageElement ):
gLogger.info( 'StageRequest._getOnlineReplicas: Skipping %s, current usage above limit ( %s GB )' % ( storageElement, self.__cache( storageElement ) ) )
# Do not consider those SE that have the Cache full
continue
# Check if the Replica Metadata is OK and find out if they are Online or Offline
res = self.__checkIntegrity( storageElement, seReplicaIDs, allReplicaInfo )
if not res['OK']:
gLogger.error( 'StageRequest._getOnlineReplicas: Failed to check Replica Metadata', '(%s): %s' % ( storageElement, res['Message'] ) )
else:
# keep only Online Replicas
seReplicas[storageElement] = res['Value']['Online']
replicasToStage += res['Value']['Online']
# Get Replicas from the same Tasks as those selected
res = self.__addAssociatedReplicas( replicasToStage, seReplicas, allReplicaInfo )
if not res['OK']:
gLogger.fatal( "StageRequest._getOnlineReplicas: Failed to get associated Replicas.", res['Message'] )
return res
def _getOfflineReplicas( self ):
""" This checks Replicas in Offline status
and returns the list of Replicas to be Staged
SEs for which the cache is currently full are not considered
"""
# Get all Replicas in Waiting Status associated to Staging Tasks
gLogger.verbose( 'StageRequest._getOfflineReplicas: Checking Offline Replicas to be handled' )
res = self.__getOfflineReplicas()
if not res['OK']:
gLogger.fatal( "StageRequest._getOfflineReplicas: Failed to get replicas from StorageManagementDB.", res['Message'] )
return res
seReplicas = {}
allReplicaInfo = res['Value']['AllReplicaInfo']
if not len( allReplicaInfo ):
gLogger.info( "StageRequest._getOfflineReplicas: There were no Offline replicas found" )
return res
gLogger.info( "StageRequest._getOfflineReplicas: Obtained %s replicas Offline for staging." % len( allReplicaInfo ) )
replicasToStage = []
for storageElement, seReplicaIDs in res['Value']['SEReplicas'].iteritems():
if not self.__usage( storageElement ) < self.__cache( storageElement ):
gLogger.info( 'StageRequest._getOfflineReplicas: Skipping %s, current usage above limit ( %s GB )' % ( storageElement, self.__cache( storageElement ) ) )
# Do not consider those SE that have the Cache full
continue
seReplicas[storageElement] = []
for replicaID in sorted( seReplicaIDs ):
seReplicas[storageElement].append( replicaID )
replicasToStage.append( replicaID )
self.__add( storageElement, allReplicaInfo[replicaID]['Size'] )
if not self.__usage( storageElement ) < self.__cache( storageElement ):
# Stop adding Replicas when the cache is full
break
# Get Replicas from the same Tasks as those selected
res = self.__addAssociatedReplicas( replicasToStage, seReplicas, allReplicaInfo )
if not res['OK']:
gLogger.fatal( "StageRequest._getOfflineReplicas: Failed to get associated Replicas.", res['Message'] )
return res
def __usage( self, storageElement ):
""" Retrieve current usage of SE
"""
# Set it if not yet done
self.storageElementUsage.setdefault( storageElement, {'TotalSize': 0.} )
return self.storageElementUsage[storageElement]['TotalSize']
def __cache( self, storageElement ):
""" Retrieve cache size for SE
"""
if storageElement not in self.storageElementCache:
diskCacheTB = float(StorageElement(storageElement).options.get('DiskCacheTB', 1.0))
self.storageElementCache[storageElement] = diskCacheTB * 1000. / THROTTLING_STEPS
return self.storageElementCache[storageElement]
def __add( self, storageElement, size ):
""" Add size (in bytes) to current usage of storageElement (in GB)
"""
self.storageElementUsage.setdefault( storageElement, {'TotalSize': 0.} )
size /= 1000. * 1000. * 1000.
self.storageElementUsage[storageElement]['TotalSize'] += size
return size
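# Worked example (illustrative numbers): adding a 2,500,000,000 byte replica to an SE
# increases its 'TotalSize' usage by 2.5 (GB), and __add returns that 2.5.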
def _issuePrestageRequests( self, storageElement, seReplicaIDs, allReplicaInfo ):
""" Make the request to the SE and update the DB
"""
# Since we are in a give SE, the lfn is a unique key
lfnRepIDs = {}
for replicaID in seReplicaIDs:
lfn = allReplicaInfo[replicaID]['LFN']
lfnRepIDs[lfn] = replicaID
# Now issue the prestage requests for the remaining replicas
stageRequestMetadata = {}
updatedLfnIDs = []
if lfnRepIDs:
gLogger.info( "StageRequest._issuePrestageRequests: Submitting %s stage requests for %s." % ( len( lfnRepIDs ), storageElement ) )
res = StorageElement( storageElement ).prestageFile( lfnRepIDs, lifetime = self.pinLifetime )
gLogger.debug( "StageRequest._issuePrestageRequests: StorageElement.prestageStorageFile: res=", res )
# Daniela: fishy result from ReplicaManager!!! Should NOT return OK
# res= {'OK': True, 'Value': {'Successful': {}, 'Failed': {'srm://srm-lhcb.cern.ch/castor/cern.ch/grid/lhcb/data/2010/RAW/EXPRESS/LHCb/COLLISION10/71476/071476_0000000241.raw': ' SRM2Storage.__gfal_exec: Failed to perform gfal_prestage.[SE][BringOnline][SRM_INVALID_REQUEST] httpg://srm-lhcb.cern.ch:8443/srm/managerv2: User not able to access specified space token\n'}}}
# res= {'OK': True, 'Value': {'Successful': {'srm://gridka-dCache.fzk.de/pnfs/gridka.de/lhcb/data/2009/RAW/FULL/LHCb/COLLISION09/63495/063495_0000000001.raw': '-2083846379'}, 'Failed': {}}}
if not res['OK']:
gLogger.error( "StageRequest._issuePrestageRequests: Completely failed to submit stage requests for replicas.", res['Message'] )
else:
for lfn, requestID in res['Value']['Successful'].iteritems():
stageRequestMetadata.setdefault( requestID, [] ).append( lfnRepIDs[lfn] )
updatedLfnIDs.append( lfnRepIDs[lfn] )
if stageRequestMetadata:
gLogger.info( "StageRequest._issuePrestageRequests: %s stage request metadata to be updated." % len( stageRequestMetadata ) )
res = self.stagerClient.insertStageRequest( stageRequestMetadata, self.pinLifetime )
if not res['OK']:
gLogger.error( "StageRequest._issuePrestageRequests: Failed to insert stage request metadata.", res['Message'] )
return res
res = self.stagerClient.updateReplicaStatus( updatedLfnIDs, 'StageSubmitted' )
if not res['OK']:
gLogger.error( "StageRequest._issuePrestageRequests: Failed to insert replica status.", res['Message'] )
return
def __sortBySE( self, replicaDict ):
seReplicas = {}
replicaIDs = {}
for replicaID, info in replicaDict.iteritems():
lfn = info['LFN']
storageElement = info['SE']
size = info['Size']
pfn = info['PFN']
replicaIDs[replicaID] = {'LFN':lfn, 'PFN':pfn, 'Size':size, 'StorageElement':storageElement}
seReplicas.setdefault( storageElement, [] ).append( replicaID )
return S_OK( {'SEReplicas':seReplicas, 'AllReplicaInfo':replicaIDs} )
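# Sketch of __sortBySE with an assumed toy input:
#   {42: {'LFN': '/lhcb/a.raw', 'SE': 'CERN-RAW', 'Size': 1000, 'PFN': 'srm://...'}}
# -> S_OK({'SEReplicas': {'CERN-RAW': [42]},
#          'AllReplicaInfo': {42: {'LFN': '/lhcb/a.raw', 'PFN': 'srm://...', 'Size': 1000, 'StorageElement': 'CERN-RAW'}}})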
def __getStagedReplicas( self ):
""" This obtains the Staged replicas from the Replicas table and for each LFN the requested storage element """
# First obtain the Waiting replicas from the Replicas table
res = self.stagerClient.getStagedReplicas()
if not res['OK']:
gLogger.error( "StageRequest.__getStagedReplicas: Failed to get replicas with Waiting status.", res['Message'] )
return res
if not res['Value']:
gLogger.debug( "StageRequest.__getStagedReplicas: No Waiting replicas found to process." )
else:
gLogger.debug( "StageRequest.__getStagedReplicas: Obtained %s Waiting replicas(s) to process." % len( res['Value'] ) )
return self.__sortBySE( res['Value'] )
def __getWaitingReplicas( self ):
""" This obtains the Waiting replicas from the Replicas table and for each LFN the requested storage element """
# First obtain the Waiting replicas from the Replicas table
res = self.stagerClient.getWaitingReplicas()
if not res['OK']:
gLogger.error( "StageRequest.__getWaitingReplicas: Failed to get replicas with Waiting status.", res['Message'] )
return res
if not res['Value']:
gLogger.debug( "StageRequest.__getWaitingReplicas: No Waiting replicas found to process." )
else:
gLogger.debug( "StageRequest.__getWaitingReplicas: Obtained %s Waiting replicas(s) to process." % len( res['Value'] ) )
return self.__sortBySE( res['Value'] )
def __getOfflineReplicas( self ):
""" This obtains the Offline replicas from the Replicas table and for each LFN the requested storage element """
# First obtain the Waiting replicas from the Replicas table
res = self.stagerClient.getOfflineReplicas()
if not res['OK']:
gLogger.error( "StageRequest.__getOfflineReplicas: Failed to get replicas with Waiting status.", res['Message'] )
return res
if not res['Value']:
gLogger.debug( "StageRequest.__getOfflineReplicas: No Waiting replicas found to process." )
else:
gLogger.debug( "StageRequest.__getOfflineReplicas: Obtained %s Waiting replicas(s) to process." % len( res['Value'] ) )
return self.__sortBySE( res['Value'] )
def __addAssociatedReplicas( self, replicasToStage, seReplicas, allReplicaInfo ):
""" Retrieve the list of Replicas that belong to the same Tasks as the provided list
"""
res = self.stagerClient.getAssociatedReplicas( replicasToStage )
if not res['OK']:
gLogger.fatal( "StageRequest.__addAssociatedReplicas: Failed to get associated Replicas.", res['Message'] )
return res
addReplicas = {'Offline': {}, 'Waiting': {}}
replicaIDs = {}
for replicaID, info in res['Value'].iteritems():
lfn = info['LFN']
storageElement = info['SE']
size = info['Size']
pfn = info['PFN']
status = info['Status']
if status in ['Waiting', 'Offline']:
replicaIDs[replicaID] = {'LFN':lfn, 'PFN':pfn, 'Size':size, 'StorageElement':storageElement }
addReplicas[status].setdefault( storageElement, [] ).append( replicaID )
waitingReplicas = addReplicas['Waiting']
offlineReplicas = addReplicas['Offline']
newReplicaInfo = replicaIDs
allReplicaInfo.update( newReplicaInfo )
# First handle Waiting Replicas for which metadata is to be checked
for storageElement, seReplicaIDs in waitingReplicas.iteritems():
for replicaID in list( seReplicaIDs ):
if replicaID in replicasToStage:
seReplicaIDs.remove( replicaID )
res = self.__checkIntegrity( storageElement, seReplicaIDs, allReplicaInfo )
if not res['OK']:
gLogger.error( 'StageRequest.__addAssociatedReplicas: Failed to check Replica Metadata', '(%s): %s' % ( storageElement, res['Message'] ) )
else:
# keep all Replicas (Online and Offline)
seReplicas.setdefault( storageElement, [] ).extend( res['Value']['Online'] )
replicasToStage.extend( res['Value']['Online'] )
seReplicas[storageElement].extend( res['Value']['Offline'] )
replicasToStage.extend( res['Value']['Offline'] )
# Then handle Offline Replicas for which metadata is already checked
for storageElement, seReplicaIDs in offlineReplicas.iteritems():
for replicaID in sorted( seReplicaIDs ):
if replicaID in replicasToStage:
seReplicaIDs.remove( replicaID )
seReplicas.setdefault( storageElement, [] ).extend( seReplicaIDs )
replicasToStage.extend( seReplicaIDs )
for replicaID in allReplicaInfo.keys():
if replicaID not in replicasToStage:
del allReplicaInfo[replicaID]
totalSize = 0
for storageElement in sorted( seReplicas.keys() ):
replicaIDs = seReplicas[storageElement]
size = 0
for replicaID in replicaIDs:
size += self.__add( storageElement, allReplicaInfo[replicaID]['Size'] )
gLogger.info( 'StageRequest.__addAssociatedReplicas: Considering %s GB to be staged at %s' % ( size, storageElement ) )
totalSize += size
gLogger.info( "StageRequest.__addAssociatedReplicas: Obtained %s GB for staging." % totalSize )
return S_OK( {'SEReplicas':seReplicas, 'AllReplicaInfo':allReplicaInfo} )
def __checkIntegrity( self, storageElement, seReplicaIDs, allReplicaInfo ):
""" Check the integrity of the files to ensure they are available
Updates status of Offline Replicas for a later pass
Return list of Online replicas to be Stage
"""
if not seReplicaIDs:
return S_OK( {'Online': [], 'Offline': []} )
# Since we are with a given SE, the LFN is a unique key
lfnRepIDs = {}
for replicaID in seReplicaIDs:
lfn = allReplicaInfo[replicaID]['LFN']
lfnRepIDs[lfn] = replicaID
gLogger.info( "StageRequest.__checkIntegrity: Checking the integrity of %s replicas at %s." % ( len( lfnRepIDs ), storageElement ) )
res = StorageElement( storageElement ).getFileMetadata( lfnRepIDs )
if not res['OK']:
gLogger.error( "StageRequest.__checkIntegrity: Completely failed to obtain metadata for replicas.", res['Message'] )
return res
terminalReplicaIDs = {}
onlineReplicaIDs = []
offlineReplicaIDs = []
for lfn, metadata in res['Value']['Successful'].iteritems():
if metadata['Size'] != allReplicaInfo[lfnRepIDs[lfn]]['Size']:
gLogger.error( "StageRequest.__checkIntegrity: LFN StorageElement size does not match FileCatalog", lfn )
terminalReplicaIDs[lfnRepIDs[lfn]] = 'LFN StorageElement size does not match FileCatalog'
lfnRepIDs.pop( lfn )
elif metadata.get( 'Lost', False ):
gLogger.error( "StageRequest.__checkIntegrity: LFN has been Lost by the StorageElement", lfn )
terminalReplicaIDs[lfnRepIDs[lfn]] = 'LFN has been Lost by the StorageElement'
lfnRepIDs.pop( lfn )
elif metadata.get( 'Unavailable', False ):
gLogger.error( "StageRequest.__checkIntegrity: LFN is declared Unavailable by the StorageElement", lfn )
terminalReplicaIDs[lfnRepIDs[lfn]] = 'LFN is declared Unavailable by the StorageElement'
lfnRepIDs.pop( lfn )
elif metadata.get( 'Cached', metadata['Accessible'] ):
gLogger.verbose( "StageRequest.__checkIntegrity: Cache hit for file." )
onlineReplicaIDs.append( lfnRepIDs[lfn] )
else:
offlineReplicaIDs.append( lfnRepIDs[lfn] )
for lfn, reason in res['Value']['Failed'].iteritems():
if re.search( 'File does not exist', reason ):
gLogger.error( "StageRequest.__checkIntegrity: LFN does not exist in the StorageElement", lfn )
terminalReplicaIDs[lfnRepIDs[lfn]] = 'LFN does not exist in the StorageElement'
lfnRepIDs.pop( lfn )
# Update the states of the replicas in the database #TODO Sent status to integrity DB
if terminalReplicaIDs:
gLogger.info( "StageRequest.__checkIntegrity: %s replicas are terminally failed." % len( terminalReplicaIDs ) )
res = self.stagerClient.updateReplicaFailure( terminalReplicaIDs )
if not res['OK']:
gLogger.error( "StageRequest.__checkIntegrity: Failed to update replica failures.", res['Message'] )
if onlineReplicaIDs:
gLogger.info( "StageRequest.__checkIntegrity: %s replicas found Online." % len( onlineReplicaIDs ) )
if offlineReplicaIDs:
gLogger.info( "StageRequest.__checkIntegrity: %s replicas found Offline." % len( offlineReplicaIDs ) )
res = self.stagerClient.updateReplicaStatus( offlineReplicaIDs, 'Offline' )
return S_OK( {'Online': onlineReplicaIDs, 'Offline': offlineReplicaIDs} )
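# Illustrative sketch (not part of the original agent): how the structure
# returned above would typically be consumed by a caller within this class.
# Names are placeholders taken from the surrounding code.
#   res = self.__checkIntegrity( storageElement, seReplicaIDs, allReplicaInfo )
#   if res['OK']:
#     onlineReplicaIDs = res['Value']['Online']
#     offlineReplicaIDs = res['Value']['Offline']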
| fstagni/DIRAC | StorageManagementSystem/Agent/StageRequestAgent.py | Python | gpl-3.0 | 22,625 | ["DIRAC"] | 51b3edfddf250f68025352613069cd616c0cd056ab127d30570a708aa7406405 |
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module implements representations of slabs and surfaces, as well as
algorithms for generating them. If you use this module, please consider
citing the following work::
R. Tran, Z. Xu, B. Radhakrishnan, D. Winston, W. Sun, K. A. Persson,
S. P. Ong, "Surface Energies of Elemental Crystals", Scientific Data,
2016, 3:160080, doi: 10.1038/sdata.2016.80.
as well as::
Sun, W.; Ceder, G. Efficient creation and convergence of surface slabs,
Surface Science, 2013, 617, 53–59, doi:10.1016/j.susc.2013.05.016.
"""
import copy
import itertools
import json
import logging
import math
import os
import warnings
from functools import reduce
from math import gcd
import numpy as np
from monty.fractions import lcm
from scipy.cluster.hierarchy import fcluster, linkage
from scipy.spatial.distance import squareform
from pymatgen.analysis.structure_matcher import StructureMatcher
from pymatgen.core.lattice import Lattice
from pymatgen.core.periodic_table import get_el_sp
from pymatgen.core.sites import PeriodicSite
from pymatgen.core.structure import Structure
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.util.coord import in_coord_list
__author__ = "Richard Tran, Wenhao Sun, Zihan Xu, Shyue Ping Ong"
logger = logging.getLogger(__name__)
class Slab(Structure):
"""
Subclass of Structure representing a Slab. Implements additional
attributes pertaining to slabs, but the init method does not
actually implement any algorithm that creates a slab. This is a
DUMMY class whose init method only holds information about the
slab. It also has additional methods that return other information
about a slab such as the surface area, normal, and atom adsorption.
Note that all Slabs have the surface normal oriented perpendicular to the a
and b lattice vectors. This means the lattice vectors a and b are in the
surface plane and the c vector is out of the surface plane (though not
necessarily perpendicular to the surface).
.. attribute:: miller_index
Miller index of plane parallel to surface.
.. attribute:: scale_factor
Final computed scale factor that brings the parent cell to the
surface cell.
.. attribute:: shift
The shift value in Angstrom that indicates how much this
slab has been shifted.
"""
def __init__(
self,
lattice,
species,
coords,
miller_index,
oriented_unit_cell,
shift,
scale_factor,
reorient_lattice=True,
validate_proximity=False,
to_unit_cell=False,
reconstruction=None,
coords_are_cartesian=False,
site_properties=None,
energy=None,
):
"""
Makes a Slab structure, a structure object with additional information
and methods pertaining to slabs.
Args:
lattice (Lattice/3x3 array): The lattice, either as a
:class:`pymatgen.core.lattice.Lattice` or
simply as any 2D array. Each row should correspond to a lattice
vector. E.g., [[10,0,0], [20,10,0], [0,0,30]] specifies a
lattice with lattice vectors [10,0,0], [20,10,0] and [0,0,30].
species ([Species]): Sequence of species on each site. Can take in
flexible input, including:
i. A sequence of element / species specified either as string
symbols, e.g. ["Li", "Fe2+", "P", ...] or atomic numbers,
e.g., (3, 56, ...) or actual Element or Species objects.
ii. List of dict of elements/species and occupancies, e.g.,
[{"Fe" : 0.5, "Mn":0.5}, ...]. This allows the setup of
disordered structures.
coords (Nx3 array): list of fractional/cartesian coordinates of
each species.
miller_index ([h, k, l]): Miller index of plane parallel to
surface. Note that this is referenced to the input structure. If
you need this to be based on the conventional cell,
you should supply the conventional structure.
oriented_unit_cell (Structure): The oriented_unit_cell from which
this Slab is created (by scaling in the c-direction).
shift (float): The shift in the c-direction applied to get the
termination.
scale_factor (np.ndarray): scale_factor Final computed scale factor
that brings the parent cell to the surface cell.
reorient_lattice (bool): reorients the lattice parameters such that
the c direction is along the z axis.
validate_proximity (bool): Whether to check if there are sites
that are less than 0.01 Ang apart. Defaults to False.
reconstruction (str): Type of reconstruction. Defaults to None if
the slab is not reconstructed.
coords_are_cartesian (bool): Set to True if you are providing
coordinates in cartesian coordinates. Defaults to False.
site_properties (dict): Properties associated with the sites as a
dict of sequences, e.g., {"magmom":[5,5,5,5]}. The sequences
have to be the same length as the atomic species and
fractional_coords. Defaults to None for no properties.
energy (float): A value for the energy.
"""
self.oriented_unit_cell = oriented_unit_cell
self.miller_index = tuple(miller_index)
self.shift = shift
self.reconstruction = reconstruction
self.scale_factor = np.array(scale_factor)
self.energy = energy
self.reorient_lattice = reorient_lattice
if self.reorient_lattice:
if coords_are_cartesian:
coords = lattice.get_fractional_coords(coords)
coords_are_cartesian = False
lattice = Lattice.from_parameters(
lattice.a,
lattice.b,
lattice.c,
lattice.alpha,
lattice.beta,
lattice.gamma,
)
super().__init__(
lattice,
species,
coords,
validate_proximity=validate_proximity,
to_unit_cell=to_unit_cell,
coords_are_cartesian=coords_are_cartesian,
site_properties=site_properties,
)
def get_orthogonal_c_slab(self):
"""
This method returns a Slab where the normal (c lattice vector) is
"forced" to be exactly orthogonal to the surface a and b lattice
vectors. **Note that this breaks inherent symmetries in the slab.**
It should be pointed out that orthogonality is not required to get good
surface energies, but it can be useful in cases where the slabs are
subsequently used for postprocessing of some kind, e.g. generating
GBs or interfaces.
"""
a, b, c = self.lattice.matrix
new_c = np.cross(a, b)
new_c /= np.linalg.norm(new_c)
new_c = np.dot(c, new_c) * new_c
new_latt = Lattice([a, b, new_c])
return Slab(
lattice=new_latt,
species=self.species_and_occu,
coords=self.cart_coords,
miller_index=self.miller_index,
oriented_unit_cell=self.oriented_unit_cell,
shift=self.shift,
scale_factor=self.scale_factor,
coords_are_cartesian=True,
energy=self.energy,
reorient_lattice=self.reorient_lattice,
site_properties=self.site_properties,
)
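# Illustrative sketch (assumes the module-level imports above and a hypothetical
# fcc Cu bulk); an orthogonal-c copy is convenient before building interfaces:
#   cu = Structure.from_spacegroup("Fm-3m", Lattice.cubic(3.6), ["Cu"], [[0, 0, 0]])
#   slab = SlabGenerator(cu, (1, 1, 1), 10, 10).get_slab()
#   ortho = slab.get_orthogonal_c_slab()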
def get_tasker2_slabs(self, tol=0.01, same_species_only=True):
"""
Get a list of slabs that have been Tasker 2 corrected.
Args:
tol (float): Tolerance to determine if atoms are within same plane.
This is a fractional tolerance, not an absolute one.
same_species_only (bool): If True, only sites that are of the exact same
species as the atom at the outermost surface are considered for
moving. Otherwise, all atoms, regardless of species, that are
within tol are considered for moving. Default is True (usually
the desired behavior).
Returns:
([Slab]) List of tasker 2 corrected slabs.
"""
sites = list(self.sites)
slabs = []
sortedcsites = sorted(sites, key=lambda site: site.c)
# Determine what fraction the slab is of the total cell size in the
# c direction. Round to nearest rational number.
nlayers_total = int(round(self.lattice.c / self.oriented_unit_cell.lattice.c))
nlayers_slab = int(round((sortedcsites[-1].c - sortedcsites[0].c) * nlayers_total))
slab_ratio = nlayers_slab / nlayers_total
a = SpacegroupAnalyzer(self)
symm_structure = a.get_symmetrized_structure()
def equi_index(site):
for i, equi_sites in enumerate(symm_structure.equivalent_sites):
if site in equi_sites:
return i
raise ValueError("Cannot determine equi index!")
for surface_site, shift in [
(sortedcsites[0], slab_ratio),
(sortedcsites[-1], -slab_ratio),
]:
tomove = []
fixed = []
for site in sites:
if abs(site.c - surface_site.c) < tol and (
(not same_species_only) or site.species == surface_site.species
):
tomove.append(site)
else:
fixed.append(site)
# Sort and group the sites by the species and symmetry equivalence
tomove = sorted(tomove, key=lambda s: equi_index(s))
grouped = [list(sites) for k, sites in itertools.groupby(tomove, key=lambda s: equi_index(s))]
if len(tomove) == 0 or any(len(g) % 2 != 0 for g in grouped):
warnings.warn(
"Odd number of sites to divide! Try changing "
"the tolerance to ensure even division of "
"sites or create supercells in a or b directions "
"to allow for atoms to be moved!"
)
continue
combinations = []
for g in grouped:
combinations.append(list(itertools.combinations(g, int(len(g) / 2))))
for selection in itertools.product(*combinations):
species = [site.species for site in fixed]
fcoords = [site.frac_coords for site in fixed]
for s in tomove:
species.append(s.species)
for group in selection:
if s in group:
fcoords.append(s.frac_coords)
break
else:
# Move unselected atom to the opposite surface.
fcoords.append(s.frac_coords + [0, 0, shift])
# sort by species to put all similar species together.
sp_fcoord = sorted(zip(species, fcoords), key=lambda x: x[0])
species = [x[0] for x in sp_fcoord]
fcoords = [x[1] for x in sp_fcoord]
slab = Slab(
self.lattice,
species,
fcoords,
self.miller_index,
self.oriented_unit_cell,
self.shift,
self.scale_factor,
energy=self.energy,
reorient_lattice=self.reorient_lattice,
)
slabs.append(slab)
s = StructureMatcher()
unique = [ss[0] for ss in s.group_structures(slabs)]
return unique
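# Illustrative sketch (assumes a hypothetical oxidation-state-decorated polar
# slab named 'slab'); Tasker 2 correction, keeping only non-polar results:
#   corrected = slab.get_tasker2_slabs(tol=0.01)
#   nonpolar = [s for s in corrected if not s.is_polar()]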
def is_symmetric(self, symprec=0.1):
"""
Checks if surfaces are symmetric, i.e., contains inversion, mirror on (hkl) plane,
or screw axis (rotation and translation) about [hkl].
Args:
symprec (float): Symmetry precision used for SpaceGroup analyzer.
Returns:
(bool) Whether surfaces are symmetric.
"""
sg = SpacegroupAnalyzer(self, symprec=symprec)
symmops = sg.get_point_group_operations()
if (
sg.is_laue()
or any(op.translation_vector[2] != 0 for op in symmops)
or any(np.alltrue(op.rotation_matrix[2] == np.array([0, 0, -1])) for op in symmops)
):
# Check for inversion symmetry. Or if sites from surface (a) can be translated
# to surface (b) along the [hkl]-axis, surfaces are symmetric. Or because the
# two surfaces of our slabs are always parallel to the (hkl) plane,
# any operation where theres an (hkl) mirror plane has surface symmetry
return True
return False
def get_sorted_structure(self, key=None, reverse=False):
"""
Get a sorted copy of the structure. The parameters have the same
meaning as in list.sort. By default, sites are sorted by the
electronegativity of the species. Note that Slab has to override this
because of the different __init__ args.
Args:
key: Specifies a function of one argument that is used to extract
a comparison key from each list element: key=str.lower. The
default value is None (compare the elements directly).
reverse (bool): If set to True, then the list elements are sorted
as if each comparison were reversed.
"""
sites = sorted(self, key=key, reverse=reverse)
s = Structure.from_sites(sites)
return Slab(
s.lattice,
s.species_and_occu,
s.frac_coords,
self.miller_index,
self.oriented_unit_cell,
self.shift,
self.scale_factor,
site_properties=s.site_properties,
reorient_lattice=self.reorient_lattice,
)
def copy(self, site_properties=None, sanitize=False):
"""
Convenience method to get a copy of the structure, with options to add
site properties.
Args:
site_properties (dict): Properties to add or override. The
properties are specified in the same way as the constructor,
i.e., as a dict of the form {property: [values]}. The
properties should be in the order of the *original* structure
if you are performing sanitization.
sanitize (bool): If True, this method will return a sanitized
structure. Sanitization performs a few things: (i) The sites are
sorted by electronegativity, (ii) a LLL lattice reduction is
carried out to obtain a relatively orthogonalized cell,
(iii) all fractional coords for sites are mapped into the
unit cell.
Returns:
A copy of the Structure, with optionally new site_properties and
optionally sanitized.
"""
props = self.site_properties
if site_properties:
props.update(site_properties)
return Slab(
self.lattice,
self.species_and_occu,
self.frac_coords,
self.miller_index,
self.oriented_unit_cell,
self.shift,
self.scale_factor,
site_properties=props,
reorient_lattice=self.reorient_lattice,
)
@property
def dipole(self):
"""
Calculates the dipole of the Slab in the direction of the surface
normal. Note that the Slab must be oxidation state-decorated for this
to work properly. Otherwise, the Slab will always have a dipole of 0.
"""
dipole = np.zeros(3)
mid_pt = np.sum(self.cart_coords, axis=0) / len(self)
normal = self.normal
for site in self:
charge = sum(getattr(sp, "oxi_state", 0) * amt for sp, amt in site.species.items())
dipole += charge * np.dot(site.coords - mid_pt, normal) * normal
return dipole
def is_polar(self, tol_dipole_per_unit_area=1e-3):
"""
Checks whether the surface is polar by computing the dipole per unit
area. Note that the Slab must be oxidation state-decorated for this
to work properly. Otherwise, the Slab will always be non-polar.
Args:
tol_dipole_per_unit_area (float): A tolerance. If the dipole
magnitude per unit area is less than this value, the Slab is
considered non-polar. Defaults to 1e-3, which is usually
pretty good. Normalized dipole per unit area is used as it is
more reliable than using the total, which tends to be larger for
slabs with larger surface areas.
"""
dip_per_unit_area = self.dipole / self.surface_area
return np.linalg.norm(dip_per_unit_area) > tol_dipole_per_unit_area
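# Illustrative sketch: the dipole/polarity check is only meaningful on an
# oxidation-state-decorated slab (hypothetical 'nacl_slab' shown):
#   nacl_slab.add_oxidation_state_by_element({"Na": 1, "Cl": -1})
#   print(nacl_slab.dipole, nacl_slab.is_polar())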
@property
def normal(self):
"""
Calculates the surface normal vector of the slab
"""
normal = np.cross(self.lattice.matrix[0], self.lattice.matrix[1])
normal /= np.linalg.norm(normal)
return normal
@property
def surface_area(self):
"""
Calculates the surface area of the slab
"""
m = self.lattice.matrix
return np.linalg.norm(np.cross(m[0], m[1]))
@property
def center_of_mass(self):
"""
Calculates the center of mass of the slab
"""
weights = [s.species.weight for s in self]
center_of_mass = np.average(self.frac_coords, weights=weights, axis=0)
return center_of_mass
def add_adsorbate_atom(self, indices, specie, distance):
"""
Gets the structure of single atom adsorption on a
slab structure from the Slab class (in the [0, 0, 1] direction).
Args:
indices ([int]): Indices of sites on which to put the absorbate.
Absorbed atom will be displaced relative to the center of
these sites.
specie (Species/Element/str): adsorbed atom species
distance (float): between centers of the adsorbed atom and the
given site in Angstroms.
"""
# Let's do the work in cartesian coords
center = np.sum([self[i].coords for i in indices], axis=0) / len(indices)
coords = center + self.normal * distance / np.linalg.norm(self.normal)
self.append(specie, coords, coords_are_cartesian=True)
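# Illustrative sketch (hypothetical slab with a surface site at index 0):
# place a single O adsorbate 2.0 Angstrom above that site along the normal.
#   slab.add_adsorbate_atom([0], "O", 2.0)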
def __str__(self):
comp = self.composition
outs = [
"Slab Summary (%s)" % comp.formula,
"Reduced Formula: %s" % comp.reduced_formula,
f"Miller index: {self.miller_index}",
f"Shift: {self.shift:.4f}, Scale Factor: {self.scale_factor.__str__()}",
]
def to_s(x):
return "%0.6f" % x
outs.append("abc : " + " ".join([to_s(i).rjust(10) for i in self.lattice.abc]))
outs.append("angles: " + " ".join([to_s(i).rjust(10) for i in self.lattice.angles]))
outs.append(f"Sites ({len(self)})")
for i, site in enumerate(self):
outs.append(
" ".join(
[
str(i + 1),
site.species_string,
" ".join([to_s(j).rjust(12) for j in site.frac_coords]),
]
)
)
return "\n".join(outs)
def as_dict(self):
"""
:return: MSONAble dict
"""
d = super().as_dict()
d["@module"] = self.__class__.__module__
d["@class"] = self.__class__.__name__
d["oriented_unit_cell"] = self.oriented_unit_cell.as_dict()
d["miller_index"] = self.miller_index
d["shift"] = self.shift
d["scale_factor"] = self.scale_factor.tolist()
d["reconstruction"] = self.reconstruction
d["energy"] = self.energy
return d
@classmethod
def from_dict(cls, d):
"""
:param d: dict
:return: Creates slab from dict.
"""
lattice = Lattice.from_dict(d["lattice"])
sites = [PeriodicSite.from_dict(sd, lattice) for sd in d["sites"]]
s = Structure.from_sites(sites)
return Slab(
lattice=lattice,
species=s.species_and_occu,
coords=s.frac_coords,
miller_index=d["miller_index"],
oriented_unit_cell=Structure.from_dict(d["oriented_unit_cell"]),
shift=d["shift"],
scale_factor=d["scale_factor"],
site_properties=s.site_properties,
energy=d["energy"],
)
def get_surface_sites(self, tag=False):
"""
Returns the surface sites and their indices in a dictionary. The
oriented unit cell of the slab will determine the coordination number
of a typical site. We use VoronoiNN to determine the
coordination number of bulk sites and slab sites. Due to the
pathological error resulting from some surface sites in the
VoronoiNN, we assume any site that has this error is a surface
site as well. This will work for elemental systems only for now. Useful
for analysis involving broken bonds and for finding adsorption sites.
Args:
tag (bool): Option to add the site attribute "is_surf_site" (bool)
to all sites of the slab. Defaults to False
Returns:
A dictionary grouping sites on top and bottom of the slab
together.
{"top": [sites with indices], "bottom": [sites with indices}
TODO:
Is there a way to determine site equivalence between sites in a slab
and bulk system? This would allow us get the coordination number of
a specific site for multi-elemental systems or systems with more
than one inequivalent site. This will allow us to use this for
compound systems.
"""
from pymatgen.analysis.local_env import VoronoiNN
# Get a dictionary of coordination numbers
# for each distinct site in the structure
a = SpacegroupAnalyzer(self.oriented_unit_cell)
ucell = a.get_symmetrized_structure()
cn_dict = {}
v = VoronoiNN()
unique_indices = [equ[0] for equ in ucell.equivalent_indices]
for i in unique_indices:
el = ucell[i].species_string
if el not in cn_dict.keys():
cn_dict[el] = []
# Since this will get the cn as a result of the weighted polyhedra, the
# slightest difference in cn will indicate a different environment for a
# species, e.g. bond distance of each neighbor or neighbor species. We round
# to 5 decimal places so that nearly identical cn values compare as equal.
cn = v.get_cn(ucell, i, use_weights=True)
cn = float("%.5f" % (round(cn, 5)))
if cn not in cn_dict[el]:
cn_dict[el].append(cn)
v = VoronoiNN()
surf_sites_dict, properties = {"top": [], "bottom": []}, []
for i, site in enumerate(self):
# Determine if site is closer to the top or bottom of the slab
top = site.frac_coords[2] > self.center_of_mass[2]
try:
# A site is a surface site, if its environment does
# not fit the environment of other sites
cn = float("%.5f" % (round(v.get_cn(self, i, use_weights=True), 5)))
if cn < min(cn_dict[site.species_string]):
properties.append(True)
key = "top" if top else "bottom"
surf_sites_dict[key].append([site, i])
else:
properties.append(False)
except RuntimeError:
# or if pathological error is returned, indicating a surface site
properties.append(True)
key = "top" if top else "bottom"
surf_sites_dict[key].append([site, i])
if tag:
self.add_site_property("is_surf_site", properties)
return surf_sites_dict
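# Illustrative sketch (hypothetical elemental slab named 'slab'): tag the
# surface sites and count how many lie on each side.
#   surf = slab.get_surface_sites(tag=True)
#   print(len(surf["top"]), len(surf["bottom"]))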
def get_symmetric_site(self, point, cartesian=False):
"""
This method uses symmetry operations to find equivalent sites on
both sides of the slab. Works mainly for slabs with Laue
symmetry. This is useful for retaining the non-polar and
symmetric properties of a slab when creating adsorbed
structures or symmetric reconstructions.
Arg:
point: Fractional coordinate.
Returns:
point: Fractional coordinate. A point equivalent to the
parameter point, but on the other side of the slab
"""
sg = SpacegroupAnalyzer(self)
ops = sg.get_symmetry_operations(cartesian=cartesian)
# Each operation on a point will return an equivalent point.
# We want to find the point on the other side of the slab.
for op in ops:
slab = self.copy()
site2 = op.operate(point)
if "%.6f" % (site2[2]) == "%.6f" % (point[2]):
continue
# Add dummy site to check the overall structure is symmetric
slab.append("O", point, coords_are_cartesian=cartesian)
slab.append("O", site2, coords_are_cartesian=cartesian)
sg = SpacegroupAnalyzer(slab)
if sg.is_laue():
break
# If not symmetric, remove the two added
# sites and try another symmetry operator
slab.remove_sites([len(slab) - 1])
slab.remove_sites([len(slab) - 1])
return site2
def symmetrically_add_atom(self, specie, point, coords_are_cartesian=False):
"""
Method for adding a site at a specified point in a slab.
Will add the corresponding site on the other side of the
slab to maintain equivalent surfaces.
Arg:
specie (str): The specie to add
point (coords): The coordinate of the site in the slab to add.
coords_are_cartesian (bool): Is the point in cartesian coordinates
Returns:
(Slab): The modified slab
"""
# For now just use the species of the
# surface atom as the element to add
# Get the index of the corresponding site at the bottom
point2 = self.get_symmetric_site(point, cartesian=coords_are_cartesian)
self.append(specie, point, coords_are_cartesian=coords_are_cartesian)
self.append(specie, point2, coords_are_cartesian=coords_are_cartesian)
def symmetrically_remove_atoms(self, indices):
"""
Method for removing sites corresponding to a list of indices.
Will remove the corresponding site on the other side of the
slab to maintain equivalent surfaces.
Arg:
indices ([indices]): The indices of the sites
in the slab to remove.
"""
slabcopy = SpacegroupAnalyzer(self.copy()).get_symmetrized_structure()
points = [slabcopy[i].frac_coords for i in indices]
removal_list = []
for pt in points:
# Get the index of the original site on top
cart_point = slabcopy.lattice.get_cartesian_coords(pt)
dist = [site.distance_from_point(cart_point) for site in slabcopy]
site1 = dist.index(min(dist))
# Get the index of the corresponding site at the bottom
for i, eq_sites in enumerate(slabcopy.equivalent_sites):
if slabcopy[site1] in eq_sites:
eq_indices = slabcopy.equivalent_indices[i]
break
i1 = eq_indices[eq_sites.index(slabcopy[site1])]
for i2 in eq_indices:
if i2 == i1:
continue
if slabcopy[i2].frac_coords[2] == slabcopy[i1].frac_coords[2]:
continue
# Test site remove to see if it results in symmetric slab
s = self.copy()
s.remove_sites([i1, i2])
if s.is_symmetric():
removal_list.extend([i1, i2])
break
# If expected, 2 atoms are removed per index
if len(removal_list) == 2 * len(indices):
self.remove_sites(removal_list)
else:
warnings.warn("Equivalent sites could not be found for removal for all indices. Surface unchanged.")
class SlabGenerator:
"""
This class generates different slabs using shift values determined by where
a unique termination can be found along with other criteria such as where a
termination doesn't break a polyhedral bond. The shift value then indicates
where the slab layer will begin and terminate in the slab-vacuum system.
.. attribute:: oriented_unit_cell
A unit cell of the parent structure with the miller
index of plane parallel to surface
.. attribute:: parent
Parent structure from which Slab was derived.
.. attribute:: lll_reduce
Whether or not the slabs will be orthogonalized
.. attribute:: center_slab
Whether or not the slabs will be centered between
the vacuum layer
.. attribute:: slab_scale_factor
Final computed scale factor that brings the parent cell to the
surface cell.
.. attribute:: miller_index
Miller index of plane parallel to surface.
.. attribute:: min_slab_size
Minimum size in angstroms of layers containing atoms
.. attribute:: min_vac_size
Minimum size in angstroms of layers containing vacuum
"""
def __init__(
self,
initial_structure,
miller_index,
min_slab_size,
min_vacuum_size,
lll_reduce=False,
center_slab=False,
in_unit_planes=False,
primitive=True,
max_normal_search=None,
reorient_lattice=True,
):
"""
Calculates the slab scale factor and uses it to generate a unit cell
of the initial structure that has been oriented by its miller index.
Also stores the initial information needed later on to generate a slab.
Args:
initial_structure (Structure): Initial input structure. Note that to
ensure that the miller indices correspond to usual
crystallographic definitions, you should supply a conventional
unit cell structure.
miller_index ([h, k, l]): Miller index of plane parallel to
surface. Note that this is referenced to the input structure. If
you need this to be based on the conventional cell,
you should supply the conventional structure.
min_slab_size (float): In Angstroms or number of hkl planes
min_vacuum_size (float): In Angstroms or number of hkl planes
lll_reduce (bool): Whether to perform an LLL reduction on the
eventual structure.
center_slab (bool): Whether to center the slab in the cell with
equal vacuum spacing from the top and bottom.
in_unit_planes (bool): Whether to set min_slab_size and min_vac_size
in units of hkl planes (True) or Angstrom (False/default).
Setting in units of planes is useful for ensuring some slabs
have a certain number of layers of atoms. E.g., for Cs (100), a 10 Ang
slab will result in a slab with only 2 layers of atoms, whereas
Fe (100) will have more layers of atoms. By using units of hkl
planes instead, we ensure both slabs
have the same number of atoms. The slab thickness will be in
min_slab_size/math.ceil(self._proj_height/dhkl)
multiples of oriented unit cells.
primitive (bool): Whether to reduce any generated slabs to a
primitive cell (this does **not** mean the slab is generated
from a primitive cell, it simply means that after slab
generation, we attempt to find shorter lattice vectors,
which lead to less surface area and smaller cells).
max_normal_search (int): If set to a positive integer, the code will
conduct a search for a normal lattice vector that is as
perpendicular to the surface as possible by considering
multiples linear combinations of lattice vectors up to
max_normal_search. This has no bearing on surface energies,
but may be useful as a preliminary step to generating slabs
for adsorption and other purposes. It is typical that this will
not be the smallest possible cell for simulation. Normality
is not guaranteed, but the oriented cell will have the c
vector as normal as possible (within the search range) to the
surface. A value of up to the max absolute Miller index is
usually sufficient.
reorient_lattice (bool): reorients the lattice parameters such that
the c direction is the third vector of the lattice matrix
"""
# pylint: disable=E1130
# Add Wyckoff symbols of the bulk, will help with
# identifying types of sites in the slab system
sg = SpacegroupAnalyzer(initial_structure)
initial_structure.add_site_property("bulk_wyckoff", sg.get_symmetry_dataset()["wyckoffs"])
initial_structure.add_site_property("bulk_equivalent", sg.get_symmetry_dataset()["equivalent_atoms"].tolist())
latt = initial_structure.lattice
miller_index = _reduce_vector(miller_index)
# Calculate the surface normal using the reciprocal lattice vector.
recp = latt.reciprocal_lattice_crystallographic
normal = recp.get_cartesian_coords(miller_index)
normal /= np.linalg.norm(normal)
slab_scale_factor = []
non_orth_ind = []
eye = np.eye(3, dtype=np.int_)
for i, j in enumerate(miller_index):
if j == 0:
# Lattice vector is perpendicular to surface normal, i.e.,
# in plane of surface. We will simply choose this lattice
# vector as one of the basis vectors.
slab_scale_factor.append(eye[i])
else:
# Calculate projection of lattice vector onto surface normal.
d = abs(np.dot(normal, latt.matrix[i])) / latt.abc[i]
non_orth_ind.append((i, d))
# We want the vector that has maximum magnitude in the
# direction of the surface normal as the c-direction.
# Results in a more "orthogonal" unit cell.
c_index, dist = max(non_orth_ind, key=lambda t: t[1])
if len(non_orth_ind) > 1:
lcm_miller = lcm(*[miller_index[i] for i, d in non_orth_ind])
for (i, di), (j, dj) in itertools.combinations(non_orth_ind, 2):
l = [0, 0, 0]
l[i] = -int(round(lcm_miller / miller_index[i]))
l[j] = int(round(lcm_miller / miller_index[j]))
slab_scale_factor.append(l)
if len(slab_scale_factor) == 2:
break
if max_normal_search is None:
slab_scale_factor.append(eye[c_index])
else:
index_range = sorted(
reversed(range(-max_normal_search, max_normal_search + 1)),
key=lambda x: abs(x),
)
candidates = []
for uvw in itertools.product(index_range, index_range, index_range):
if (not any(uvw)) or abs(np.linalg.det(slab_scale_factor + [uvw])) < 1e-8:
continue
vec = latt.get_cartesian_coords(uvw)
l = np.linalg.norm(vec)
cosine = abs(np.dot(vec, normal) / l)
candidates.append((uvw, cosine, l))
if abs(abs(cosine) - 1) < 1e-8:
# If cosine of 1 is found, no need to search further.
break
# We want the indices with the maximum absolute cosine,
# but smallest possible length.
uvw, cosine, l = max(candidates, key=lambda x: (x[1], -x[2]))
slab_scale_factor.append(uvw)
slab_scale_factor = np.array(slab_scale_factor)
# Let's make sure we have a right-handed crystallographic system
if np.linalg.det(slab_scale_factor) < 0:
slab_scale_factor *= -1
# Make sure the slab_scale_factor is reduced to avoid
# unnecessarily large slabs
reduced_scale_factor = [_reduce_vector(v) for v in slab_scale_factor]
slab_scale_factor = np.array(reduced_scale_factor)
single = initial_structure.copy()
single.make_supercell(slab_scale_factor)
# When getting the OUC, lets return the most reduced
# structure as possible to reduce calculations
self.oriented_unit_cell = Structure.from_sites(single, to_unit_cell=True)
self.max_normal_search = max_normal_search
self.parent = initial_structure
self.lll_reduce = lll_reduce
self.center_slab = center_slab
self.slab_scale_factor = slab_scale_factor
self.miller_index = miller_index
self.min_vac_size = min_vacuum_size
self.min_slab_size = min_slab_size
self.in_unit_planes = in_unit_planes
self.primitive = primitive
self._normal = normal
a, b, c = self.oriented_unit_cell.lattice.matrix
self._proj_height = abs(np.dot(normal, c))
self.reorient_lattice = reorient_lattice
def get_slab(self, shift=0, tol=0.1, energy=None):
"""
This method takes in shift value for the c lattice direction and
generates a slab based on the given shift. You should rarely use this
method. Instead, it is used by other generation algorithms to obtain
all slabs.
Arg:
shift (float): A shift value in Angstrom that determines how much a
slab should be shifted.
tol (float): Tolerance to determine primitive cell.
energy (float): An energy to assign to the slab.
Returns:
(Slab) A Slab object with a particular shifted oriented unit cell.
"""
h = self._proj_height
p = round(h / self.parent.lattice.d_hkl(self.miller_index), 8)
if self.in_unit_planes:
nlayers_slab = int(math.ceil(self.min_slab_size / p))
nlayers_vac = int(math.ceil(self.min_vac_size / p))
else:
nlayers_slab = int(math.ceil(self.min_slab_size / h))
nlayers_vac = int(math.ceil(self.min_vac_size / h))
nlayers = nlayers_slab + nlayers_vac
species = self.oriented_unit_cell.species_and_occu
props = self.oriented_unit_cell.site_properties
props = {k: v * nlayers_slab for k, v in props.items()}
frac_coords = self.oriented_unit_cell.frac_coords
frac_coords = np.array(frac_coords) + np.array([0, 0, -shift])[None, :]
frac_coords -= np.floor(frac_coords)
a, b, c = self.oriented_unit_cell.lattice.matrix
new_lattice = [a, b, nlayers * c]
frac_coords[:, 2] = frac_coords[:, 2] / nlayers
all_coords = []
for i in range(nlayers_slab):
fcoords = frac_coords.copy()
fcoords[:, 2] += i / nlayers
all_coords.extend(fcoords)
slab = Structure(new_lattice, species * nlayers_slab, all_coords, site_properties=props)
scale_factor = self.slab_scale_factor
# Whether or not to orthogonalize the structure
if self.lll_reduce:
lll_slab = slab.copy(sanitize=True)
mapping = lll_slab.lattice.find_mapping(slab.lattice)
scale_factor = np.dot(mapping[2], scale_factor)
slab = lll_slab
# Whether or not to center the slab layer around the vacuum
if self.center_slab:
avg_c = np.average([c[2] for c in slab.frac_coords])
slab.translate_sites(list(range(len(slab))), [0, 0, 0.5 - avg_c])
if self.primitive:
prim = slab.get_primitive_structure(tolerance=tol)
if energy is not None:
energy = prim.volume / slab.volume * energy
slab = prim
# Reorient the lattice to get the correct reduced cell
ouc = self.oriented_unit_cell.copy()
if self.primitive:
# find a reduced ouc
slab_l = slab.lattice
ouc = ouc.get_primitive_structure(
constrain_latt={
"a": slab_l.a,
"b": slab_l.b,
"alpha": slab_l.alpha,
"beta": slab_l.beta,
"gamma": slab_l.gamma,
}
)
# Check this is the correct oriented unit cell
ouc = self.oriented_unit_cell if slab_l.a != ouc.lattice.a or slab_l.b != ouc.lattice.b else ouc
return Slab(
slab.lattice,
slab.species_and_occu,
slab.frac_coords,
self.miller_index,
ouc,
shift,
scale_factor,
energy=energy,
site_properties=slab.site_properties,
reorient_lattice=self.reorient_lattice,
)
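# Illustrative sketch (assumes a SlabGenerator instance 'gen'): generate a
# single termination at a chosen shift instead of enumerating all of them.
#   slab = gen.get_slab(shift=0.25)
#   print(slab.shift, len(slab))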
def _calculate_possible_shifts(self, tol=0.1):
frac_coords = self.oriented_unit_cell.frac_coords
n = len(frac_coords)
if n == 1:
# Clustering does not work when there is only one data point.
shift = frac_coords[0][2] + 0.5
return [shift - math.floor(shift)]
# We cluster the sites according to the c coordinates. But we need to
# take into account PBC. Let's compute a fractional c-coordinate
# distance matrix that accounts for PBC.
dist_matrix = np.zeros((n, n))
h = self._proj_height
# Projection of c lattice vector in
# direction of surface normal.
for i, j in itertools.combinations(list(range(n)), 2):
if i != j:
cdist = frac_coords[i][2] - frac_coords[j][2]
cdist = abs(cdist - round(cdist)) * h
dist_matrix[i, j] = cdist
dist_matrix[j, i] = cdist
condensed_m = squareform(dist_matrix)
z = linkage(condensed_m)
clusters = fcluster(z, tol, criterion="distance")
# Generate dict of cluster# to c val - doesn't matter what the c is.
c_loc = {c: frac_coords[i][2] for i, c in enumerate(clusters)}
# Put all c into the unit cell.
possible_c = [c - math.floor(c) for c in sorted(c_loc.values())]
# Calculate the shifts
nshifts = len(possible_c)
shifts = []
for i in range(nshifts):
if i == nshifts - 1:
# There is an additional shift between the first and last c
# coordinate. But this needs special handling because of PBC.
shift = (possible_c[0] + 1 + possible_c[i]) * 0.5
if shift > 1:
shift -= 1
else:
shift = (possible_c[i] + possible_c[i + 1]) * 0.5
shifts.append(shift - math.floor(shift))
shifts = sorted(shifts)
return shifts
def _get_c_ranges(self, bonds):
c_ranges = []
bonds = {(get_el_sp(s1), get_el_sp(s2)): dist for (s1, s2), dist in bonds.items()}
for (sp1, sp2), bond_dist in bonds.items():
for site in self.oriented_unit_cell:
if sp1 in site.species:
for nn in self.oriented_unit_cell.get_neighbors(site, bond_dist):
if sp2 in nn.species:
c_range = tuple(sorted([site.frac_coords[2], nn.frac_coords[2]]))
if c_range[1] > 1:
# Takes care of PBC when c coordinate of site
# goes beyond the upper boundary of the cell
c_ranges.append((c_range[0], 1))
c_ranges.append((0, c_range[1] - 1))
elif c_range[0] < 0:
# Takes care of PBC when c coordinate of site
# is below the lower boundary of the unit cell
c_ranges.append((0, c_range[1]))
c_ranges.append((c_range[0] + 1, 1))
elif c_range[0] != c_range[1]:
c_ranges.append((c_range[0], c_range[1]))
return c_ranges
def get_slabs(
self,
bonds=None,
ftol=0.1,
tol=0.1,
max_broken_bonds=0,
symmetrize=False,
repair=False,
):
"""
This method returns a list of slabs that are generated using the list of
shift values from the method, _calculate_possible_shifts(). Before the
shifts are used to create the slabs however, if the user decides to take
into account whether or not a termination will break any polyhedral
structure (bonds is not None), this method will filter out any shift
values that do so.
Args:
bonds ({(specie1, specie2): max_bond_dist}: bonds are
specified as a dict of tuples: float of specie1, specie2
and the max bonding distance. For example, PO4 groups may be
defined as {("P", "O"): 3}.
tol (float): General tolerance parameter for getting primitive
cells and matching structures
ftol (float): Threshold parameter in fcluster in order to check
if two atoms are lying on the same plane. Default thresh set
to 0.1 Angstrom in the direction of the surface normal.
max_broken_bonds (int): Maximum number of allowable broken bonds
for the slab. Use this to limit # of slabs (some structures
may have a lot of slabs). Defaults to zero, which means no
defined bonds must be broken.
symmetrize (bool): Whether or not to ensure the surfaces of the
slabs are equivalent.
repair (bool): Whether to repair terminations with broken bonds
or just omit them. Set to False as repairing terminations can
lead to many possible slabs as opposed to just omitting them.
Returns:
([Slab]) List of all possible terminations of a particular surface.
Slabs are sorted by the # of bonds broken.
"""
c_ranges = [] if bonds is None else self._get_c_ranges(bonds)
slabs = []
for shift in self._calculate_possible_shifts(tol=ftol):
bonds_broken = 0
for r in c_ranges:
if r[0] <= shift <= r[1]:
bonds_broken += 1
slab = self.get_slab(shift, tol=tol, energy=bonds_broken)
if bonds_broken <= max_broken_bonds:
slabs.append(slab)
elif repair:
# If the number of broken bonds is exceeded,
# we repair the broken bonds on the slab
slabs.append(self.repair_broken_bonds(slab, bonds))
# Further filters out any surfaces made that might be the same
m = StructureMatcher(ltol=tol, stol=tol, primitive_cell=False, scale=False)
new_slabs = []
for g in m.group_structures(slabs):
# For each unique termination, symmetrize the
# surfaces by removing sites from the bottom.
if symmetrize:
slabs = self.nonstoichiometric_symmetrized_slab(g[0])
new_slabs.extend(slabs)
else:
new_slabs.append(g[0])
match = StructureMatcher(ltol=tol, stol=tol, primitive_cell=False, scale=False)
new_slabs = [g[0] for g in match.group_structures(new_slabs)]
return sorted(new_slabs, key=lambda s: s.energy)
def repair_broken_bonds(self, slab, bonds):
"""
This method will find undercoordinated atoms due to slab
cleaving specified by the bonds parameter and move them
to the other surface to make sure the bond is kept intact.
In a future release of surface.py, the ghost_sites will be
used to tell us what the repaired bonds should look like.
Arg:
slab (structure): A structure object representing a slab.
bonds ({(specie1, specie2): max_bond_dist}: bonds are
specified as a dict of tuples: float of specie1, specie2
and the max bonding distance. For example, PO4 groups may be
defined as {("P", "O"): 3}.
Returns:
(Slab) A Slab object with a particular shifted oriented unit cell.
"""
for pair in bonds.keys():
blength = bonds[pair]
# First lets determine which element should be the
# reference (center element) to determine broken bonds.
# e.g. P for a PO4 bond. Find integer coordination
# numbers of the pair of elements wrt to each other
cn_dict = {}
for i, el in enumerate(pair):
cnlist = []
for site in self.oriented_unit_cell:
poly_coord = 0
if site.species_string == el:
for nn in self.oriented_unit_cell.get_neighbors(site, blength):
if nn[0].species_string == pair[i - 1]:
poly_coord += 1
cnlist.append(poly_coord)
cn_dict[el] = cnlist
# We make the element with the higher coordination our reference
if max(cn_dict[pair[0]]) > max(cn_dict[pair[1]]):
element1, element2 = pair
else:
element2, element1 = pair
for i, site in enumerate(slab):
# Determine the coordination of our reference
if site.species_string == element1:
poly_coord = 0
for neighbor in slab.get_neighbors(site, blength):
poly_coord += 1 if neighbor.species_string == element2 else 0
# suppose we find an undercoordinated reference atom
if poly_coord not in cn_dict[element1]:
# We get the reference atom of the broken bonds
# (undercoordinated), move it to the other surface
slab = self.move_to_other_side(slab, [i])
# find its NNs with the corresponding
# species it should be coordinated with
neighbors = slab.get_neighbors(slab[i], blength, include_index=True)
tomove = [nn[2] for nn in neighbors if nn[0].species_string == element2]
tomove.append(i)
# and then move those NNs along with the central
# atom back to the other side of the slab again
slab = self.move_to_other_side(slab, tomove)
return slab
def move_to_other_side(self, init_slab, index_of_sites):
"""
This method will move a set of sites to the
other side of the slab (opposite surface).
Arg:
init_slab (structure): A structure object representing a slab.
index_of_sites (list of ints): The list of indices representing
the sites we want to move to the other side.
Returns:
(Slab) A Slab object with a particular shifted oriented unit cell.
"""
slab = init_slab.copy()
# Determine what fraction the slab is of the total cell size
# in the c direction. Round to nearest rational number.
h = self._proj_height
p = h / self.parent.lattice.d_hkl(self.miller_index)
if self.in_unit_planes:
nlayers_slab = int(math.ceil(self.min_slab_size / p))
nlayers_vac = int(math.ceil(self.min_vac_size / p))
else:
nlayers_slab = int(math.ceil(self.min_slab_size / h))
nlayers_vac = int(math.ceil(self.min_vac_size / h))
nlayers = nlayers_slab + nlayers_vac
slab_ratio = nlayers_slab / nlayers
# Sort the index of sites based on which side they are on
top_site_index = [i for i in index_of_sites if slab[i].frac_coords[2] > slab.center_of_mass[2]]
bottom_site_index = [i for i in index_of_sites if slab[i].frac_coords[2] < slab.center_of_mass[2]]
# Translate sites to the opposite surfaces
slab.translate_sites(top_site_index, [0, 0, slab_ratio])
slab.translate_sites(bottom_site_index, [0, 0, -slab_ratio])
return Slab(
init_slab.lattice,
slab.species,
slab.frac_coords,
init_slab.miller_index,
init_slab.oriented_unit_cell,
init_slab.shift,
init_slab.scale_factor,
energy=init_slab.energy,
)
def nonstoichiometric_symmetrized_slab(self, init_slab):
"""
This method checks whether or not the two surfaces of the slab are
equivalent. If the point group of the slab has an inversion symmetry (
i.e., belongs to one of the Laue groups), then it is assumed that the
surfaces should be equivalent. Otherwise, sites at the bottom of the
slab will be removed until the slab is symmetric. Note the removal of sites
can destroy the stoichiometry of the slab. For non-elemental
structures, the chemical potential will be needed to calculate surface energy.
Arg:
init_slab (Structure): A single slab structure
Returns:
Slab (structure): A symmetrized Slab object.
"""
if init_slab.is_symmetric():
return [init_slab]
nonstoich_slabs = []
# Build an equivalent surface slab for each of the different surfaces
for top in [True, False]:
asym = True
slab = init_slab.copy()
slab.energy = init_slab.energy
while asym:
# Keep removing sites from the bottom one by one until both
# surfaces are symmetric or the number of sites removed has
# exceeded 10 percent of the original slab
c_dir = [site[2] for i, site in enumerate(slab.frac_coords)]
if top:
slab.remove_sites([c_dir.index(max(c_dir))])
else:
slab.remove_sites([c_dir.index(min(c_dir))])
if len(slab) <= len(self.parent):
break
# Check if the altered surface is symmetric
if slab.is_symmetric():
asym = False
nonstoich_slabs.append(slab)
if len(slab) <= len(self.parent):
warnings.warn("Too many sites removed, please use a larger slab size.")
return nonstoich_slabs
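# Illustrative usage sketch (not part of pymatgen): enumerate all unique
# (1, 1, 1) terminations for a hypothetical fcc Cu cell, keeping only
# symmetrized slabs. Relies on the imports at the top of this module.
def _example_slabgenerator_usage():
    cu = Structure.from_spacegroup("Fm-3m", Lattice.cubic(3.6), ["Cu"], [[0, 0, 0]])
    gen = SlabGenerator(cu, (1, 1, 1), min_slab_size=10, min_vacuum_size=10, center_slab=True)
    slabs = gen.get_slabs(symmetrize=True)
    for s in slabs:
        print(s.miller_index, s.shift, len(s))
    return slabs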
module_dir = os.path.dirname(os.path.abspath(__file__))
with open(os.path.join(module_dir, "reconstructions_archive.json")) as data_file:
reconstructions_archive = json.load(data_file)
class ReconstructionGenerator:
"""
This class takes in a pre-defined dictionary specifying the parameters
need to build a reconstructed slab such as the SlabGenerator parameters,
transformation matrix, sites to remove/add and slab/vacuum size. It will
then use the formatted instructions provided by the dictionary to build
the desired reconstructed slab from the initial structure.
.. attribute:: slabgen_params
Parameters for the SlabGenerator
.. attribute:: trans_matrix
A 3x3 transformation matrix to generate the reconstructed
slab. Only the a and b lattice vectors are actually
changed while the c vector remains the same. This
matrix is what the Wood's notation is based on.
.. attribute:: reconstruction_json
The full json or dictionary containing the instructions
for building the reconstructed slab
.. attribute:: termination
The index of the termination of the slab
TODO:
- Right now there is no way to specify what atom is being
added. In the future, use basis sets?
"""
def __init__(self, initial_structure, min_slab_size, min_vacuum_size, reconstruction_name):
"""
Generates reconstructed slabs from a set of instructions
specified by a dictionary or json file.
Args:
initial_structure (Structure): Initial input structure. Note
that to ensure that the miller indices correspond to usual
crystallographic definitions, you should supply a conventional
unit cell structure.
min_slab_size (float): In Angstroms
min_vacuum_size (float): In Angstroms
reconstruction_name (str): Name of the dict containing the instructions
for building a reconstructed slab. The dictionary can contain
any item the creator deems relevant, however any instructions
archived in pymatgen for public use needs to contain the
following keys and items to ensure compatibility with the
ReconstructionGenerator:
"name" (str): A descriptive name for the type of
reconstruction. Typically the name will have the type
of structure the reconstruction is for, the Miller
index, and Wood's notation along with anything to
describe the reconstruction: e.g.:
"fcc_110_missing_row_1x2"
"description" (str): A longer description of your
reconstruction. This is to help future contributors who
want to add other types of reconstructions to the
archive on pymatgen to check if the reconstruction
already exists. Please read the descriptions carefully
before adding a new type of reconstruction to ensure it
is not in the archive yet.
"reference" (str): Optional reference to where the
reconstruction was taken from or first observed.
"spacegroup" (dict): e.g. {"symbol": "Fm-3m", "number": 225}
Indicates what kind of structure is this reconstruction.
"miller_index" ([h,k,l]): Miller index of your reconstruction
"Woods_notation" (str): For a reconstruction, the a and b
lattice may change to accommodate the symmetry of the
reconstruction. This notation indicates the change in
the vectors relative to the primitive (p) or
conventional (c) slab cell. E.g. p(2x1):
Wood, E. A. (1964). Vocabulary of surface
crystallography. Journal of Applied Physics, 35(4),
1306–1312.
"transformation_matrix" (numpy array): A 3x3 matrix to
transform the slab. Only the a and b lattice vectors
should change while the c vector remains the same.
"SlabGenerator_parameters" (dict): A dictionary containing
the parameters for the SlabGenerator class excluding the
miller_index, min_slab_size and min_vac_size as the
Miller index is already specified and the min_slab_size
and min_vac_size can be changed regardless of what type
of reconstruction is used. Having a consistent set of
SlabGenerator parameters allows for the instructions to
be reused to consistently build a reconstructed slab.
"points_to_remove" (list of coords): A list of sites to
remove where the first two indices are fraction (in a
and b) and the third index is in units of 1/d (in c).
"points_to_add" (list of frac_coords): A list of sites to add
where the first two indices are fraction (in a and b) and
the third index is in units of 1/d (in c).
"base_reconstruction" (dict): Option to base a reconstruction on
an existing reconstruction model also exists to easily build
the instructions without repeating previous work. E.g. the
alpha reconstruction of halites is based on the octopolar
reconstruction but with the topmost atom removed. The dictionary
for the alpha reconstruction would therefore contain the item
"reconstruction_base": "halite_111_octopolar_2x2", and
additional sites for "points_to_remove" and "points_to_add"
can be added to modify this reconstruction.
For "points_to_remove" and "points_to_add", the third index for
the c vector is in units of 1/d where d is the spacing
between atoms along hkl (the c vector) and is relative to
the topmost site in the unreconstructed slab. e.g. a point
of [0.5, 0.25, 1] corresponds to the 0.5 frac_coord of a,
0.25 frac_coord of b and a distance of 1 atomic layer above
the topmost site. [0.5, 0.25, -0.5] where the third index
corresponds to a point half an atomic layer below the topmost
site. [0.5, 0.25, 0] corresponds to a point in the same
position along c as the topmost site. This is done because
while the primitive units of a and b will remain constant,
the user can vary the length of the c direction by changing
the slab layer or the vacuum layer.
NOTE: THE DICTIONARY SHOULD ONLY CONTAIN "points_to_remove" AND
"points_to_add" FOR THE TOP SURFACE. THE ReconstructionGenerator
WILL MODIFY THE BOTTOM SURFACE ACCORDINGLY TO RETURN A SLAB WITH
EQUIVALENT SURFACES.
"""
if reconstruction_name not in reconstructions_archive.keys():
raise KeyError(
"The reconstruction_name entered (%s) does not exist in the "
"archive. Please select from one of the following reconstructions: %s "
"or add the appropriate dictionary to the archive file "
"reconstructions_archive.json." % (reconstruction_name, list(reconstructions_archive.keys()))
)
# Get the instructions to build the reconstruction
# from the reconstruction_archive
recon_json = copy.deepcopy(reconstructions_archive[reconstruction_name])
new_points_to_add, new_points_to_remove = [], []
if "base_reconstruction" in recon_json.keys():
if "points_to_add" in recon_json.keys():
new_points_to_add = recon_json["points_to_add"]
if "points_to_remove" in recon_json.keys():
new_points_to_remove = recon_json["points_to_remove"]
# Build new instructions from a base reconstruction
recon_json = copy.deepcopy(reconstructions_archive[recon_json["base_reconstruction"]])
if "points_to_add" in recon_json.keys():
del recon_json["points_to_add"]
if "points_to_remove" in recon_json.keys():
del recon_json["points_to_remove"]
if new_points_to_add:
recon_json["points_to_add"] = new_points_to_add
if new_points_to_remove:
recon_json["points_to_remove"] = new_points_to_remove
slabgen_params = copy.deepcopy(recon_json["SlabGenerator_parameters"])
slabgen_params["initial_structure"] = initial_structure.copy()
slabgen_params["miller_index"] = recon_json["miller_index"]
slabgen_params["min_slab_size"] = min_slab_size
slabgen_params["min_vacuum_size"] = min_vacuum_size
self.slabgen_params = slabgen_params
self.trans_matrix = recon_json["transformation_matrix"]
self.reconstruction_json = recon_json
self.name = reconstruction_name
def build_slabs(self):
"""
Builds the reconstructed slab by:
(1) Obtaining the unreconstructed slab using the specified
parameters for the SlabGenerator.
(2) Applying the appropriate lattice transformation in the
a and b lattice vectors.
(3) Remove any specified sites from both surfaces.
(4) Add any specified sites to both surfaces.
Returns:
(Slab): The reconstructed slab.
"""
slabs = self.get_unreconstructed_slabs()
recon_slabs = []
for slab in slabs:
d = get_d(slab)
top_site = sorted(slab, key=lambda site: site.frac_coords[2])[-1].coords
# Remove any specified sites
if "points_to_remove" in self.reconstruction_json.keys():
pts_to_rm = copy.deepcopy(self.reconstruction_json["points_to_remove"])
for p in pts_to_rm:
p[2] = slab.lattice.get_fractional_coords([top_site[0], top_site[1], top_site[2] + p[2] * d])[2]
cart_point = slab.lattice.get_cartesian_coords(p)
dist = [site.distance_from_point(cart_point) for site in slab]
site1 = dist.index(min(dist))
slab.symmetrically_remove_atoms([site1])
# Add any specified sites
if "points_to_add" in self.reconstruction_json.keys():
pts_to_add = copy.deepcopy(self.reconstruction_json["points_to_add"])
for p in pts_to_add:
p[2] = slab.lattice.get_fractional_coords([top_site[0], top_site[1], top_site[2] + p[2] * d])[2]
slab.symmetrically_add_atom(slab[0].specie, p)
slab.reconstruction = self.name
setattr(slab, "recon_trans_matrix", self.trans_matrix)
# Get the oriented_unit_cell with the same axb area.
ouc = slab.oriented_unit_cell.copy()
ouc.make_supercell(self.trans_matrix)
slab.oriented_unit_cell = ouc
recon_slabs.append(slab)
return recon_slabs
def get_unreconstructed_slabs(self):
"""
Generates the unreconstructed or pristine super slab.
"""
slabs = []
for slab in SlabGenerator(**self.slabgen_params).get_slabs():
slab.make_supercell(self.trans_matrix)
slabs.append(slab)
return slabs
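# Illustrative usage sketch (not part of pymatgen): build reconstructed slabs
# from an archived instruction set. The reconstruction name below is the
# example given in the class docstring and must exist in
# reconstructions_archive.json.
def _example_reconstruction_usage():
    ni = Structure.from_spacegroup("Fm-3m", Lattice.cubic(3.52), ["Ni"], [[0, 0, 0]])
    recon = ReconstructionGenerator(
        ni, min_slab_size=10, min_vacuum_size=10, reconstruction_name="fcc_110_missing_row_1x2"
    )
    return recon.build_slabs()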
def get_d(slab):
"""
Determine the spacing between
each layer of atoms along c
"""
sorted_sites = sorted(slab, key=lambda site: site.frac_coords[2])
for i, site in enumerate(sorted_sites):
if not "%.6f" % (site.frac_coords[2]) == "%.6f" % (sorted_sites[i + 1].frac_coords[2]):
d = abs(site.frac_coords[2] - sorted_sites[i + 1].frac_coords[2])
break
return slab.lattice.get_cartesian_coords([0, 0, d])[2]
def is_already_analyzed(miller_index: tuple, miller_list: list, symm_ops: list) -> bool:
"""
Helper function to check if a given Miller index is
part of the family of indices of any index in a list
Args:
miller_index (tuple): The Miller index to analyze
miller_list (list): List of Miller indices. If the given
Miller index belongs in the same family as any of the
indices in this list, return True, else return False
symm_ops (list): Symmetry operations of a
lattice, used to define family of indices
"""
for op in symm_ops:
if in_coord_list(miller_list, op.operate(miller_index)):
return True
return False
def get_symmetrically_equivalent_miller_indices(structure, miller_index, return_hkil=True):
"""
Returns all symmetrically equivalent indices for a given structure. Analysis
is based on the symmetry of the reciprocal lattice of the structure.
Args:
miller_index (tuple): Designates the family of Miller indices
to find. Can be hkl or hkil for hexagonal systems
return_hkil (bool): If true, return hkil form of Miller
index for hexagonal systems, otherwise return hkl
"""
# Change to hkl if hkil because in_coord_list only handles tuples of 3
miller_index = (miller_index[0], miller_index[1], miller_index[3]) if len(miller_index) == 4 else miller_index
mmi = max(np.abs(miller_index))
r = list(range(-mmi, mmi + 1))
r.reverse()
sg = SpacegroupAnalyzer(structure)
# Get distinct hkl planes from the rhombohedral setting if trigonal
if sg.get_crystal_system() == "trigonal":
prim_structure = SpacegroupAnalyzer(structure).get_primitive_standard_structure()
symm_ops = prim_structure.lattice.get_recp_symmetry_operation()
else:
symm_ops = structure.lattice.get_recp_symmetry_operation()
equivalent_millers = [miller_index]
for miller in itertools.product(r, r, r):
if miller == miller_index:
continue
if any(i != 0 for i in miller):
if is_already_analyzed(miller, equivalent_millers, symm_ops):
equivalent_millers.append(miller)
# include larger Miller indices in the family of planes
if all(mmi > i for i in np.abs(miller)) and not in_coord_list(equivalent_millers, miller):
if is_already_analyzed(mmi * np.array(miller), equivalent_millers, symm_ops):
equivalent_millers.append(miller)
if return_hkil and sg.get_crystal_system() in ["trigonal", "hexagonal"]:
return [(hkl[0], hkl[1], -1 * hkl[0] - hkl[1], hkl[2]) for hkl in equivalent_millers]
return equivalent_millers
def get_symmetrically_distinct_miller_indices(structure, max_index, return_hkil=False):
"""
Returns all symmetrically distinct indices below a certain max-index for
a given structure. Analysis is based on the symmetry of the reciprocal
lattice of the structure.
Args:
structure (Structure): input structure.
max_index (int): The maximum index. For example, a max_index of 1
means that (100), (110), and (111) are returned for the cubic
structure. All other indices are equivalent to one of these.
return_hkil (bool): If true, return hkil form of Miller
index for hexagonal systems, otherwise return hkl
"""
r = list(range(-max_index, max_index + 1))
r.reverse()
# First we get a list of all hkls for conventional (including equivalent)
conv_hkl_list = [miller for miller in itertools.product(r, r, r) if any(i != 0 for i in miller)]
sg = SpacegroupAnalyzer(structure)
# Get distinct hkl planes from the rhombohedral setting if trigonal
if sg.get_crystal_system() == "trigonal":
transf = sg.get_conventional_to_primitive_transformation_matrix()
miller_list = [hkl_transformation(transf, hkl) for hkl in conv_hkl_list]
prim_structure = SpacegroupAnalyzer(structure).get_primitive_standard_structure()
symm_ops = prim_structure.lattice.get_recp_symmetry_operation()
else:
miller_list = conv_hkl_list
symm_ops = structure.lattice.get_recp_symmetry_operation()
unique_millers, unique_millers_conv = [], []
for i, miller in enumerate(miller_list):
d = abs(reduce(gcd, miller))
miller = tuple(int(i / d) for i in miller)
if not is_already_analyzed(miller, unique_millers, symm_ops):
if sg.get_crystal_system() == "trigonal":
# Now we find the distinct primitive hkls using
# the primitive symmetry operations and their
# corresponding hkls in the conventional setting
unique_millers.append(miller)
d = abs(reduce(gcd, conv_hkl_list[i]))
cmiller = tuple(int(i / d) for i in conv_hkl_list[i])
unique_millers_conv.append(cmiller)
else:
unique_millers.append(miller)
unique_millers_conv.append(miller)
if return_hkil and sg.get_crystal_system() in ["trigonal", "hexagonal"]:
return [(hkl[0], hkl[1], -1 * hkl[0] - hkl[1], hkl[2]) for hkl in unique_millers_conv]
return unique_millers_conv
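def _example_distinct_millers():
    # Minimal usage sketch, not part of the original module: for a cubic CsCl-type
    # structure and max_index=1, only the (1 0 0), (1 1 0) and (1 1 1) families should
    # survive as symmetrically distinct, as described in the docstring above. The
    # pymatgen import paths are the usual ones and are assumed here.
    from pymatgen.core.lattice import Lattice
    from pymatgen.core.structure import Structure
    cscl = Structure(Lattice.cubic(4.2), ["Cs", "Cl"], [[0, 0, 0], [0.5, 0.5, 0.5]])
    print(get_symmetrically_distinct_miller_indices(cscl, 1))
    # expected (up to ordering): [(1, 1, 1), (1, 1, 0), (1, 0, 0)]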
def hkl_transformation(transf, miller_index):
"""
Returns the Miller index from setting
A to B using a transformation matrix
Args:
transf (3x3 array): The transformation matrix
that transforms a lattice of A to B
miller_index ([h, k, l]): Miller index to transform to setting B
"""
# Get a matrix of whole numbers (ints)
def lcm(a, b):
return a * b // math.gcd(a, b)
reduced_transf = reduce(lcm, [int(1 / i) for i in itertools.chain(*transf) if i != 0]) * transf
reduced_transf = reduced_transf.astype(int)
# perform the transformation
t_hkl = np.dot(reduced_transf, miller_index)
d = abs(reduce(gcd, t_hkl))
t_hkl = np.array([int(i / d) for i in t_hkl])
# get mostly positive oriented Miller index
if len([i for i in t_hkl if i < 0]) > 1:
t_hkl *= -1
return tuple(t_hkl)
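def _example_hkl_transformation():
    # Minimal sketch, not part of the original module: with the identity matrix the
    # index is returned unchanged apart from the sign convention enforced at the end;
    # an index with two negative entries is flipped to its "mostly positive" form.
    print(hkl_transformation(np.eye(3), (1, 1, 0)))    # expected (1, 1, 0)
    print(hkl_transformation(np.eye(3), (-1, -1, 2)))  # expected (1, 1, -2)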
def generate_all_slabs(
structure,
max_index,
min_slab_size,
min_vacuum_size,
bonds=None,
tol=0.1,
ftol=0.1,
max_broken_bonds=0,
lll_reduce=False,
center_slab=False,
primitive=True,
max_normal_search=None,
symmetrize=False,
repair=False,
include_reconstructions=False,
in_unit_planes=False,
):
"""
A function that finds all different slabs up to a certain miller index.
Slabs oriented under certain Miller indices that are equivalent to other
slabs in other Miller indices are filtered out using symmetry operations
to get rid of any repetitive slabs. For example, under symmetry operations,
CsCl has equivalent slabs in the (0,0,1), (0,1,0), and (1,0,0) direction.
Args:
structure (Structure): Initial input structure. Note that to
ensure that the miller indices correspond to usual
crystallographic definitions, you should supply a conventional
unit cell structure.
max_index (int): The maximum Miller index to go up to.
min_slab_size (float): In Angstroms
min_vacuum_size (float): In Angstroms
bonds ({(specie1, specie2): max_bond_dist}: bonds are
specified as a dict of tuples: float of specie1, specie2
and the max bonding distance. For example, PO4 groups may be
defined as {("P", "O"): 3}.
        tol (float): General tolerance parameter for getting primitive
            cells and matching structures.
        ftol (float): Threshold parameter in fcluster in order to check
            if two atoms are lying on the same plane. Default thresh set
            to 0.1 Angstrom in the direction of the surface normal.
max_broken_bonds (int): Maximum number of allowable broken bonds
for the slab. Use this to limit # of slabs (some structures
may have a lot of slabs). Defaults to zero, which means no
defined bonds must be broken.
lll_reduce (bool): Whether to perform an LLL reduction on the
eventual structure.
center_slab (bool): Whether to center the slab in the cell with
equal vacuum spacing from the top and bottom.
primitive (bool): Whether to reduce any generated slabs to a
primitive cell (this does **not** mean the slab is generated
from a primitive cell, it simply means that after slab
generation, we attempt to find shorter lattice vectors,
which lead to less surface area and smaller cells).
max_normal_search (int): If set to a positive integer, the code will
conduct a search for a normal lattice vector that is as
perpendicular to the surface as possible by considering
            multiple linear combinations of lattice vectors up to
            max_normal_search. This has no bearing on surface energies,
            but may be useful as a preliminary step to generating slabs
            for adsorption and other purposes. It is typical that this will
not be the smallest possible cell for simulation. Normality
is not guaranteed, but the oriented cell will have the c
vector as normal as possible (within the search range) to the
surface. A value of up to the max absolute Miller index is
usually sufficient.
symmetrize (bool): Whether or not to ensure the surfaces of the
slabs are equivalent.
repair (bool): Whether to repair terminations with broken bonds
or just omit them
include_reconstructions (bool): Whether to include reconstructed
slabs available in the reconstructions_archive.json file.
"""
all_slabs = []
for miller in get_symmetrically_distinct_miller_indices(structure, max_index):
gen = SlabGenerator(
structure,
miller,
min_slab_size,
min_vacuum_size,
lll_reduce=lll_reduce,
center_slab=center_slab,
primitive=primitive,
max_normal_search=max_normal_search,
in_unit_planes=in_unit_planes,
)
slabs = gen.get_slabs(
bonds=bonds,
tol=tol,
ftol=ftol,
symmetrize=symmetrize,
max_broken_bonds=max_broken_bonds,
repair=repair,
)
if len(slabs) > 0:
logger.debug("%s has %d slabs... " % (miller, len(slabs)))
all_slabs.extend(slabs)
if include_reconstructions:
sg = SpacegroupAnalyzer(structure)
symbol = sg.get_space_group_symbol()
        # enumerate through all possible reconstructions in the
# archive available for this particular structure (spacegroup)
for name, instructions in reconstructions_archive.items():
if "base_reconstruction" in instructions.keys():
instructions = reconstructions_archive[instructions["base_reconstruction"]]
if instructions["spacegroup"]["symbol"] == symbol:
# check if this reconstruction has a max index
# equal or less than the given max index
if max(instructions["miller_index"]) > max_index:
continue
recon = ReconstructionGenerator(structure, min_slab_size, min_vacuum_size, name)
all_slabs.extend(recon.build_slabs())
return all_slabs
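def _example_generate_all_slabs():
    # Minimal usage sketch, not part of the original module: enumerate every distinct
    # slab of a CsCl-type structure up to max_index=1 with 10 A slabs and 10 A vacuum.
    # The pymatgen import paths are the usual ones and are assumed here.
    from pymatgen.core.lattice import Lattice
    from pymatgen.core.structure import Structure
    cscl = Structure(Lattice.cubic(4.2), ["Cs", "Cl"], [[0, 0, 0], [0.5, 0.5, 0.5]])
    for slab in generate_all_slabs(cscl, max_index=1, min_slab_size=10, min_vacuum_size=10):
        print(slab.miller_index, slab.shift)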
def get_slab_regions(slab, blength=3.5):
"""
Function to get the ranges of the slab regions. Useful for discerning where
the slab ends and vacuum begins if the slab is not fully within the cell
Args:
slab (Structure): Structure object modelling the surface
blength (float, Ang): The bondlength between atoms. You generally
want this value to be larger than the actual bondlengths in
order to find atoms that are part of the slab
"""
fcoords, indices, all_indices = [], [], []
for site in slab:
# find sites with c < 0 (noncontiguous)
neighbors = slab.get_neighbors(site, blength, include_index=True, include_image=True)
for nn in neighbors:
if nn[0].frac_coords[2] < 0:
# sites are noncontiguous within cell
fcoords.append(nn[0].frac_coords[2])
indices.append(nn[-2])
if nn[-2] not in all_indices:
all_indices.append(nn[-2])
if fcoords:
# If slab is noncontiguous, locate the lowest
# site within the upper region of the slab
while fcoords:
last_fcoords = copy.copy(fcoords)
last_indices = copy.copy(indices)
site = slab[indices[fcoords.index(min(fcoords))]]
neighbors = slab.get_neighbors(site, blength, include_index=True, include_image=True)
fcoords, indices = [], []
for nn in neighbors:
if 1 > nn[0].frac_coords[2] > 0 and nn[0].frac_coords[2] < site.frac_coords[2]:
# sites are noncontiguous within cell
fcoords.append(nn[0].frac_coords[2])
indices.append(nn[-2])
if nn[-2] not in all_indices:
all_indices.append(nn[-2])
# Now locate the highest site within the lower region of the slab
upper_fcoords = []
for site in slab:
if all(nn.index not in all_indices for nn in slab.get_neighbors(site, blength)):
upper_fcoords.append(site.frac_coords[2])
coords = copy.copy(last_fcoords) if not fcoords else copy.copy(fcoords)
min_top = slab[last_indices[coords.index(min(coords))]].frac_coords[2]
ranges = [[0, max(upper_fcoords)], [min_top, 1]]
else:
# If the entire slab region is within the slab cell, just
# set the range as the highest and lowest site in the slab
sorted_sites = sorted(slab, key=lambda site: site.frac_coords[2])
ranges = [[sorted_sites[0].frac_coords[2], sorted_sites[-1].frac_coords[2]]]
return ranges
def miller_index_from_sites(lattice, coords, coords_are_cartesian=True, round_dp=4, verbose=True):
"""
Get the Miller index of a plane from a list of site coordinates.
A minimum of 3 sets of coordinates are required. If more than 3 sets of
coordinates are given, the best plane that minimises the distance to all
points will be calculated.
Args:
lattice (list or Lattice): A 3x3 lattice matrix or `Lattice` object (for
example obtained from Structure.lattice).
coords (iterable): A list or numpy array of coordinates. Can be
cartesian or fractional coordinates. If more than three sets of
coordinates are provided, the best plane that minimises the
distance to all sites will be calculated.
coords_are_cartesian (bool, optional): Whether the coordinates are
in cartesian space. If using fractional coordinates set to False.
round_dp (int, optional): The number of decimal places to round the
miller index to.
verbose (bool, optional): Whether to print warnings.
Returns:
(tuple): The Miller index.
"""
if not isinstance(lattice, Lattice):
lattice = Lattice(lattice)
return lattice.get_miller_index_from_coords(
coords,
coords_are_cartesian=coords_are_cartesian,
round_dp=round_dp,
verbose=verbose,
)
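def _example_miller_index_from_sites():
    # Minimal usage sketch, not part of the original module: three Cartesian points in
    # the z = 1.5 plane of a 3 Angstrom cubic cell should map onto the (0 0 1) plane.
    lattice = [[3.0, 0.0, 0.0], [0.0, 3.0, 0.0], [0.0, 0.0, 3.0]]
    coords = [[0.0, 0.0, 1.5], [1.5, 0.0, 1.5], [0.0, 1.5, 1.5]]
    print(miller_index_from_sites(lattice, coords))  # expected (0, 0, 1)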
def center_slab(slab):
"""
The goal here is to ensure the center of the slab region
is centered close to c=0.5. This makes it easier to
find the surface sites and apply operations like doping.
    There are three cases where the slab is not centered:
1. The slab region is completely between two vacuums in the
box but not necessarily centered. We simply shift the
slab by the difference in its center of mass and 0.5
along the c direction.
2. The slab completely spills outside the box from the bottom
and into the top. This makes it incredibly difficult to
locate surface sites. We iterate through all sites that
spill over (z>c) and shift all sites such that this specific
site is now on the other side. Repeat for all sites with z>c.
3. This is a simpler case of scenario 2. Either the top or bottom
slab sites are at c=0 or c=1. Treat as scenario 2.
Args:
slab (Slab): Slab structure to center
Returns:
        A centered slab structure
"""
# get a reasonable r cutoff to sample neighbors
bdists = sorted(nn[1] for nn in slab.get_neighbors(slab[0], 10) if nn[1] > 0)
r = bdists[0] * 3
all_indices = [i for i, site in enumerate(slab)]
# check if structure is case 2 or 3, shift all the
# sites up to the other side until it is case 1
for site in slab:
if any(nn[1] > slab.lattice.c for nn in slab.get_neighbors(site, r)):
shift = 1 - site.frac_coords[2] + 0.05
slab.translate_sites(all_indices, [0, 0, shift])
# now the slab is case 1, shift the center of mass of the slab to 0.5
weights = [s.species.weight for s in slab]
center_of_mass = np.average(slab.frac_coords, weights=weights, axis=0)
shift = 0.5 - center_of_mass[2]
slab.translate_sites(all_indices, [0, 0, shift])
return slab
def _reduce_vector(vector):
# small function to reduce vectors
d = abs(reduce(gcd, vector))
vector = tuple(int(i / d) for i in vector)
return vector
|
vorwerkc/pymatgen
|
pymatgen/core/surface.py
|
Python
|
mit
| 86,299
|
[
"pymatgen"
] |
c2fa975de364a2c842a3fdb907cbd5561b77dfeea6440ee0f2878073493f7cb8
|
from kmeans import *
import sys
import matplotlib.pyplot as plt
plt.ion()
if sys.version_info.major == 3:
raw_input = input
def mogEM(x, K, iters, randConst=1, minVary=0):
"""
Fits a Mixture of K Diagonal Gaussians on x.
Inputs:
x: data with one data vector in each column.
K: Number of Gaussians.
iters: Number of EM iterations.
randConst: scalar to control the initial mixing coefficients
minVary: minimum variance of each Gaussian.
Returns:
p: probabilities of clusters (or mixing coefficients).
mu: mean of the clusters, one in each column.
vary: variances for the cth cluster, one in each column.
logLikelihood: log-likelihood of data after every iteration.
"""
N, T = x.shape
# Initialize the parameters
p = randConst + np.random.rand(K, 1)
p = p / np.sum(p) # mixing coefficients
mn = np.mean(x, axis=1).reshape(-1, 1)
vr = np.var(x, axis=1).reshape(-1, 1)
mu = mn + np.random.randn(N, K) * (np.sqrt(vr) / randConst)
    # Uncomment the next line to use the k-means initialization (and comment out the random initialization of mu above)
#mu = KMeans(x, K, 5)
vary = vr * np.ones((1, K)) * 2
vary = (vary >= minVary) * vary + (vary < minVary) * minVary
logLikelihood = np.zeros((iters, 1))
# Do iters iterations of EM
    for i in range(iters):
# Do the E step
respTot = np.zeros((K, 1))
respX = np.zeros((N, K))
respDist = np.zeros((N, K))
ivary = 1 / vary
logNorm = np.log(p) - 0.5 * N * np.log(2 * np.pi) - \
0.5 * np.sum(np.log(vary), axis=0).reshape(-1, 1)
logPcAndx = np.zeros((K, T))
        for k in range(K):
dis = (x - mu[:, k].reshape(-1, 1))**2
logPcAndx[k, :] = logNorm[k] - 0.5 * \
np.sum(ivary[:, k].reshape(-1, 1) * dis, axis=0)
mx = np.max(logPcAndx, axis=0).reshape(1, -1)
PcAndx = np.exp(logPcAndx - mx)
Px = np.sum(PcAndx, axis=0).reshape(1, -1)
PcGivenx = PcAndx / Px
logLikelihood[i] = np.sum(np.log(Px) + mx)
        print('Iter %d logLikelihood %.5f' % (i + 1, logLikelihood[i]))
# Plot log likelihood of data
plt.figure(0)
plt.clf()
plt.plot(np.arange(i), logLikelihood[:i], 'r-')
plt.title('Log-likelihood of data versus # iterations of EM')
plt.xlabel('Iterations of EM')
plt.ylabel('Log-likelihood')
plt.draw()
plt.pause(1)
# Do the M step
# update mixing coefficients
respTot = np.mean(PcGivenx, axis=1).reshape(-1, 1)
p = respTot
# update mean
respX = np.zeros((N, K))
        for k in range(K):
respX[:, k] = np.mean(x * PcGivenx[k, :].reshape(1, -1), axis=1)
mu = respX / respTot.T
# update variance
respDist = np.zeros((N, K))
        for k in range(K):
respDist[:, k] = np.mean(
(x - mu[:, k].reshape(-1, 1))**2 * PcGivenx[k, :].reshape(1, -1), axis=1)
vary = respDist / respTot.T
vary = (vary >= minVary) * vary + (vary < minVary) * minVary
return p, mu, vary, logLikelihood
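def _example_mogEM():
    # Minimal usage sketch, not part of the original assignment code: fit a
    # 2-component MoG to a small synthetic 2-D dataset. mogEM expects one data
    # vector per *column*, i.e. x has shape (dimensions, examples). np is assumed
    # to be available via the star import at the top of the file, as the rest of
    # this module already relies on it.
    np.random.seed(0)
    a = np.random.randn(2, 100) + np.array([[3.0], [0.0]])
    b = np.random.randn(2, 100) - np.array([[3.0], [0.0]])
    x = np.concatenate([a, b], axis=1)
    p, mu, vary, logLikelihood = mogEM(x, 2, 5, randConst=1, minVary=0.01)
    print(p.ravel())  # mixing coefficients, roughly [0.5, 0.5]
    print(mu)         # cluster means, roughly (+3, 0) and (-3, 0)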
def mogLogLikelihood(p, mu, vary, x):
""" Computes log-likelihood of data under the specified MoG model
Inputs:
x: data with one data vector in each column.
p: probabilities of clusters.
mu: mean of the clusters, one in each column.
vary: variances for the cth cluster, one in each column.
Returns:
logLikelihood: log-likelihood of data after every iteration.
"""
K = p.shape[0]
N, T = x.shape
ivary = 1 / vary
logLikelihood = np.zeros(T)
    for t in range(T):
# Compute log P(c)p(x|c) and then log p(x)
logPcAndx = np.log(p) - 0.5 * N * np.log(2 * np.pi) \
- 0.5 * np.sum(np.log(vary), axis=0).reshape(-1, 1) \
- 0.5 * \
np.sum(ivary * (x[:, t].reshape(-1, 1) - mu)
** 2, axis=0).reshape(-1, 1)
mx = np.max(logPcAndx, axis=0)
logLikelihood[t] = np.log(np.sum(np.exp(logPcAndx - mx))) + mx
return logLikelihood
def q2():
K = 7
iters = 10
minVary = 0.01
randConst = 1.5
# load data
inputs_train, inputs_valid, inputs_test, target_train, target_valid, target_test = LoadData(
'toronto_face.npz')
# Train a MoG model with 7 components on all training data, i.e., inputs_train,
# with both original initialization and kmeans initialization.
p, mu, vary, logLikelihood = mogEM(inputs_train, K, iters, randConst, minVary)
    print(p)
ShowMeans(mu,1)
ShowMeans(vary,2)
def q4():
iters = 10
minVary = 0.01
randConst = 1.7
numComponents = np.array([7, 14, 21, 28, 35])
T = numComponents.shape[0]
errorTrain = np.zeros(T)
errorTest = np.zeros(T)
errorValidation = np.zeros(T)
# extract data of class 1-Anger, 4-Happy
dataQ4 = LoadDataQ4('toronto_face.npz')
# images
x_train_anger = dataQ4['x_train_anger']
x_train_happy = dataQ4['x_train_happy']
x_train = np.concatenate([x_train_anger, x_train_happy], axis=1)
x_valid = np.concatenate(
[dataQ4['x_valid_anger'], dataQ4['x_valid_happy']], axis=1)
x_test = np.concatenate(
[dataQ4['x_test_anger'], dataQ4['x_test_happy']], axis=1)
# label
y_train = np.concatenate(
[dataQ4['y_train_anger'], dataQ4['y_train_happy']])
y_valid = np.concatenate(
[dataQ4['y_valid_anger'], dataQ4['y_valid_happy']])
y_test = np.concatenate([dataQ4['y_test_anger'], dataQ4['y_test_happy']])
# Hints: this is p(d), use it based on Bayes Theorem
num_anger_train = x_train_anger.shape[1]
num_happy_train = x_train_happy.shape[1]
log_likelihood_class = np.log(
[num_anger_train, num_happy_train]) - np.log(num_anger_train + num_happy_train)
    for t in range(T):
K = numComponents[t]
# Train a MoG model with K components
# Hints: using (x_train_anger, x_train_happy) train 2 MoGs
p2, mu2, vary2, logLikelihood2 = mogEM(x_train_anger, K, iters, randConst, minVary)
p3, mu3, vary3, logLikelihood3 = mogEM(x_train_happy, K, iters, randConst, minVary)
# Compute the probability P(d|x), classify examples, and compute error rate
# Hints: using (x_train, y_train), (x_valid, y_valid), (x_test, y_test)
# to compute error rates, you may want to use mogLogLikelihood function
#-------------------- Add your code here ------------------------------
#------------------- Answers ---------------------
logLikelihood2tr = mogLogLikelihood(p2, mu2, vary2, x_train)
logLikelihood3tr = mogLogLikelihood(p3, mu3, vary3, x_train)
logLikelihood2va = mogLogLikelihood(p2, mu2, vary2, x_valid)
logLikelihood3va = mogLogLikelihood(p3, mu3, vary3, x_valid)
logLikelihood2te = mogLogLikelihood(p2, mu2, vary2, x_test)
logLikelihood3te = mogLogLikelihood(p3, mu3, vary3, x_test)
errorTrain[t] = np.mean((logLikelihood2tr > logLikelihood3tr) == y_train)
errorValidation[t] = np.mean((logLikelihood2va > logLikelihood3va) == y_valid)
errorTest[t] = np.mean((logLikelihood2te > logLikelihood3te) == y_test)
# Plot the error rate
plt.figure(0)
plt.clf()
# to be removed before release
plt.plot(numComponents, errorTrain, 'r', label='Training')
plt.plot(numComponents, errorValidation, 'g', label='Validation')
plt.plot(numComponents, errorTest, 'b', label='Testing')
plt.xlabel('Number of Mixture Components')
plt.ylabel('Error Rate')
plt.legend()
plt.draw()
plt.pause(0.0001)
if __name__ == '__main__':
#-------------------------------------------------------------------------
q2()
#-------------------------------------------------------------------------
#q4()
raw_input('Press Enter to continue.')
|
ouyangyike/Machine-Learning-and-Data-Mining
|
Mixture of Gaussian/mogEM.py
|
Python
|
mit
| 7,998
|
[
"Gaussian"
] |
9739a58ca0b32be8660ddd8f5464c26777b6ee11a17e348eccf5952db5c600d5
|
#!/usr/bin/python
# Copyright 2017 Google Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gcp_healthcheck
version_added: "2.4"
short_description: Create, Update or Destroy a Healthcheck.
description:
- Create, Update or Destroy a Healthcheck. Currently only HTTP and
HTTPS Healthchecks are supported. Healthchecks are used to monitor
individual instances, managed instance groups and/or backend
    services. Healthchecks are reusable.
- Visit
U(https://cloud.google.com/compute/docs/load-balancing/health-checks)
for an overview of Healthchecks on GCP.
- See
U(https://cloud.google.com/compute/docs/reference/latest/httpHealthChecks) for
API details on HTTP Healthchecks.
- See
U(https://cloud.google.com/compute/docs/reference/latest/httpsHealthChecks)
    for more details on the HTTPS Healthcheck API.
requirements:
- "python >= 2.6"
- "google-api-python-client >= 1.6.2"
- "google-auth >= 0.9.0"
- "google-auth-httplib2 >= 0.0.2"
notes:
- Only supports HTTP and HTTPS Healthchecks currently.
author:
- "Tom Melendez (@supertom) <tom@supertom.com>"
options:
check_interval:
description:
- How often (in seconds) to send a health check.
default: 5
healthcheck_name:
description:
- Name of the Healthcheck.
required: true
healthcheck_type:
description:
- Type of Healthcheck.
required: true
choices: ["HTTP", "HTTPS"]
host_header:
description:
- The value of the host header in the health check request. If left
empty, the public IP on behalf of which this health
check is performed will be used.
    required: false
default: ""
port:
description:
- The TCP port number for the health check request. The default value is
443 for HTTPS and 80 for HTTP.
request_path:
description:
- The request path of the HTTPS health check request.
required: false
default: "/"
state:
description: State of the Healthcheck.
    required: false
    default: "present"
choices: ["present", "absent"]
timeout:
description:
- How long (in seconds) to wait for a response before claiming
failure. It is invalid for timeout
to have a greater value than check_interval.
default: 5
unhealthy_threshold:
description:
- A so-far healthy instance will be marked unhealthy after this
many consecutive failures.
default: 2
healthy_threshold:
description:
- A so-far unhealthy instance will be marked healthy after this
many consecutive successes.
default: 2
service_account_email:
description:
- service account email
service_account_permissions:
version_added: "2.0"
description:
- service account permissions (see
U(https://cloud.google.com/sdk/gcloud/reference/compute/instances/create),
--scopes section for detailed information)
choices: [
"bigquery", "cloud-platform", "compute-ro", "compute-rw",
"useraccounts-ro", "useraccounts-rw", "datastore", "logging-write",
"monitoring", "sql-admin", "storage-full", "storage-ro",
"storage-rw", "taskqueue", "userinfo-email"
]
credentials_file:
description:
- Path to the JSON file associated with the service account email
project_id:
description:
- Your GCP project ID
'''
EXAMPLES = '''
- name: Create Minimum HealthCheck
gcp_healthcheck:
service_account_email: "{{ service_account_email }}"
credentials_file: "{{ credentials_file }}"
project_id: "{{ project_id }}"
healthcheck_name: my-healthcheck
healthcheck_type: HTTP
state: present
- name: Create HTTP HealthCheck
gcp_healthcheck:
service_account_email: "{{ service_account_email }}"
credentials_file: "{{ credentials_file }}"
project_id: "{{ project_id }}"
healthcheck_name: my-healthcheck
healthcheck_type: HTTP
    host_header: my-host
request_path: /hc
check_interval: 10
timeout: 30
    unhealthy_threshold: 2
    healthy_threshold: 1
state: present
- name: Create HTTPS HealthCheck
gcp_healthcheck:
service_account_email: "{{ service_account_email }}"
credentials_file: "{{ credentials_file }}"
project_id: "{{ project_id }}"
healthcheck_name: "{{ https_healthcheck }}"
healthcheck_type: HTTPS
host_header: my-host
request_path: /hc
check_interval: 5
timeout: 5
unhealthy_threshold: 2
healthy_threshold: 1
state: present
'''
RETURN = '''
state:
description: state of the Healthcheck
returned: Always.
type: str
sample: present
healthcheck_name:
description: Name of the Healthcheck
returned: Always
type: str
sample: my-url-map
healthcheck_type:
description: Type of the Healthcheck
returned: Always
type: str
sample: HTTP
healthcheck:
description: GCP Healthcheck dictionary
returned: Always. Refer to GCP documentation for detailed field descriptions.
type: dict
sample: { "name": "my-hc", "port": 443, "requestPath": "/foo" }
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.gcp import get_google_api_client, GCPUtils
USER_AGENT_PRODUCT = 'ansible-healthcheck'
USER_AGENT_VERSION = '0.0.1'
def _validate_healthcheck_params(params):
"""
Validate healthcheck params.
    Simple validation has already been performed by AnsibleModule.
:param params: Ansible dictionary containing configuration.
:type params: ``dict``
:return: True or raises ValueError
:rtype: ``bool`` or `class:ValueError`
"""
if params['timeout'] > params['check_interval']:
raise ValueError("timeout (%s) is greater than check_interval (%s)" % (
params['timeout'], params['check_interval']))
return (True, '')
def _build_healthcheck_dict(params):
"""
    Reformat healthcheck params in Ansible Params for GCP.
    :param params: Params from AnsibleModule object
    :type params: ``dict``
:return: dictionary suitable for submission to GCP
HealthCheck (HTTP/HTTPS) API.
    :rtype: ``dict``
"""
gcp_dict = GCPUtils.params_to_gcp_dict(params, 'healthcheck_name')
if 'timeout' in gcp_dict:
gcp_dict['timeoutSec'] = gcp_dict['timeout']
del gcp_dict['timeout']
if 'checkInterval' in gcp_dict:
gcp_dict['checkIntervalSec'] = gcp_dict['checkInterval']
del gcp_dict['checkInterval']
if 'hostHeader' in gcp_dict:
gcp_dict['host'] = gcp_dict['hostHeader']
del gcp_dict['hostHeader']
if 'healthcheckType' in gcp_dict:
del gcp_dict['healthcheckType']
return gcp_dict
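# Illustrative sketch, not part of the original module: the shape of dictionary that
# _build_healthcheck_dict is expected to produce for typical module parameters. This
# assumes, as the key renames above already do, that GCPUtils.params_to_gcp_dict
# camelCases the snake_case keys and maps 'healthcheck_name' onto 'name'.
#
#   params = {'healthcheck_name': 'my-hc', 'healthcheck_type': 'HTTP',
#             'request_path': '/hc', 'check_interval': 10, 'timeout': 5,
#             'host_header': 'my-host', 'port': 80}
#   _build_healthcheck_dict(params)
#   => {'name': 'my-hc', 'requestPath': '/hc', 'checkIntervalSec': 10,
#       'timeoutSec': 5, 'host': 'my-host', 'port': 80}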
def _get_req_resource(client, resource_type):
if resource_type == 'HTTPS':
return (client.httpsHealthChecks(), 'httpsHealthCheck')
else:
return (client.httpHealthChecks(), 'httpHealthCheck')
def get_healthcheck(client, name, project_id=None, resource_type='HTTP'):
"""
Get a Healthcheck from GCP.
:param client: An initialized GCE Compute Discovery resource.
:type client: :class: `googleapiclient.discovery.Resource`
    :param name: Name of the Healthcheck.
:type name: ``str``
:param project_id: The GCP project ID.
:type project_id: ``str``
:return: A dict resp from the respective GCP 'get' request.
:rtype: ``dict``
"""
try:
resource, entity_name = _get_req_resource(client, resource_type)
args = {'project': project_id, entity_name: name}
req = resource.get(**args)
return GCPUtils.execute_api_client_req(req, raise_404=False)
except Exception:
raise
def create_healthcheck(client, params, project_id, resource_type='HTTP'):
"""
Create a new Healthcheck.
:param client: An initialized GCE Compute Discovery resource.
:type client: :class: `googleapiclient.discovery.Resource`
:param params: Dictionary of arguments from AnsibleModule.
:type params: ``dict``
:return: Tuple with changed status and response dict
:rtype: ``tuple`` in the format of (bool, dict)
"""
gcp_dict = _build_healthcheck_dict(params)
try:
resource, _ = _get_req_resource(client, resource_type)
args = {'project': project_id, 'body': gcp_dict}
req = resource.insert(**args)
return_data = GCPUtils.execute_api_client_req(req, client, raw=False)
if not return_data:
return_data = get_healthcheck(client,
name=params['healthcheck_name'],
project_id=project_id)
return (True, return_data)
except Exception:
raise
def delete_healthcheck(client, name, project_id, resource_type='HTTP'):
"""
Delete a Healthcheck.
:param client: An initialized GCE Compute Discovery resource.
:type client: :class: `googleapiclient.discovery.Resource`
    :param name: Name of the Healthcheck.
:type name: ``str``
:param project_id: The GCP project ID.
:type project_id: ``str``
:return: Tuple with changed status and response dict
:rtype: ``tuple`` in the format of (bool, dict)
"""
try:
resource, entity_name = _get_req_resource(client, resource_type)
args = {'project': project_id, entity_name: name}
req = resource.delete(**args)
return_data = GCPUtils.execute_api_client_req(req, client)
return (True, return_data)
except Exception:
raise
def update_healthcheck(client, healthcheck, params, name, project_id,
resource_type='HTTP'):
"""
Update a Healthcheck.
If the healthcheck has not changed, the update will not occur.
:param client: An initialized GCE Compute Discovery resource.
:type client: :class: `googleapiclient.discovery.Resource`
    :param healthcheck: The existing Healthcheck resource fetched from GCP.
    :type healthcheck: ``dict``
    :param params: Dictionary of arguments from AnsibleModule.
    :type params: ``dict``
    :param name: Name of the Healthcheck.
    :type name: ``str``
:param project_id: The GCP project ID.
:type project_id: ``str``
:return: Tuple with changed status and response dict
:rtype: ``tuple`` in the format of (bool, dict)
"""
gcp_dict = _build_healthcheck_dict(params)
ans = GCPUtils.are_params_equal(healthcheck, gcp_dict)
if ans:
return (False, 'no update necessary')
try:
resource, entity_name = _get_req_resource(client, resource_type)
args = {'project': project_id, entity_name: name, 'body': gcp_dict}
req = resource.update(**args)
return_data = GCPUtils.execute_api_client_req(
req, client=client, raw=False)
return (True, return_data)
except Exception:
raise
def main():
module = AnsibleModule(argument_spec=dict(
healthcheck_name=dict(required=True),
healthcheck_type=dict(required=True,
choices=['HTTP', 'HTTPS']),
request_path=dict(required=False, default='/'),
check_interval=dict(required=False, type='int', default=5),
healthy_threshold=dict(required=False, type='int', default=2),
unhealthy_threshold=dict(required=False, type='int', default=2),
host_header=dict(required=False, type='str', default=''),
timeout=dict(required=False, type='int', default=5),
port=dict(required=False, type='int'),
state=dict(choices=['absent', 'present'], default='present'),
service_account_email=dict(),
service_account_permissions=dict(type='list'),
credentials_file=dict(),
project_id=dict(), ), )
client, conn_params = get_google_api_client(module, 'compute', user_agent_product=USER_AGENT_PRODUCT,
user_agent_version=USER_AGENT_VERSION)
params = {}
params['healthcheck_name'] = module.params.get('healthcheck_name')
params['healthcheck_type'] = module.params.get('healthcheck_type')
params['request_path'] = module.params.get('request_path')
params['check_interval'] = module.params.get('check_interval')
params['healthy_threshold'] = module.params.get('healthy_threshold')
params['unhealthy_threshold'] = module.params.get('unhealthy_threshold')
params['host_header'] = module.params.get('host_header')
params['timeout'] = module.params.get('timeout')
params['port'] = module.params.get('port', None)
params['state'] = module.params.get('state')
if not params['port']:
params['port'] = 80
if params['healthcheck_type'] == 'HTTPS':
params['port'] = 443
try:
_validate_healthcheck_params(params)
except Exception as e:
        module.fail_json(msg=str(e), changed=False)
changed = False
json_output = {'state': params['state']}
healthcheck = get_healthcheck(client,
name=params['healthcheck_name'],
project_id=conn_params['project_id'],
resource_type=params['healthcheck_type'])
if not healthcheck:
if params['state'] == 'absent':
# Doesn't exist in GCE, and state==absent.
changed = False
module.fail_json(
msg="Cannot delete unknown healthcheck: %s" %
(params['healthcheck_name']))
else:
# Create
changed, json_output['healthcheck'] = create_healthcheck(client,
params=params,
project_id=conn_params['project_id'],
resource_type=params['healthcheck_type'])
elif params['state'] == 'absent':
# Delete
changed, json_output['healthcheck'] = delete_healthcheck(client,
name=params['healthcheck_name'],
project_id=conn_params['project_id'],
resource_type=params['healthcheck_type'])
else:
changed, json_output['healthcheck'] = update_healthcheck(client,
healthcheck=healthcheck,
params=params,
name=params['healthcheck_name'],
project_id=conn_params['project_id'],
resource_type=params['healthcheck_type'])
json_output['changed'] = changed
json_output.update(params)
module.exit_json(**json_output)
if __name__ == '__main__':
main()
|
valentin-krasontovitsch/ansible
|
lib/ansible/modules/cloud/google/gcp_healthcheck.py
|
Python
|
gpl-3.0
| 15,347
|
[
"VisIt"
] |
865a40a33e57d5d780df8925b555c4c9c50a7e82be1b0611de684e6d485eeba4
|
##############################################################################
# adaptiveMD: A Python Framework to Run Adaptive Molecular Dynamics (MD)
# Simulations on HPC Resources
# Copyright 2017 FU Berlin and the Authors
#
# Authors: Jan-Hendrik Prinz
# Contributors:
#
# `adaptiveMD` is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with MDTraj. If not, see <http://www.gnu.org/licenses/>.
##############################################################################
from __future__ import absolute_import
from .analysis import Analysis, DoAnalysis
|
markovmodel/adaptivemd
|
adaptivemd/analysis/__init__.py
|
Python
|
lgpl-2.1
| 1,099
|
[
"MDTraj"
] |
3a85c4ea595c622d84e50659cad62b4fefbd30949d739da72c966a93222fcc61
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pytest
@pytest.mark.skip(reason="This is a long test")
def test_ccsdtqp():
"""Test CCSDTQP on Ne using RHF/cc-pVDZ orbitals"""
import forte.proc.scc as scc
import forte
import psi4
ref_energy = -128.679025538 # from Evangelista, J. Chem. Phys. 134, 224102 (2011).
geom = "Ne"
scf_energy, psi4_wfn = forte.utils.psi4_scf(geom, basis='cc-pVDZ', reference='RHF')
forte_objs = forte.utils.prepare_forte_objects(psi4_wfn, mo_spaces={'FROZEN_DOCC': [1, 0, 0, 0, 0, 0, 0, 0]})
calc_data = scc.run_cc(
forte_objs['as_ints'],
forte_objs['scf_info'],
forte_objs['mo_space_info'],
cc_type='cc',
max_exc=5,
e_convergence=1.0e-10
)
psi4.core.clean()
energy = calc_data[-1][1]
print(energy - ref_energy)
assert energy == pytest.approx(ref_energy, 1.0e-9)
if __name__ == "__main__":
test_ccsdtqp()
|
evangelistalab/forte
|
tests/pytest/sparse_ci/srcc/test_ccsdtqp.py
|
Python
|
lgpl-3.0
| 957
|
[
"Psi4"
] |
b1e62eb3a29943beb28fc81cf5a268051ac37df25a4bd41f4fa4dbb3a8d608a7
|
import os
import re
import jinja2
import subprocess
import markdown
import markdown_include
import MooseDocs
from MooseDocs.html2latex import Translator, BasicExtension, MooseExtension
def latex_options(parser, subparser):
"""
Command line arguments for "latex" command.
Args:
parser: The main argparse parser object.
subparser: The main argparse subparser object.
"""
    tex = subparser.add_parser('latex', help='Generate a tex or pdf document from a markdown file.')
    tex.add_argument('input', type=str, help="The markdown file to convert to latex/pdf.")
    tex.add_argument('--template', type=str, default='basic.tex', help="The template tex file to utilize (default: %(default)s).")
    tex.add_argument('--output', '-o', default=None, help="The 'tex/pdf' file to create; if a .tex extension is provided only the latex will be created. If a pdf extension is provided then the pdf will be generated and all supporting files will be cleaned-up.")
    tex.add_argument('--site', default='http://mooseframework.com/docs/moose_docs/site', help='The website that markdown links should point to in the latex/pdf file.')
    tex.add_argument('--hrule', type=bool, default=False, help='Disable the use of \hrule in generated latex (default: %(default)s).')
tex.add_argument('--headings', type=str, nargs=6, default=['section', 'subsection', 'subsubsection', 'textbf', 'underline', 'emph'], help="The latex commands for the h1, h2, h3, h4, h5, and h6 tags for the document, all must be supplied and only commands valid in the latex body are allowed.")
tex.add_argument('--documentclass', default='article', help="Set the contents of the \documentclass command (default: %(default)s).")
tex.add_argument('--paper', default='letterpaper', help="Set the papersize to utilize (default: %(default)s).")
tex.add_argument('--fontsize', default='12pt', help="Set the font size for the document (default: %(default)s).")
tex.add_argument('--margin', default='1in', help="Set the document margins (default: %(default)s).")
    tex.add_argument('--linkcolor', default='blue', help="Set the hyperref package link color (default: %(default)s).")
tex.add_argument('--tableofcontents', type=bool, default=True, help="Enable/disable the table of contents for the document (default: %(default)s).")
tex.add_argument('--title', type=str, default=None, help="The title of the document.")
tex.add_argument('--subtitle', type=str, default=None, help="The sub title of the document, require 'title' option.")
tex.add_argument('--author', type=str, default=None, help="The author(s) to include on the titlepage, requires 'title' option.")
tex.add_argument('--today', type=bool, default=True, help="Insert the current date on the titlepage, requires 'title' option.")
tex.add_argument('--institution', type=str, default=None, help="Insert the institution on the titlepage, requires 'title' option.")
return tex
def generate_html(input, config_file):
"""
Generates html from Moose flavored markdown.
Args:
input[str]: The *.md file to convert.
config_file[str]: The *.yml configuration file.
"""
# Load the config to extract MooseMarkdown settings
#TODO: Make this more robust
config = MooseDocs.yaml_load(config_file)
md_config = config['markdown_extensions'][-1]['MooseDocs.extensions.MooseMarkdown']
md_config['dot_ext'] = 'svg'
# Convert markdown
with open(input, 'r') as fid:
md = fid.read()
# Extract Jinja2 blocks
settings = dict()
def sub(match):
settings[match.group(1).strip()] = eval(match.group(2))
return ''
md = re.sub(r'@\+\s*set\s+(.*?)=(.*?)\+@', sub, md)
moose = MooseDocs.extensions.MooseMarkdown(**md_config)
parser = markdown.Markdown(extensions=[moose, 'markdown_include.include', 'admonition', 'mdx_math', 'toc', 'extra'])
return parser.convert(md), settings
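def _example_extract_settings():
    # Illustrative sketch, not part of MooseDocs: how the '@+ set key = value +@'
    # blocks are pulled out of the markdown by the regular expression used in
    # generate_html() above.
    settings = dict()
    def sub(match):
        settings[match.group(1).strip()] = eval(match.group(2))
        return ''
    md = "@+ set title = 'My Report' +@\n# A heading\nSome text."
    md = re.sub(r'@\+\s*set\s+(.*?)=(.*?)\+@', sub, md)
    return md, settings  # -> ('\n# A heading\nSome text.', {'title': 'My Report'})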
def generate_latex(html, **kwargs):
"""
Generate latex from html.
Args:
key, value pairs are passed in from main latex command.
"""
# The template .tex file
template = kwargs.pop('template')
# Options used by html2latex conversion
config = dict()
for option in ['hrule', 'site', 'headings']:
config[option] = kwargs.pop(option)
# Build latex
h2l = Translator(extensions=[BasicExtension(**config), MooseExtension(**config)])
tex = h2l.convert(html)
# Build the latex preamble
kwargs['preamble'] = h2l.preamble()
env = jinja2.Environment(loader=jinja2.FileSystemLoader('templates'),
variable_start_string='++',
variable_end_string='++',
comment_start_string='%%',
comment_end_string='%%',
block_start_string='@+',
block_end_string='+@')
template = env.get_template(template)
return template.render(content=tex, **kwargs)
def generate_pdf(tex_file, output):
"""
Create the PDF file using pdflatex and bibtex.
"""
# Working directory
cwd = os.path.dirname(tex_file)
# Call pdflatex
local_file = os.path.basename(tex_file)
subprocess.call(["pdflatex", local_file], cwd=cwd)
subprocess.call(["bibtex", os.path.splitext(local_file)[0]], cwd=cwd)
subprocess.call(["pdflatex", local_file], cwd=cwd)
subprocess.call(["pdflatex", local_file], cwd=cwd)
# Clean-up
for ext in ['.out', '.tex', '.aux', '.log', '.spl', '.bbl', '.toc', '.lof', '.lot', '.blg']:
tmp = tex_file.replace('.tex', ext)
if os.path.exists(tmp):
os.remove(tmp)
def latex(config_file=None, output=None, input=None, **kwargs):
"""
Command for converting markdown file to latex.
"""
# Check that input is valid
if not os.path.exists(input):
raise Exception("The supplied input file does not exist: {}".format(input))
# Determine the output file name
if not output:
output = os.path.splitext(input)[0] + '.pdf'
# Build html
html, settings = generate_html(input, config_file)
# Merge settings from markdown file with arguments passed in to this command
for key, value in kwargs.iteritems():
if not value and key in settings:
kwargs[key] = settings[key]
# Generate latex
tex = generate_latex(html, **kwargs)
# Write tex file
tex_file = output
if output.endswith('.pdf'):
tex_file = output.replace('.pdf', '.tex')
with open(tex_file, 'w') as fid:
fid.write(tex.encode('utf8'))
# Create PDF
if output.endswith('.pdf'):
generate_pdf(tex_file, output)
|
vityurkiv/Ox
|
python/MooseDocs/commands/latex.py
|
Python
|
lgpl-2.1
| 6,417
|
[
"MOOSE"
] |
9f814edf9536fe7471463bfca867d7d906d39e51c72c98ee137aa247d62ebe72
|
#!/usr/bin/env python
#----------------------------------------------------------------------------
#
# Monte Carlo simulation of the kinematic extraction with pPXF. It is useful
# to determine the desired value for the BIAS keyword of the pPXF procedure.
# This procedure generates a plot similar (but not identical) to Figure 6 in
# Cappellari & Emsellem, 2004, PASP, 116, 138.
#
# A rough guideline to determine the BIAS value is the following: choose the *largest*
# value which make sure that in the range sigma>3*velScale and for (S/N)>30 the true values
# for the Gauss-Hermite parameters are well within the rms scatter of the measured values.
# See the documentation in the file ppxf.pro for a more accurate description.
#
# V1.0.0: By Michele Cappellari, Leiden, 28 March 2003
# V1.1.0: Included in the standard PPXF distribution. After feedback
# from Alejandro Garcia Bedregal. MC, Leiden, 13 April 2005
# V1.1.1: Adjust GOODPIXELS according to the size of the convolution kernel.
# MC, Oxford, 13 April 2010
# V1.1.2: Use Coyote Graphics (http://www.idlcoyote.com/) by David W. Fanning.
# The required routines are now included in NASA IDL Astronomy Library.
# MC, Oxford, 29 July 2011
# V2.0.0: Translated from IDL into Python. MC, Oxford, 9 December 2013
# V2.0.1: Support both Python 2.6/2.7 and Python 3.x. MC, Oxford, 25 May 2014
# V2.0.2: Support both Pyfits and Astropy to read FITS files.
# MC, Oxford, 22 October 2015
#
##############################################################################
from __future__ import print_function
try:
import pyfits
except:
from astropy.io import fits as pyfits
from scipy import ndimage, signal
import numpy as np
import matplotlib.pyplot as plt
from time import clock
from ppxf import ppxf
import ppxf_util as util
#----------------------------------------------------------------------------
def rebin(x,factor):
"""
Rebin a one-dimensional vector by averaging
in groups of "factor" adjacent values
"""
return np.mean(x.reshape(-1,factor),axis=1)
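# A worked example, not part of the original distribution: averaging in groups of
# "factor" adjacent values, e.g.
#   rebin(np.array([1., 2., 3., 4., 5., 6.]), 3)  ->  array([2., 5.])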
#----------------------------------------------------------------------------
def ppxf_simulation_example():
dir = 'spectra/'
file = dir + 'Rbi1.30z+0.00t12.59.fits'
hdu = pyfits.open(file)
ssp = hdu[0].data
h = hdu[0].header
lamRange = h['CRVAL1'] + np.array([0.,h['CDELT1']*(h['NAXIS1']-1)])
star, logLam, velscale = util.log_rebin(lamRange, ssp)
# The finite sampling of the observed spectrum is modeled in detail:
# the galaxy spectrum is obtained by oversampling the actual observed spectrum
    # to a high resolution. This represents the true spectrum, which is later resampled
# to lower resolution to simulate the observations on the CCD. Similarly, the
# convolution with a well-sampled LOSVD is done on the high-resolution spectrum,
# and later resampled to the observed resolution before fitting with PPXF.
factor = 10 # Oversampling integer factor for an accurate convolution
starNew = ndimage.interpolation.zoom(star,factor,order=1) # This is the underlying spectrum, known at high resolution
star = rebin(starNew,factor) # Make sure that the observed spectrum is the integral over the pixels
vel = 0.3 # velocity in *pixels* [=V(km/s)/velScale]
h3 = 0.1 # Adopted G-H parameters of the LOSVD
h4 = -0.1
sn = 60. # Adopted S/N of the Monte Carlo simulation
m = 300 # Number of realizations of the simulation
sigmaV = np.linspace(0.8,4,m) # Range of sigma in *pixels* [=sigma(km/s)/velScale]
result = np.zeros((m,4)) # This will store the results
t = clock()
np.random.seed(123) # for reproducible results
for j in range(m):
sigma = sigmaV[j]
dx = int(abs(vel)+4.0*sigma) # Sample the Gaussian and GH at least to vel+4*sigma
x = np.linspace(-dx,dx,2*dx*factor+1) # Evaluate the Gaussian using steps of 1/factor pixels.
w = (x - vel)/sigma
w2 = w**2
gauss = np.exp(-0.5*w2)/(np.sqrt(2.*np.pi)*sigma*factor) # Normalized total(gauss)=1
h3poly = w*(2.*w2 - 3.)/np.sqrt(3.) # H3(y)
h4poly = (w2*(4.*w2 - 12.) + 3.)/np.sqrt(24.) # H4(y)
losvd = gauss *(1. + h3*h3poly + h4*h4poly)
galaxy = signal.fftconvolve(starNew,losvd,mode="same") # Convolve the oversampled spectrum
galaxy = rebin(galaxy,factor) # Integrate spectrum into original spectral pixels
noise = galaxy/sn # 1sigma error spectrum
galaxy = np.random.normal(galaxy, noise) # Add noise to the galaxy spectrum
start = np.array([vel+np.random.random(), sigma*np.random.uniform(0.85,1.15)])*velscale # Convert to km/s
pp = ppxf(star, galaxy, noise, velscale, start,
goodpixels=np.arange(dx,galaxy.size-dx),
plot=False, moments=4, bias=0.5)
result[j,:] = pp.sol
print('Calculation time: %.2f s' % (clock()-t))
plt.clf()
plt.subplot(221)
plt.plot(sigmaV*velscale, result[:,0]-vel*velscale, '+k')
plt.plot(sigmaV*velscale, sigmaV*velscale*0, '-r')
plt.ylim(-40, 40)
plt.xlabel('$\sigma_{in}\ (km\ s^{-1})$')
plt.ylabel('$V - V_{in}\ (km\ s^{-1}$)')
plt.subplot(222)
plt.plot(sigmaV*velscale, result[:,1]-sigmaV*velscale, '+k')
plt.plot(sigmaV*velscale, sigmaV*velscale*0, '-r')
plt.ylim(-40, 40)
plt.xlabel('$\sigma_{in}\ (km\ s^{-1})$')
plt.ylabel('$\sigma - \sigma_{in}\ (km\ s^{-1}$)')
plt.subplot(223)
plt.plot(sigmaV*velscale, result[:,2], '+k')
plt.plot(sigmaV*velscale, sigmaV*velscale*0+h3, '-r')
plt.ylim(-0.2+h3, 0.2+h3)
plt.xlabel('$\sigma_{in}\ (km\ s^{-1})$')
plt.ylabel('$h_3$')
plt.subplot(224)
plt.plot(sigmaV*velscale, result[:,3], '+k')
plt.plot(sigmaV*velscale, sigmaV*velscale*0+h4, '-r')
plt.ylim(-0.2+h4, 0.2+h4)
plt.xlabel('$\sigma_{in}\ (km\ s^{-1})$')
plt.ylabel('$h_4$')
plt.tight_layout()
plt.pause(0.01)
#----------------------------------------------------------------------------
if __name__ == '__main__':
ppxf_simulation_example()
|
moustakas/impy
|
lib/ppxf/ppxf_simulation_example.py
|
Python
|
gpl-2.0
| 6,151
|
[
"Galaxy",
"Gaussian"
] |
b182a81625797c6d9a6f3004b0034beaefad44e9c3fe82cd7dbbe1d42253a020
|
import copy
from django.http.response import HttpResponse
from django.utils.translation import ugettext as _
from corehq.apps.groups.models import Group
from corehq.apps.reports.cache import request_cache
from corehq.apps.reports.standard.cases.basic import CaseListReport
from corehq.apps.api.es import CaseES
from corehq.apps.reports.standard import CustomProjectReport
from corehq.apps.reports.datatables import DataTablesHeader, DataTablesColumn, DataTablesColumnGroup
from dimagi.utils.decorators.memoized import memoized
from corehq.elastic import stream_es_query, ES_URLS
from custom.bihar.reports.display import MCHMotherDisplay, MCHChildDisplay
from dimagi.utils.timezones import utils as tz_utils
import pytz
from corehq.apps.reports.tasks import export_all_rows_task
from custom.bihar.utils import get_all_owner_ids_from_group
class MCHBaseReport(CustomProjectReport, CaseListReport):
ajax_pagination = True
asynchronous = True
exportable = True
exportable_all = True
emailable = False
fix_left_col = True
report_template_path = "bihar/reports/report.html"
model = None
fields = [
'corehq.apps.reports.filters.select.GroupFilter',
'corehq.apps.reports.filters.select.SelectOpenCloseFilter',
]
@property
def case_filter(self):
group_id = self.request_params.get('group', '')
filters = []
if group_id:
group = Group.get(group_id)
users_in_group = get_all_owner_ids_from_group(group)
if users_in_group:
or_stm = []
for user_id in users_in_group:
or_stm.append({'term': {'owner_id': user_id}})
filters.append({"or": or_stm})
else:
filters.append({'term': {'owner_id': group_id}})
return {'and': filters} if filters else {}
@property
@memoized
def case_es(self):
return CaseES(self.domain)
@property
@memoized
def rendered_report_title(self):
return self.name
def date_to_json(self, date):
return tz_utils.adjust_datetime_to_timezone\
(date, pytz.utc.zone, self.timezone.zone).strftime\
('%d/%m/%Y') if date else ""
@property
def get_all_rows(self):
query_results = stream_es_query(q=self.es_query, es_url=ES_URLS["cases"], size=999999, chunksize=100)
case_displays = (self.model(self, self.get_case(case))
for case in query_results)
return self.get_cases(case_displays)
def build_query(self, case_type=None, afilter=None, status=None, owner_ids=None, user_ids=None, search_string=None):
def _domain_term():
return {"term": {"domain.exact": self.domain}}
subterms = [_domain_term(), afilter] if afilter else [_domain_term()]
if case_type:
subterms.append({"term": {"type.exact": case_type}})
if status:
subterms.append({"term": {"closed": (status == 'closed')}})
and_block = {'and': subterms} if subterms else {}
es_query = {
'query': {
'filtered': {
'query': {"match_all": {}},
'filter': and_block
}
},
'sort': self.get_sorting_block(),
'from': self.pagination.start,
'size': self.pagination.count,
}
return es_query
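    # Illustrative sketch, not part of the original report code: the filtered-query
    # body that build_query() assembles for, e.g., case_type='cc_bihar_pregnancy',
    # status='open', no group filter, on a hypothetical domain 'bihar-project':
    #
    #   {'query': {'filtered': {'query': {'match_all': {}},
    #                           'filter': {'and': [{'term': {'domain.exact': 'bihar-project'}},
    #                                              {'term': {'type.exact': 'cc_bihar_pregnancy'}},
    #                                              {'term': {'closed': False}}]}}},
    #    'sort': <sorting block>, 'from': <pagination start>, 'size': <page size>}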
@property
@memoized
def es_query(self):
query = self.build_query(case_type=self.case_type, afilter=self.case_filter,
status=self.case_status)
return query
@property
@request_cache("export")
def export_response(self):
self.request.datespan = None
export_all_rows_task.delay(self.__class__, self.__getstate__())
return HttpResponse()
@property
def rows(self):
case_displays = (self.model(self, self.get_case(case))
for case in self.es_results['hits'].get('hits', []))
return self.get_cases(case_displays)
@property
def export_table(self):
table = super(MCHBaseReport, self).export_table
# remove first row from table headers
table[0][1].pop(0)
return table
class MotherMCHRegister(MCHBaseReport):
name = "Mother MCH register"
slug = "mother_mch_register"
default_case_type = "cc_bihar_pregnancy"
model = MCHMotherDisplay
@property
def headers(self):
headers = DataTablesHeader(DataTablesColumn(_("CHW Name")),
DataTablesColumn(_("Mother Name"), sortable=False),
DataTablesColumnGroup(
_("Beneficiary Information"),
DataTablesColumn(_("Husband Name"), sortable=False),
DataTablesColumn(_("City/ward/village"), sortable=False),
DataTablesColumn(_("Full address"), sortable=False),
DataTablesColumn(_("MCTS ID"), sortable=False),
DataTablesColumn(_("Mobile number"), sortable=False),
DataTablesColumn(_("Whose Mobile Number"), sortable=False),
DataTablesColumn(_("Mother DOB / AGE"), sortable=False),
DataTablesColumn(_("JSY beneficiary"), sortable=False),
DataTablesColumn(_("Caste"), sortable=False)),
DataTablesColumnGroup(
_("Provider Information"),
DataTablesColumn(_("ASHA Name"), sortable=False),
DataTablesColumn(_("Asha phone"), sortable=False),
DataTablesColumn(_("AWC Code , AWC name"), sortable=False),
DataTablesColumn(_("AWW name"), sortable=False),
DataTablesColumn(_("AWW phone number"), sortable=False),
DataTablesColumn(_("LMP"), sortable=False),
DataTablesColumn(_("EDD"), sortable=False)),
DataTablesColumnGroup(
_("First ANC (within 12 weeks)"),
DataTablesColumn(_("ANC 1 Date"), sortable=False),
DataTablesColumn(_("ANC 1 Blood Pressure"), sortable=False),
DataTablesColumn(_("ANC 1 Weight"), sortable=False),
DataTablesColumn(_("ANC Hb"), sortable=False),
DataTablesColumn(_("ANC1 completed within 12 weeks? "), sortable=False)),
DataTablesColumnGroup(
_("Second ANC (14-26 weeks)"),
DataTablesColumn(_("ANC 2 Date"), sortable=False),
DataTablesColumn(_("ANC 2 Blood Pressure"), sortable=False),
DataTablesColumn(_("ANC 2 Weight"), sortable=False)),
DataTablesColumnGroup(
_("Third ANC (28-34 weeks)"),
DataTablesColumn(_("ANC 3 Date"), sortable=False),
DataTablesColumn(_("ANC 3 Blood Pressure"), sortable=False),
DataTablesColumn(_("ANC 3 Weight"), sortable=False)),
DataTablesColumnGroup(
_("Fourth ANC (34 weeks to Delivery)"),
DataTablesColumn(_("ANC 4 Date"), sortable=False),
DataTablesColumn(_("ANC 4 Blood Pressure"), sortable=False),
DataTablesColumn(_("ANC 4 Weight"), sortable=False),
DataTablesColumn(_("TT1 date"), sortable=False),
DataTablesColumn(_("TT2 date"), sortable=False),
DataTablesColumn(_("TT Booster"), sortable=False),
DataTablesColumn(_("Received date of 100 IFA tablets "), sortable=False),
DataTablesColumn(_("Anemia"), sortable=False),
DataTablesColumn(_("Any complications"), sortable=False),
DataTablesColumn(_("RTI /STI <yes/no>"), sortable=False)),
DataTablesColumnGroup(
_("Pregnancy Outcome"),
DataTablesColumn(_("Date of delivery"), sortable=False),
DataTablesColumn(
_("Place of delivery (home - SBA/Non-SBA) (Hospital - public/private)"), sortable=False),
DataTablesColumn(_("Nature of delivery"), sortable=False),
DataTablesColumn(_("Complications"), sortable=False),
DataTablesColumn(_("Discharge date"), sortable=False),
DataTablesColumn(_("Received date of JSY benefits"), sortable=False),
DataTablesColumn(_("Abortion type"), sortable=False)),
DataTablesColumnGroup(
_("Post Delivery Details"),
DataTablesColumn(
_("First PNC visit (within 48 hours / within 7 days/ after 7 days)"), sortable=False),
DataTablesColumn(_("Complications after delivery"), sortable=False),
DataTablesColumn(_("Type of family planning adopted after delivery"), sortable=False),
DataTablesColumn(_("Checked mother and infant immediate after delivery?"), sortable=False),
DataTablesColumn(_("Infant outcome number code"), sortable=False)),
DataTablesColumnGroup(
_("Child 1 Details"),
DataTablesColumn(_("Name of the child"), sortable=False),
DataTablesColumn(_("Gender"), sortable=False),
DataTablesColumn(_("First weight at birth"), sortable=False),
DataTablesColumn(_("Breastfed within an hour?"), sortable=False)),
DataTablesColumnGroup(
_("Child 2 Details"),
DataTablesColumn(_("Name of the child"), sortable=False),
DataTablesColumn(_("Gender"), sortable=False),
DataTablesColumn(_("First weight at birth"), sortable=False),
DataTablesColumn(_("Breastfed within an hour?"), sortable=False)),
DataTablesColumnGroup(
_("Child 3 Details"),
DataTablesColumn(_("Name of the child"), sortable=False),
DataTablesColumn(_("Gender"), sortable=False),
DataTablesColumn(_("First weight at birth"), sortable=False),
DataTablesColumn(_("Breastfed within an hour?"), sortable=False)),
DataTablesColumnGroup(
_("Child 4 Details"),
DataTablesColumn(_("Name of the child"), sortable=False),
DataTablesColumn(_("Gender"), sortable=False),
DataTablesColumn(_("First weight at birth"), sortable=False),
DataTablesColumn(_("Breastfed within an hour?"), sortable=False),
DataTablesColumn(_("Migrate status "), sortable=False))
)
return headers
@classmethod
    def get_cases(cls, case_displays):
for disp in case_displays:
yield [
disp.chw_name,
disp.mother_name,
disp.husband_name,
disp.ward_number,
disp.village,
disp.mcts_id,
disp.mobile_number,
disp.mobile_number_whose,
disp.dob_age,
disp.jsy_beneficiary,
disp.caste,
disp.asha_name,
disp.asha_number,
disp.awc_code_name,
disp.aww_name,
disp.aww_number,
disp.lmp,
disp.edd,
disp.anc_date_1,
disp.blood_pressure_1,
disp.weight_1,
disp.hemoglobin,
disp.anc_completed,
disp.anc_date_2,
disp.blood_pressure_2,
disp.weight_2,
disp.anc_date_3,
disp.blood_pressure_3,
disp.weight_3,
disp.anc_date_4,
disp.blood_pressure_4,
disp.weight_4,
disp.tt1_date,
disp.tt2_date,
disp.tt_booster,
disp.ifa_tablets,
disp.anemia,
disp.complications,
disp.rti_sti,
disp.add,
disp.home_sba_assist,
disp.delivery_nature,
disp.complications,
disp.discharge_date,
disp.jsy_money_date,
disp.abortion_type,
disp.first_pnc_time,
disp.delivery_complications,
disp.family_planning_type,
disp.all_pnc_on_time,
disp.num_children,
disp.case_name_1,
disp.gender_1,
disp.first_weight_1,
disp.breastfed_hour_1,
disp.case_name_2,
disp.gender_2,
disp.first_weight_2,
disp.breastfed_hour_2,
disp.case_name_3,
disp.gender_3,
disp.first_weight_3,
disp.breastfed_hour_3,
disp.case_name_4,
disp.gender_4,
disp.first_weight_4,
disp.breastfed_hour_4,
disp.status
]
@property
def fixed_cols_spec(self):
return dict(num=2, width=350)
class ChildMCHRegister(MCHBaseReport):
name = "Child MCH register"
slug = "child_mch_register"
default_case_type = "cc_bihar_newborn"
model = MCHChildDisplay
@property
def headers(self):
headers = DataTablesHeader(DataTablesColumn(_("CHW Name")),
DataTablesColumn(_("Child Name"), sortable=False),
DataTablesColumn(_("Father and Mother Name"), sortable=False),
DataTablesColumnGroup(
_("Beneficiary Information"),
DataTablesColumn(_("Mother's MCTS ID"), sortable=False),
DataTablesColumn(_("Gender"), sortable=False),
DataTablesColumn(_("City/ward/village"), sortable=False),
DataTablesColumn(_("Address"), sortable=False),
DataTablesColumn(_("Mobile number"), sortable=False),
DataTablesColumn(_("Whose Mobile Number"), sortable=False),
DataTablesColumn(_("DOB / AGE"), sortable=False),
DataTablesColumn(_("Place of delivery (home - SBA/Non-SBA) (Hospital - public/private)"), sortable=False),
DataTablesColumn(_("Caste"), sortable=False)),
DataTablesColumnGroup(
_("Provider Information"),
DataTablesColumn(_("ASHA Name"), sortable=False),
DataTablesColumn(_("Asha phone"), sortable=False),
DataTablesColumn(_("AWC Code , AWC name"), sortable=False),
DataTablesColumn(_("AWW name"), sortable=False),
DataTablesColumn(_("AWW phone number"), sortable=False)),
DataTablesColumnGroup(
_("At Birth"),
DataTablesColumn(_("BCG"), sortable=False),
DataTablesColumn(_("OPV0"), sortable=False),
DataTablesColumn(_("Hepatitis-Birth dose "), sortable=False)),
DataTablesColumnGroup(
_("At 6 Weeks"),
DataTablesColumn(_("DPT1"), sortable=False),
DataTablesColumn(_("OPV1"), sortable=False),
DataTablesColumn(_("Hepatitis-B1"), sortable=False)),
DataTablesColumnGroup(
_("At 10 Weeks"),
DataTablesColumn(_("DPT2"), sortable=False),
DataTablesColumn(_("OPV2"), sortable=False),
DataTablesColumn(_("Hepatitis-B2"), sortable=False)),
DataTablesColumnGroup(
_("At 14 Weeks"),
DataTablesColumn(_("DPT3"), sortable=False),
DataTablesColumn(_("OPV3"), sortable=False),
DataTablesColumn(_("Hepatitis-B3"), sortable=False)),
DataTablesColumnGroup(
_("Between 9-12 Months"),
DataTablesColumn(_("Measles (1st dose)"), sortable=False)),
DataTablesColumnGroup(
_("Between 16-24 Months"),
DataTablesColumn(
_("Vitamin A dose-1 "), sortable=False),
DataTablesColumn(_("Measles (2nd dose)/ MR Vaccine"))),
DataTablesColumnGroup(
_("After 2 Years"),
DataTablesColumn(_("DPT Booster"), sortable=False),
DataTablesColumn(_("OPV Booster"), sortable=False),
DataTablesColumn(_("Vitamin A dose-2"), sortable=False),
DataTablesColumn(_("Vitamin A dose-3"), sortable=False),
DataTablesColumn(_("JE Vaccine"), sortable=False))
)
return headers
@classmethod
def get_cases(self, case_displays):
for disp in case_displays:
yield [
disp.chw_name,
disp.child_name,
disp.father_mother_name,
disp.mcts_id,
disp.gender,
disp.ward_number,
disp.village,
disp.mobile_number,
disp.mobile_number_whose,
disp.dob_age,
disp.home_sba_assist,
disp.caste,
disp.asha_name,
disp.asha_number,
disp.awc_code_name,
disp.aww_name,
disp.aww_number,
disp.bcg_date,
disp.opv_0_date,
disp.hep_b_0_date,
disp.dpt_1_date,
disp.opv_1_date,
disp.hep_b_1_date,
disp.dpt_2_date,
disp.opv_2_date,
disp.hep_b_2_date,
disp.dpt_3_date,
disp.opv_3_date,
disp.hep_b_3_date,
disp.measles_date,
disp.vit_a_1_date,
disp.date_measles_booster,
disp.dpt_booster_date,
disp.opv_booster_date,
disp.vit_a_2_date,
disp.vit_a_3_date,
disp.date_je
]
@property
def fixed_cols_spec(self):
return dict(num=3, width=450)
|
SEL-Columbia/commcare-hq
|
custom/bihar/reports/mch_reports.py
|
Python
|
bsd-3-clause
| 21,503
|
[
"VisIt"
] |
751765acc1932a72a7ec47976cbadf72a5bd2433f9db8d053134858ae7488e88
|
"""
Plots a Gaussian mixture model fit to HMMER scores, compared with Bagel scores
"""
import numpy
from scipy.stats import gumbel_r
from scipy.stats import norm
from numpy import random
import os,site,sys
from math import *
base_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
base_path="%s/src"%base_path
for directory_name in os.listdir(base_path):
site.addsitedir(os.path.join(base_path, directory_name))
import hmmer
import pylab
from mixture import GaussianMixtureModel
import matplotlib.pyplot as plt
from sklearn import mixture
def best_scores(hits):
    # For each consecutive run of hits sharing a name, keep only the maximum score.
    prev = hits[0]
    prev_name, _ = prev
    buf = []
    maxScores = []
    for hit in hits:
        name, score = hit
        if name == prev_name:
            buf.append(score)
        else:
            maxScores.append(max(buf))
            buf = [score]
            prev_name = name
    if buf:
        # Flush the final group, which the loop above never appends.
        maxScores.append(max(buf))
    return maxScores
folder = "/Users/mortonyt/Documents/MiamiBio/workspace"
toxin_scores = hmmer.parse_scores("%s/boa_scores.out"%folder)
#modifier_scores = hmmer.parse_scores("%s/modifier.out"%folder)
#immunity_scores = hmmer.parse_scores("%s/immunity.out"%folder)
#regulator_scores = hmmer.parse_scores("%s/regulator.out"%folder)
#transport_scores = hmmer.parse_scores("%s/transport.out"%folder)
bagel_scores = hmmer.parse_scores("%s/bagel_toxin.out"%folder)
#all_scores = numpy.array(toxin_scores+modifier_scores+immunity_scores+regulator_scores+transport_scores)
all_scores = toxin_scores
print all_scores[:10]
print "Number of all scores",len(all_scores)
print "Number of bagel scores",len(bagel_scores)
gmm = GaussianMixtureModel(all_scores)
#params = gmm.expectation_maximization(1000)
g = mixture.GMM(n_components=2)
model = g.fit(all_scores)
x = numpy.linspace(min(all_scores),max(all_scores),1000)
logprob,reps = model.score_samples(x)
pdf = numpy.exp(logprob)
indiv_pdf = reps*pdf[:,numpy.newaxis]
print "Calculated pdf"
n, bins, patches=plt.hist(all_scores, 100, normed=True,histtype="stepfilled")
hmmer=plt.setp(patches, 'facecolor', 'r', 'alpha', 0.5,)
n, bins, patches=plt.hist(bagel_scores, 30,normed=True,histtype="stepfilled")
bagel=plt.setp(patches, 'facecolor', 'b', 'alpha', 0.5)
plt.legend(['HMMER scores','Bagel scores'])
plt.xlabel('Score')
plt.ylabel('Normalized Counts')
plt.xlim(0,1000)
plt.ylim(0,0.02)
#plt.plot(x, pdf, '-k')
#plt.plot(x, indiv_pdf, '--r')
plt.show()
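#------------------------------------------------------------------
# Illustrative sketch, not part of the original script: the
# mixture.GMM API used above was removed from scikit-learn long ago.
# A rough modern equivalent, assuming scikit-learn >= 0.20 is
# installed (kept commented out, like the other alternatives above):
#------------------------------------------------------------------
#from sklearn.mixture import GaussianMixture
#scores = numpy.asarray(all_scores, dtype=float).reshape(-1, 1)  # score_samples needs 2-D input
#gm = GaussianMixture(n_components=2).fit(scores)
#grid = numpy.linspace(scores.min(), scores.max(), 1000).reshape(-1, 1)
#pdf = numpy.exp(gm.score_samples(grid))                      # mixture density on the grid
#indiv_pdf = gm.predict_proba(grid) * pdf[:, numpy.newaxis]   # per-component densities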
|
idoerg/BOA
|
scripts/plot_mixture.py
|
Python
|
gpl-3.0
| 2,262
|
[
"Gaussian"
] |
100deafd87e6cd3d188c2d705bce7aa3b5141f1d304ce81d7595f772fbce6d4b
|
import os
import re
from setuptools import setup, find_packages
_here = os.path.dirname(__file__)
_init = os.path.join(_here, 'van', 'contactology', '__init__.py')
_init = open(_init, 'r').read()
VERSION = re.search(r'^__version__ = "(.*)"', _init, re.MULTILINE).group(1)
README = open(os.path.join(_here, 'README.txt'), 'r').read()
install_requires = [
'pyOpenSSL',
'setuptools',
'six',
'Twisted']
setup(name="van.contactology",
version=VERSION,
packages=find_packages(),
description="Contactology API for Twisted",
author_email='brian@vanguardistas.net',
long_description=README,
namespace_packages=["van"],
install_requires=install_requires,
test_suite="van.contactology.tests",
tests_require=['mock'],
include_package_data=True,
zip_safe=False,
)
|
jinty/van.contactology
|
setup.py
|
Python
|
bsd-3-clause
| 836
|
[
"Brian"
] |
84f5342265eb29c4d297586cbf7dfec5c842e8ca5a756d8a730ba6810b89e5f0
|
#! /usr/bin/env python
#
# Copyright (C) 2015-2016 Rich Lewis <rl403@cam.ac.uk>
# License: 3-clause BSD
"""
## skchem.core.mol
Defining molecules in scikit-chem.
"""
import copy
import rdkit.Chem
import rdkit.Chem.inchi
from rdkit.Chem import AddHs, RemoveHs
from rdkit.Chem.rdMolDescriptors import CalcMolFormula, CalcExactMolWt
import json
from .atom import AtomView
from .bond import BondView
from .conformer import ConformerView
from .base import ChemicalObject, PropertyView
from ..utils import Suppressor
class Mol(rdkit.Chem.rdchem.Mol, ChemicalObject):
"""Class representing a Molecule in scikit-chem.
Mol objects inherit directly from rdkit Mol objects. Therefore, they
contain atom and bond information, and may also include properties and
atom bookmarks.
Example:
Constructors are implemented as class methods with the `from_` prefix.
>>> import skchem
>>> m = skchem.Mol.from_smiles('CC(=O)Cl'); m # doctest: +ELLIPSIS
<Mol name="None" formula="C2H3ClO" at ...>
This is an rdkit Mol:
>>> from rdkit.Chem import Mol as RDKMol
>>> isinstance(m, RDKMol)
True
A name can be given at initialization:
>>> m = skchem.Mol.from_smiles('CC(=O)Cl', name='acetyl chloride'); m # doctest: +ELLIPSIS
<Mol name="acetyl chloride" formula="C2H3ClO" at ...>
>>> m.name
'acetyl chloride'
Serializers are implemented as instance methods with the `to_` prefix.
>>> m.to_smiles()
'CC(=O)Cl'
>>> m.to_inchi()
'InChI=1S/C2H3ClO/c1-2(3)4/h1H3'
>>> m.to_inchi_key()
'WETWJCDKMRHUPV-UHFFFAOYSA-N'
RDKit properties are accessible through the `props` property:
>>> m.SetProp('example_key', 'example_value') # set prop with rdkit directly
>>> m.props['example_key']
'example_value'
>>> m.SetIntProp('float_key', 42) # set int prop with rdkit directly
>>> m.props['float_key']
42
They can be set too:
>>> m.props['example_set'] = 'set_value'
>>> m.GetProp('example_set') # getting with rdkit directly
'set_value'
We can export the properties into a dict or a pandas series:
>>> m.props.to_series()
example_key example_value
example_set set_value
float_key 42
dtype: object
Atoms and bonds are provided in views:
>>> m.atoms # doctest: +ELLIPSIS
<AtomView values="['C', 'C', 'O', 'Cl']" at ...>
>>> m.bonds # doctest: +ELLIPSIS
<BondView values="['C-C', 'C=O', 'C-Cl']" at ...>
These are iterable:
>>> [a.symbol for a in m.atoms]
['C', 'C', 'O', 'Cl']
The view provides shorthands for some attributes to get these:
>>> m.atoms.symbol # doctest: +ELLIPSIS
array(['C', 'C', 'O', 'Cl'], dtype=...)
Atom and bond props can also be set:
>>> m.atoms[0].props['atom_key'] = 'atom_value'
>>> m.atoms[0].props['atom_key']
'atom_value'
The properties for atoms on the whole molecule can be accessed like so:
>>> m.atoms.props # doctest: +ELLIPSIS
<MolPropertyView values="{'atom_key': ['atom_value', None, None, None]}" at ...>
The properties can be exported as a pandas dataframe
>>> m.atoms.props.to_frame()
atom_key
atom_idx
0 atom_value
1 None
2 None
3 None
"""
def __init__(self, *args, **kwargs):
"""
The default constructor.
Note:
This will be rarely used, as it can only create an empty molecule.
Args:
*args: Arguments to be passed to the rdkit Mol constructor.
**kwargs: Arguments to be passed to the rdkit Mol constructor.
"""
super(Mol, self).__init__(*args, **kwargs)
self.__two_d = None # set in constructor
@property
def name(self):
""" str: The name of the molecule.
        Returns None if no name has been set."""
try:
return self.GetProp('_Name')
except KeyError:
return None
@name.setter
def name(self, value):
if value is None:
self.ClearProp('_Name')
else:
self.SetProp('_Name', value)
@property
def atoms(self):
""" List[skchem.Atom]: An iterable over the atoms of the molecule. """
if not hasattr(self, '_atoms'):
self._atoms = AtomView(self)
return self._atoms
@property
def bonds(self):
""" List[skchem.Bond]: An iterable over the bonds of the molecule. """
if not hasattr(self, '_bonds'):
self._bonds = BondView(self)
return self._bonds
@property
def mass(self):
""" float: the mass of the molecule. """
return CalcExactMolWt(self)
@property
def props(self):
""" PropertyView: A dictionary of the properties of the molecule. """
if not hasattr(self, '_props'):
self._props = PropertyView(self)
return self._props
@property
def conformers(self):
""" List[Conformer]: conformers of the molecule. """
if not hasattr(self, '_conformers'):
self._conformers = ConformerView(self)
return self._conformers
def to_formula(self):
""" str: the chemical formula of the molecule.
        Raises:
            ValueError"""
# formula may be undefined if atoms are uncertainly typed
        # e.g. if the molecule was initialized through SMARTS
try:
with Suppressor():
return CalcMolFormula(self)
except RuntimeError:
raise ValueError('Formula is undefined for {}'.format(self))
def add_hs(self, inplace=False, add_coords=True, explicit_only=False,
only_on_atoms=False):
""" Add hydrogens to self.
Args:
inplace (bool):
Whether to add Hs to `Mol`, or return a new `Mol`.
add_coords (bool):
Whether to set 3D coordinate for added Hs.
explicit_only (bool):
Whether to add only explicit Hs, or also implicit ones.
only_on_atoms (iterable<bool>):
An iterable specifying the atoms to add Hs.
Returns:
skchem.Mol:
`Mol` with Hs added.
"""
if inplace:
msg = 'Inplace addition of Hs is not yet supported.'
raise NotImplementedError(msg)
raw = AddHs(self, addCoords=add_coords, onlyOnAtoms=only_on_atoms,
explicitOnly=explicit_only)
return self.__class__.from_super(raw)
def remove_hs(self, inplace=False, sanitize=True, update_explicit=False,
implicit_only=False):
""" Remove hydrogens from self.
Args:
inplace (bool):
                Whether to remove Hs from `Mol` in place, or return a new `Mol`.
sanitize (bool):
Whether to sanitize after Hs are removed.
update_explicit (bool):
Whether to update explicit count after the removal.
implicit_only (bool):
                Whether to remove only implicit Hs, or both implicit and explicit Hs.
Returns:
skchem.Mol:
`Mol` with Hs removed.
"""
if inplace:
            msg = 'Inplace removal of Hs is not yet supported.'
raise NotImplementedError(msg)
raw = RemoveHs(self, implicitOnly=implicit_only,
updateExplicitCount=update_explicit, sanitize=sanitize)
return self.__class__.from_super(raw)
def to_dict(self, kind="chemdoodle", conformer_id=-1):
""" A dictionary representation of the molecule.
Args:
kind (str):
The type of representation to use. Only `chemdoodle` is
currently supported.
Returns:
dict:
dictionary representation of the molecule."""
if kind == "chemdoodle":
return self._to_dict_chemdoodle(conformer_id=conformer_id)
else:
raise NotImplementedError
def _to_dict_chemdoodle(self, conformer_id=-1):
""" Chemdoodle dict representation of the molecule.
Documentation of the format may be found on the `chemdoodle website \
<https://web.chemdoodle.com/docs/chemdoodle-json-format>`_"""
try:
pos = self.conformers[conformer_id].positions
except IndexError as e:
if conformer_id == -1:
# no conformers available, so we generate one with 2d coords,
# save the positions, then delete the conf
self.conformers.append_2d()
pos = self.conformers[0].positions
del self.conformers[0]
else:
raise e
atoms = [{'x': p[0], 'y': p[1], 'z': p[2], 'l': s}
for s, p in zip(self.atoms.symbol, pos.round(4))]
bonds = [b.to_dict() for b in self.bonds]
return {"m": [{"a": atoms, "b": bonds}]}
def to_json(self, kind='chemdoodle'):
""" Serialize a molecule using JSON.
Args:
kind (str):
The type of serialization to use. Only `chemdoodle` is
currently supported.
Returns:
str: the json string. """
return json.dumps(self.to_dict(kind=kind))
def to_inchi_key(self):
""" The InChI key of the molecule.
Returns:
str: the InChI key.
Raises:
RuntimeError"""
if not rdkit.Chem.inchi.INCHI_AVAILABLE:
raise ImportError("InChI module not available.")
res = rdkit.Chem.InchiToInchiKey(self.to_inchi())
if res is None:
raise RuntimeError("An InChI key could not be generated.")
return res
def to_binary(self):
""" Serialize the molecule to binary encoding.
Returns:
bytes: the molecule in bytes.
Notes:
Due to limitations in RDKit, not all data is serialized. Notably,
properties are not, so e.g. compound names are not saved."""
return self.ToBinary()
@classmethod
def from_binary(cls, binary):
""" Decode a molecule from a binary serialization.
Args:
binary: The bytes string to decode.
Returns:
skchem.Mol: The molecule encoded in the binary."""
return cls(binary)
def copy(self):
""" Return a copy of the molecule. """
return Mol.from_super(copy.deepcopy(self))
def __repr__(self):
try:
formula = self.to_formula()
except ValueError:
# if we can't generate the formula, just say it is unknown
formula = 'unknown'
return '<{klass} name="{name}" formula="{form}" at {address}>'.format(
klass=self.__class__.__name__,
name=self.name,
form=formula,
address=hex(id(self)))
def __contains__(self, item):
if isinstance(item, Mol):
return self.HasSubstructMatch(item)
else:
msg = 'No way to check if {} contains {}'.format(self, item)
raise NotImplementedError(msg)
def __eq__(self, item):
if isinstance(item, self.__class__):
return (self in item) and (item in self)
else:
return False
def __str__(self):
return '<Mol: {}>'.format(self.to_smiles())
def bind_constructor(constructor_name, name_to_bind=None):
""" Bind an (rdkit) constructor to the class """
@classmethod
def constructor(_, in_arg, name=None, *args, **kwargs):
""" The constructor to be bound. """
m = getattr(rdkit.Chem, 'MolFrom' + constructor_name)(in_arg, *args,
**kwargs)
if m is None:
raise ValueError('Failed to parse molecule, {}'.format(in_arg))
m = Mol.from_super(m)
m.name = name
return m
setattr(Mol, 'from_{}'.format(constructor_name).lower()
if name_to_bind is None else name_to_bind, constructor)
def bind_serializer(serializer_name, name_to_bind=None):
""" Bind an (rdkit) serializer to the class """
def serializer(self, *args, **kwargs):
""" The serializer to be bound. """
with Suppressor():
return getattr(rdkit.Chem, 'MolTo' + serializer_name)(self, *args,
**kwargs)
setattr(Mol, 'to_{}'.format(serializer_name).lower()
if name_to_bind is None else name_to_bind, serializer)
CONSTRUCTORS = ['Inchi', 'Smiles', 'Mol2Block', 'Mol2File', 'MolBlock',
'MolFile', 'PDBBlock', 'PDBFile', 'Smarts', 'TPLBlock',
'TPLFile']
SERIALIZERS = ['Inchi', 'Smiles', 'MolBlock', 'MolFile', 'PDBBlock', 'Smarts',
'TPLBlock', 'TPLFile']
list(map(bind_constructor, CONSTRUCTORS))
list(map(bind_serializer, SERIALIZERS))
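# Illustrative note, not part of the original module: the __contains__ and
# __eq__ methods above define molecular equality as mutual substructure
# matching. A hypothetical doctest (assuming RDKit is installed) would be:
#
#   >>> Mol.from_smiles('C') in Mol.from_smiles('CCO')      # doctest: +SKIP
#   True
#   >>> Mol.from_smiles('CCO') == Mol.from_smiles('OCC')    # doctest: +SKIP
#   True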
|
richlewis42/scikit-chem
|
skchem/core/mol.py
|
Python
|
bsd-3-clause
| 13,323
|
[
"RDKit"
] |
37387b57993e997498241edebcdeacf3a64319db7f5be5a117c77fb528f527e1
|
import numpy
import vtk
from .raw import points as to_vtk_points
from .raw import vertices as to_vtk_vertices
from .raw import polygons as to_vtk_polygons
from numpy2vtk.exceptions import Numpy2VtkFormatException
def mesh(points, polys, z_index=0):
"""
    Returns the VTK representation of a mesh built from the patches specified by points and polys.
    `points` holds the mesh vertices and `polys` is an array of patches, each patch given as indices into the
    points array.
Args:
points (numpy.ndarray<float> or vtk.vtkPoints): The points that the mesh consist of.
If it's a numpy array it should be of dimensions (n,2) or (n,3)
polys (numpy.ndarray<int>): Array of patches, should be of shape nxm for n patches with m points per patch
        z_index (float): The value used to fill the z-coordinate of 2d points (only applicable for (n,2) input arrays)
Returns:
poly_data (vtk.vtkPolyData): VTK polydata representation of the mesh
"""
if not (isinstance(points, numpy.ndarray) or isinstance(points, vtk.vtkPoints)):
raise Numpy2VtkFormatException(
'mesh points needs to be numpy array or vtk.vtkPoints'
)
if isinstance(points, numpy.ndarray):
vtk_points = to_vtk_points(points, z_index=z_index)
else:
vtk_points = points
number_of_points = vtk_points.GetNumberOfPoints()
if numpy.logical_or(polys > number_of_points-1, polys < 0).any():
raise Numpy2VtkFormatException(
'mesh polys references a point index that does not exist'
)
vtk_vertices = to_vtk_vertices(numpy.array(range(number_of_points), dtype=numpy.int))
vtk_polygons = to_vtk_polygons(polys)
poly_data = vtk.vtkPolyData()
poly_data.SetPoints(vtk_points)
poly_data.SetVerts(vtk_vertices)
poly_data.SetPolys(vtk_polygons)
return poly_data
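if __name__ == '__main__':
    # Illustrative usage, not part of the original module: a minimal sketch
    # that builds two triangles sharing an edge (requires numpy and vtk; the
    # example_* names below are purely illustrative).
    example_points = numpy.array([[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0]])
    example_polys = numpy.array([[0, 1, 2], [0, 2, 3]])
    example_mesh = mesh(example_points, example_polys, z_index=0.5)
    print(example_mesh.GetNumberOfPoints())  # 4 vertices
    print(example_mesh.GetNumberOfPolys())   # 2 triangular patches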
|
selaux/numpy2vtk
|
numpy2vtk/data/mesh.py
|
Python
|
lgpl-3.0
| 1,903
|
[
"VTK"
] |
8b15bc56b8f5b606b9ce4a7e0239933ebe0ae413244274f0dda7e3d18c893cdf
|
## Copyright (c) 2001-2009, Scott D. Peckham
## November 2009 (converted from IDL)
#-----------------------------------------------------------------------
# Notes: Use the random midpoint displacement method to create
# a fractal surface/landscape (due to Mandelbrot).
# This can be used as initial surface for landscape
# evolution models and is used by Alan Howard's
# MARSSIM model.
#-----------------------------------------------------------------------
#
# unit_test()
# make_fractal_surface()
#
#-----------------------------------------------------------------------
from numpy import *
import numpy
import rtg_files
import rti_files
#-----------------------------------------------------------------------
def unit_test():
z = make_fractal_surface(n_levels=8, H=1.5)
print 'min(z), max(z) =', z.min(), z.max()
print 'Finished with unit_test().'
print ' '
# unit_test()
#-----------------------------------------------------------------------
def make_fractal_surface(n_levels, H=1.5, RTG_file=None,
sigma=float64(1),
scale=float64(1),
seed=168993,
X_WRAP=False, Y_WRAP=False,
SILENT=False):
#---------------------------------------------------------
# Notes: Can apply SCALE at very end. A value of about
# 0.01 should give results similar to Howard's
# MATRIX_2D with ERROR=0.02.
    #        H is the roughness (Hurst-type) exponent; it sets the
    #        per-level damping factor, 1 / sqrt(2**H), used below.
# Set the X_WRAP or Y_WRAP keywords in order to
# impose periodic boundaries on the left & right
# or top & bottom, respectively.
# If n_levels = 7, nx = 129
# If n_levels = 8, nx = 257
# If n_levels = 9, nx = 513
# If n_levels = 10, nx = 1025
# If n_levels = 11, nx = 2049
#----------------------------------------------------------
if (n_levels > 11):
print '********************************************'
print ' ERROR: Max number of levels is 11,'
print ' which gives a grid size of 2049 x 2049.'
print '********************************************'
print ' '
return
if not(SILENT):
print 'Creating a fractal surface...'
#------------------
# Initialize vars
#------------------
factor = float64(1) / sqrt(float64(2) ** H) #############
nx = (int32(2) ** n_levels) + 1
ny = nx
step = nx - 1
if not(SILENT):
print 'nx, ny =', nx, ',', ny
#----------------------------------------------
x_vec = numpy.arange(nx, dtype='Int16')
y_vec = numpy.arange(ny, dtype='Int16')
cols, rows = numpy.meshgrid( x_vec, y_vec )
## rows = reshape(repeat(y_vec, nx), (ny, nx))
## cols = rot90(rows) # (will work if nx=ny)
sum_grid = (cols + rows)
#----------------------------------------------
DONE = zeros([ny, nx], dtype='UInt8')
DONE[0,0] = 1
DONE[0,nx - 1] = 1
DONE[ny - 1,0] = 1
DONE[ny - 1,nx - 1] = 1
#----------------------------------------------
EDGE = zeros([ny, nx], dtype='UInt8')
EDGE[:,0] = 1
EDGE[:,nx - 1] = 1
EDGE[0,:] = 1
EDGE[ny - 1,:] = 1
#------------------------------
# Initialize grid of z-values
#------------------------------
numpy.random.seed(seed)
v = random.normal(loc=0.0, scale=1.0, size=(2, 2))
z = zeros([ny, nx], dtype='Float64')
z[0,0] = v[0,0]
z[0,nx - 1] = v[0,1]
z[ny - 1,0] = v[1,0]
z[ny - 1,nx - 1] = v[1,1]
#------------------------------------
if (X_WRAP):
z[0,nx - 1] = z[0,0]
z[ny - 1,nx - 1] = z[ny - 1,0]
if (Y_WRAP):
z[ny - 1,0] = z[0,0]
z[ny - 1,nx - 1] = z[0,nx - 1]
#------------------------------------
zF = z.flat ## (an iterator to allow 1D indexing) ##########
for k in xrange( 1, (n_levels + 1) ):
if not(SILENT):
print 'Working on level', k
step = (step / 2)
#---------------------------------------
# Get midpoint locations of this level
#---------------------------------------
w = where(logical_and(logical_and(logical_and(((cols.flat % step) == 0), \
((rows.flat % step) == 0)),
logical_not(DONE.flat)), logical_not(EDGE.flat)))
n_mid = size(w[0])
#########################
# Need this !!
#########################
w = w[0]
#-----------------------------------------
# Break these into two groups, w1 and w2
#-----------------------------------------
a1 = where((sum_grid.flat[w] % (2 * step)) == 0) # (1D array)
n1 = size(a1[0])
a2 = where((sum_grid.flat[w] % (2 * step)) != 0) # (1D array)
n2 = size(a2[0])
if (n1 != 0):
w1 = w[a1[0]]
if (n2 != 0):
w2 = w[a2[0]]
#---------------------------------------------
# Compute midpoint elevations as the average
# of the diagonal neighbor elevations plus
# a rescaled Gaussian random variable
#---------------------------------------------
UL = w1 - step * (nx + 1)
UR = w1 - step * (nx - 1)
LL = w1 + step * (nx - 1)
LR = w1 + step * (nx + 1)
#---------------------------
### numpy.random.seed(seed)
ran = factor * sigma * random.normal(loc=0.0, scale=1.0, size=n1)
zF[w1] = ((zF[UL] + zF[UR] + zF[LL] + zF[LR]) / float64(4)) + ran
DONE.flat[w1] = 1
#----------------------------------------------
# Compute midpoint elevations of remaining
# pixels at this scale as the average of the
# nearest neighbor elevations plus a rescaled
# Gaussian random variable. n2=0 at start.
#----------------------------------------------
if (n2 != 0):
T = w2 - (step * nx)
B = w2 + (step * nx)
R = w2 + step
L = w2 - step
#----------------------------
### numpy.random.seed(seed)
ran = factor * sigma * random.normal(loc=0.0, scale=1.0, size=n2)
zF[w2] = ((zF[T] + zF[B] + zF[L] + zF[R]) / float64(4)) + ran
DONE.flat[w2] = 1
#--------------------------------------------
# Compute elevations of edge pixels at this
# scale as average of 3 nearest neighbors
# plus a rescaled Gaussian random variable.
#--------------------------------------------
jump = (step * nx)
#----------------------------
L = where(logical_and(logical_and((cols.flat == 0), \
((rows.flat % step) == 0)), \
logical_not(DONE.flat)))
nL = size(L[0])
T = L - jump
B = L + jump
R = L + step
### numpy.random.seed(seed)
ran = factor * sigma * random.normal(loc=0.0, scale=1.0, size=nL)
zF[L] = ((zF[T] + zF[B] + zF[R]) / float64(3)) + ran
DONE.flat[L] = 1
#-----------------------------------------------------------------------------
R = where(logical_and(logical_and((cols.flat == (nx - 1)), \
((rows.flat % step) == 0)), \
logical_not(DONE.flat)))
nR = size(R[0])
if not(X_WRAP):
L = R - step
T = R - jump
B = R + jump
### numpy.random.seed(seed)
ran = factor * sigma * random.normal(loc=0.0, scale=1.0, size=nR)
zF[R] = ((zF[L] + zF[T] + zF[B]) / float64(3)) + ran
else:
zF[R] = zF[L]
DONE.flat[R] = 1
#-----------------------------------------------------------------------------
T = where(logical_and(logical_and((rows.flat == 0), \
((cols.flat % step) == 0)), \
logical_not(DONE.flat)))
nT = size(T[0])
L = T - step
R = T + step
B = T + jump
### numpy.random.seed(seed)
ran = factor * sigma * random.normal(loc=0.0, scale=1.0, size=nT)
zF[T] = ((zF[L] + zF[R] + zF[B]) / float64(3)) + ran
DONE.flat[T] = 1
#-----------------------------------------------------------------------------
B = where(logical_and(logical_and((rows.flat == (ny - 1)), \
((cols.flat % step) == 0)), \
logical_not(DONE.flat)))
nB = size(B[0])
if not(Y_WRAP):
L = B - step
R = B + step
T = B - jump
### numpy.random.seed(seed)
ran = factor * sigma * random.normal(loc=0.0, scale=1.0, size=nB)
zF[B] = ((zF[L] + zF[R] + zF[T]) / float64(3)) + ran
else:
zF[B] = zF[T]
DONE.flat[B] = 1
#-----------------------------------------------------------------------------
#-----------------------
# Rescale the values ?
#-----------------------
if (scale != 1.0):
z = (z * scale)
#-------------------------------------------
# Option to save to RTG file with RTI file
#-------------------------------------------
if (RTG_file is not None):
###############################
# CHECK FOR OVERWRITE HERE !
###############################
###############################
info = rti_files.make_info( RTG_file, nx, ny,
xres=100.0, yres=100.0,
gmin=z.min(), gmax=z.max(),
data_source="Midpoint displacement method" )
rti_files.write_info(RTG_file, info)
RTG_type = 'FLOAT'
## RTG_type = 'DOUBLE'
rtg_files.write_grid( z, RTG_file, info, RTG_type=RTG_type)
#----------------------
# Print final message
#----------------------
if not(SILENT):
print 'Finished.'
print ' '
return z
# make_fractal_surface()
#-----------------------------------------------------------------------
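def check_level_relations():

    #------------------------------------------------------------
    # Illustrative sketch, not part of the original module: a
    # quick check of the relations stated in the Notes of
    # make_fractal_surface(), namely nx = (2 ** n_levels) + 1
    # and the damping factor 1 / sqrt(2 ** H).
    #------------------------------------------------------------
    for n_levels in range(7, 12):
        print('n_levels = %d  ->  nx = %d' % (n_levels, (2 ** n_levels) + 1))
    print('Damping factor for H = 1.5: %f' % (1.0 / sqrt(float64(2) ** 1.5)))

# check_level_relations()
#-----------------------------------------------------------------------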
|
mdpiper/topoflow
|
topoflow/utils/midpoints.py
|
Python
|
mit
| 10,558
|
[
"Gaussian"
] |
2d7d6039cfd62627390bf67f236c07a75df9627c187b0f72ca6b6eb9ebbf866c
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
from setuptools import setup
if os.path.exists('README.rst'):
long_description = open('README.rst').read()
else:
long_description = '''MolVS is a molecule validation and standardization tool, written in Python using the RDKit
chemistry framework. Building a collection of chemical structures from different sources can be difficult due to
differing representations, drawing conventions and mistakes. MolVS can standardize chemical structures to improve data
quality, help with de-duplication and identify relationships between molecules.
'''
setup(
name='MolVS',
version='0.1.1',
author='Matt Swain',
author_email='m.swain@me.com',
license='MIT',
url='https://github.com/mcs07/MolVS',
packages=['molvs'],
description='Molecule Validation and Standardization',
long_description=long_description,
keywords='chemistry cheminformatics rdkit',
zip_safe=False,
tests_require=['pytest'],
install_requires=['six'],
entry_points={'console_scripts': ['molvs = molvs.cli:main']},
classifiers=[
'Environment :: Console',
'Intended Audience :: Developers',
'Intended Audience :: Healthcare Industry',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Bio-Informatics',
'Topic :: Scientific/Engineering :: Chemistry',
'Topic :: Software Development :: Libraries :: Python Modules',
],
)
|
mcs07/MolVS
|
setup.py
|
Python
|
mit
| 1,917
|
[
"RDKit"
] |
f1db5efe271aa869794756e966b0055f977a32f2aad9c22baa6f2ff96158695b
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Ansible module to configure .deb packages.
(c) 2014, Brian Coca <briancoca+ansible@gmail.com>
This file is part of Ansible
Ansible is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Ansible is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Ansible. If not, see <http://www.gnu.org/licenses/>.
"""
DOCUMENTATION = '''
---
module: debconf
short_description: Configure a .deb package
description:
- Configure a .deb package using debconf-set-selections. Or just query
existing selections.
version_added: "1.6"
notes:
- This module requires the command line debconf tools.
- A number of questions have to be answered (depending on the package).
Use 'debconf-show <package>' on any Debian or derivative with the package
installed to see questions/settings available.
- Some distros will always record tasks involving the setting of passwords as changed. This is due to debconf-get-selections masking passwords.
requirements: [ debconf, debconf-utils ]
options:
name:
description:
- Name of package to configure.
required: true
default: null
aliases: ['pkg']
question:
description:
- A debconf configuration setting
required: false
default: null
aliases: ['setting', 'selection']
vtype:
description:
- The type of the value supplied
required: false
default: null
choices: [string, password, boolean, select, multiselect, note, error, title, text]
aliases: []
value:
description:
- Value to set the configuration to
required: false
default: null
aliases: ['answer']
unseen:
description:
- Do not set 'seen' flag when pre-seeding
required: false
default: False
aliases: []
author: Brian Coca
'''
EXAMPLES = '''
# Set default locale to fr_FR.UTF-8
debconf: name=locales question='locales/default_environment_locale' value=fr_FR.UTF-8 vtype='select'
# set to generate locales:
debconf: name=locales question='locales/locales_to_be_generated' value='en_US.UTF-8 UTF-8, fr_FR.UTF-8 UTF-8' vtype='multiselect'
# Accept oracle license
debconf: name='oracle-java7-installer' question='shared/accepted-oracle-license-v1-1' value='true' vtype='select'
# Specifying package you can register/return the list of questions and current values
debconf: name='tzdata'
'''
import pipes
def get_selections(module, pkg):
cmd = [module.get_bin_path('debconf-show', True), pkg]
rc, out, err = module.run_command(' '.join(cmd))
if rc != 0:
module.fail_json(msg=err)
selections = {}
for line in out.splitlines():
(key, value) = line.split(':', 1)
selections[ key.strip('*').strip() ] = value.strip()
return selections
def set_selection(module, pkg, question, vtype, value, unseen):
data = ' '.join([ question, vtype, value ])
setsel = module.get_bin_path('debconf-set-selections', True)
cmd = ["echo %s %s |" % (pipes.quote(pkg), pipes.quote(data)), setsel]
if unseen:
cmd.append('-u')
return module.run_command(' '.join(cmd), use_unsafe_shell=True)
def main():
module = AnsibleModule(
argument_spec = dict(
name = dict(required=True, aliases=['pkg'], type='str'),
question = dict(required=False, aliases=['setting', 'selection'], type='str'),
vtype = dict(required=False, type='str', choices=['string', 'password', 'boolean', 'select', 'multiselect', 'note', 'error', 'title', 'text']),
value= dict(required=False, type='str'),
unseen = dict(required=False, type='bool'),
),
required_together = ( ['question','vtype', 'value'],),
supports_check_mode=True,
)
#TODO: enable passing array of options and/or debconf file from get-selections dump
pkg = module.params["name"]
question = module.params["question"]
vtype = module.params["vtype"]
value = module.params["value"]
unseen = module.params["unseen"]
prev = get_selections(module, pkg)
diff = ''
changed = False
msg = ""
if question is not None:
if vtype is None or value is None:
module.fail_json(msg="when supplying a question you must supply a valid vtype and value")
if not question in prev or prev[question] != value:
changed = True
if changed:
if not module.check_mode:
rc, msg, e = set_selection(module, pkg, question, vtype, value, unseen)
if rc:
module.fail_json(msg=e)
curr = { question: value }
if question in prev:
prev = {question: prev[question]}
else:
prev[question] = ''
module.exit_json(changed=changed, msg=msg, current=curr, previous=prev)
module.exit_json(changed=changed, msg=msg, current=prev)
# import module snippets
from ansible.module_utils.basic import *
main()
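# Illustrative note, not part of the original module: for the first entry in
# EXAMPLES above, set_selection() pipes a single preseed line into
# debconf-set-selections, roughly equivalent to the shell command
#   echo locales locales/default_environment_locale select fr_FR.UTF-8 | debconf-set-selections
# with quoting of the package name and data handled by pipes.quote().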
|
jburwell/ansible-modules-extras
|
system/debconf.py
|
Python
|
gpl-3.0
| 5,359
|
[
"Brian"
] |
d105384f4beeeb5261f0d92a777af550f97f56485f50d3cbc909488c3d4db55d
|
# -*- coding: utf-8 -*-
"""
Created on Fri Sep 30 15:20:45 2011
@author: josef
"""
from statsmodels.compat.python import lrange
import numpy as np
from scipy import stats
from statsmodels.sandbox.tools.mctools import StatTestMC
from statsmodels.stats.diagnostic import acorr_ljungbox
from statsmodels.tsa.stattools import adfuller
def normalnoisesim(nobs=500, loc=0.0):
return (loc+np.random.randn(nobs))
def lb(x):
s,p = acorr_ljungbox(x, lags=4)
return np.r_[s, p]
mc1 = StatTestMC(normalnoisesim, lb)
mc1.run(5000, statindices=lrange(4))
print(mc1.summary_quantiles([1,2,3], stats.chi2([2,3,4]).ppf,
varnames=['lag 1', 'lag 2', 'lag 3'],
title='acorr_ljungbox'))
print('\n\n')
frac = [0.01, 0.025, 0.05, 0.1, 0.975]
crit = stats.chi2([2,3,4]).ppf(np.atleast_2d(frac).T)
print(mc1.summary_cdf([1,2,3], frac, crit,
varnames=['lag 1', 'lag 2', 'lag 3'],
title='acorr_ljungbox'))
print(mc1.cdf(crit, [1,2,3])[1])
#----------------------
def randwalksim(nobs=500, drift=0.0):
return (drift+np.random.randn(nobs)).cumsum()
def adf20(x):
return adfuller(x, 2, regression="n", autolag=None)
print(adf20(np.random.randn(100)))
mc2 = StatTestMC(randwalksim, adf20)
mc2.run(10000, statindices=[0,1])
frac = [0.01, 0.05, 0.1]
#bug
crit = np.array([-3.4996365338407074, -2.8918307730370025, -2.5829283377617176])[:,None]
print(mc2.summary_cdf([0], frac, crit,
varnames=['adf'],
title='adf'))
#bug
#crit2 = np.column_stack((crit, frac))
#print mc2.summary_cdf([0, 1], frac, crit,
# varnames=['adf'],
# title='adf')
print(mc2.quantiles([0]))
print(mc2.cdf(crit, [0]))
doplot=1
if doplot:
import matplotlib.pyplot as plt
mc1.plot_hist([3],stats.chi2([4]).pdf)
plt.title('acorr_ljungbox - MC versus chi2')
plt.show()
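# Illustrative note, not part of the original script: the pattern used above is
#   mc = StatTestMC(<sample generator>, <statistic function>)
#   mc.run(n_replications, statindices=[...])
# followed by the summary_quantiles / summary_cdf / quantiles / cdf / plot_hist
# helpers, which compare the Monte Carlo distribution of each statistic against
# reference values (chi2 quantiles for acorr_ljungbox, tabulated critical
# values for adfuller).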
|
statsmodels/statsmodels
|
statsmodels/sandbox/tools/try_mctools.py
|
Python
|
bsd-3-clause
| 1,941
|
[
"ADF"
] |
31730f507fe3c829c3a75208a053d9bb4090af8f8b5f8776ce5088a4542067cd
|
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the page that allows learners to play through an exploration."""
import logging
from constants import constants
from core.domain import classifier_services
from core.domain import collection_domain
from core.domain import collection_services
from core.domain import exp_domain
from core.domain import exp_fetchers
from core.domain import exp_services
from core.domain import learner_progress_services
from core.domain import param_domain
from core.domain import question_services
from core.domain import recommendations_services
from core.domain import rights_manager
from core.domain import skill_services
from core.domain import stats_domain
from core.domain import stats_services
from core.domain import story_domain
from core.domain import story_services
from core.domain import topic_services
from core.domain import user_services
from core.platform import models
from core.platform.taskqueue import gae_taskqueue_services as taskqueue_services
from core.tests import test_utils
import feconf
(classifier_models, stats_models) = models.Registry.import_models(
[models.NAMES.classifier, models.NAMES.statistics])
class ReaderPermissionsTest(test_utils.GenericTestBase):
"""Test permissions for readers to view explorations."""
EXP_ID = 'eid'
def setUp(self):
"""Before each individual test, create a dummy exploration."""
super(ReaderPermissionsTest, self).setUp()
self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME)
self.editor_id = self.get_user_id_from_email(self.EDITOR_EMAIL)
self.editor = user_services.UserActionsInfo(self.editor_id)
self.exploration = self.save_new_valid_exploration(
self.EXP_ID, self.editor_id, title=self.UNICODE_TEST_STRING,
category=self.UNICODE_TEST_STRING)
def test_unpublished_explorations_are_invisible_to_logged_out_users(self):
self.get_html_response(
'%s/%s' % (feconf.EXPLORATION_URL_PREFIX, self.EXP_ID),
expected_status_int=404)
def test_unpublished_explorations_are_invisible_to_unconnected_users(self):
self.signup(self.VIEWER_EMAIL, self.VIEWER_USERNAME)
self.login(self.VIEWER_EMAIL)
self.get_html_response(
'%s/%s' % (feconf.EXPLORATION_URL_PREFIX, self.EXP_ID),
expected_status_int=404)
self.logout()
def test_unpublished_explorations_are_invisible_to_other_editors(self):
other_editor_email = 'another@example.com'
self.signup(other_editor_email, 'othereditorusername')
other_exploration = exp_domain.Exploration.create_default_exploration(
'eid2')
exp_services.save_new_exploration(
other_editor_email, other_exploration)
self.login(other_editor_email)
self.get_html_response(
'%s/%s' % (feconf.EXPLORATION_URL_PREFIX, self.EXP_ID),
expected_status_int=404)
self.logout()
def test_unpublished_explorations_are_visible_to_their_editors(self):
self.login(self.EDITOR_EMAIL)
self.get_html_response(
'%s/%s' % (feconf.EXPLORATION_URL_PREFIX, self.EXP_ID))
self.logout()
def test_unpublished_explorations_are_visible_to_admins(self):
self.signup(self.ADMIN_EMAIL, self.ADMIN_USERNAME)
self.set_admins([self.ADMIN_USERNAME])
self.login(self.ADMIN_EMAIL)
self.get_html_response(
'%s/%s' % (feconf.EXPLORATION_URL_PREFIX, self.EXP_ID))
self.logout()
def test_published_explorations_are_visible_to_logged_out_users(self):
rights_manager.publish_exploration(self.editor, self.EXP_ID)
self.get_html_response(
'%s/%s' % (feconf.EXPLORATION_URL_PREFIX, self.EXP_ID))
def test_published_explorations_are_visible_to_logged_in_users(self):
rights_manager.publish_exploration(self.editor, self.EXP_ID)
self.signup(self.VIEWER_EMAIL, self.VIEWER_USERNAME)
self.login(self.VIEWER_EMAIL)
self.get_html_response(
'%s/%s' % (feconf.EXPLORATION_URL_PREFIX, self.EXP_ID))
def test_exploration_page_with_iframed_redirects(self):
self.login(self.EDITOR_EMAIL)
exp_version = self.exploration.version
response = self.get_html_response(
'%s/%s' % (feconf.EXPLORATION_URL_PREFIX, self.EXP_ID), params={
'parent': True,
'iframed': True,
'v': exp_version
}, expected_status_int=302
)
self.assertTrue(
response.headers['Location'].endswith(
'/embed/exploration/%s?v=%s' % (self.EXP_ID, exp_version)))
self.logout()
def test_exploration_page_raises_error_with_invalid_exploration_version(
self):
self.login(self.EDITOR_EMAIL)
self.get_html_response(
'%s/%s' % (feconf.EXPLORATION_URL_PREFIX, self.EXP_ID), params={
'v': 10,
'parent': True
}, expected_status_int=404
)
self.logout()
class FeedbackIntegrationTest(test_utils.GenericTestBase):
"""Test the handler for giving feedback."""
def test_give_feedback_handler(self):
"""Test giving feedback handler."""
self.signup(self.VIEWER_EMAIL, self.VIEWER_USERNAME)
# Load demo exploration.
exp_id = '0'
exp_services.delete_demo('0')
exp_services.load_demo('0')
# Viewer opens exploration.
self.login(self.VIEWER_EMAIL)
exploration_dict = self.get_json(
'%s/%s' % (feconf.EXPLORATION_INIT_URL_PREFIX, exp_id))
state_name_1 = exploration_dict['exploration']['init_state_name']
# Viewer gives 1st feedback.
self.post_json(
'/explorehandler/give_feedback/%s' % exp_id,
{
'state_name': state_name_1,
'feedback': 'This is a feedback message.',
}
)
self.logout()
class ExplorationStateClassifierMappingTests(test_utils.GenericTestBase):
"""Test the handler for initialising exploration with
state_classifier_mapping.
"""
def test_creation_of_state_classifier_mapping(self):
super(ExplorationStateClassifierMappingTests, self).setUp()
exploration_id = '15'
self.login(self.VIEWER_EMAIL)
self.signup(self.VIEWER_EMAIL, self.VIEWER_USERNAME)
exp_services.delete_demo(exploration_id)
# We enable ENABLE_ML_CLASSIFIERS so that the subsequent call to
# save_exploration handles job creation for trainable states.
# Since only one demo exploration has a trainable state, we modify our
# values for MIN_ASSIGNED_LABELS and MIN_TOTAL_TRAINING_EXAMPLES to let
# the classifier_demo_exploration.yaml be trainable. This is
# completely for testing purposes.
with self.swap(feconf, 'ENABLE_ML_CLASSIFIERS', True):
with self.swap(feconf, 'MIN_TOTAL_TRAINING_EXAMPLES', 5):
with self.swap(feconf, 'MIN_ASSIGNED_LABELS', 1):
exp_services.load_demo(exploration_id)
# Retrieve job_id of created job (because of save_exp).
all_jobs = classifier_models.ClassifierTrainingJobModel.get_all()
self.assertEqual(all_jobs.count(), 1)
for job in all_jobs:
job_id = job.id
classifier_services.store_classifier_data(job_id, {})
expected_state_classifier_mapping = {
'text': {
'algorithm_id': 'TextClassifier',
'classifier_data': {},
'data_schema_version': 1
}
}
# Call the handler.
exploration_dict = self.get_json(
'%s/%s' % (feconf.EXPLORATION_INIT_URL_PREFIX, exploration_id))
retrieved_state_classifier_mapping = exploration_dict[
'state_classifier_mapping']
self.assertEqual(
expected_state_classifier_mapping,
retrieved_state_classifier_mapping)
def test_exploration_handler_raises_error_with_invalid_version(self):
exploration_id = '15'
self.signup(self.VIEWER_EMAIL, self.VIEWER_USERNAME)
self.login(self.VIEWER_EMAIL)
exp_services.load_demo(exploration_id)
self.get_json(
'%s/%s' % (feconf.EXPLORATION_INIT_URL_PREFIX, exploration_id),
params={'v': 10}, expected_status_int=404)
class ExplorationPretestsUnitTest(test_utils.GenericTestBase):
"""Test the handler for initialising exploration with
state_classifier_mapping.
"""
def setUp(self):
"""Before each individual test, initialize data."""
super(ExplorationPretestsUnitTest, self).setUp()
self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME)
self.editor_id = self.get_user_id_from_email(self.EDITOR_EMAIL)
self.skill_id = skill_services.get_new_skill_id()
self.save_new_skill(
self.skill_id, 'user', 'Description')
def test_get_exploration_pretests(self):
super(ExplorationPretestsUnitTest, self).setUp()
story_id = story_services.get_new_story_id()
topic_id = topic_services.get_new_topic_id()
self.save_new_topic(
topic_id, 'user', 'Topic', 'A new topic', [], [], [], [], 0)
self.save_new_story(
story_id, 'user', 'Title', 'Description', 'Notes', topic_id
)
topic_services.add_canonical_story('user', topic_id, story_id)
changelist = [
story_domain.StoryChange({
'cmd': story_domain.CMD_ADD_STORY_NODE,
'node_id': 'node_1',
'title': 'Title 1'
})
]
story_services.update_story('user', story_id, changelist, 'Added node.')
exp_id = '0'
exp_id_2 = '1'
exp_services.delete_demo('0')
exp_services.load_demo('0')
exp_services.delete_demo('1')
exp_services.load_demo('1')
change_list = [story_domain.StoryChange({
'cmd': story_domain.CMD_UPDATE_STORY_NODE_PROPERTY,
'property_name': (
story_domain.STORY_NODE_PROPERTY_PREREQUISITE_SKILL_IDS),
'old_value': [],
'new_value': [self.skill_id],
'node_id': 'node_1'
}), story_domain.StoryChange({
'cmd': story_domain.CMD_UPDATE_STORY_NODE_PROPERTY,
'property_name': (
story_domain.STORY_NODE_PROPERTY_EXPLORATION_ID),
'old_value': None,
'new_value': exp_id,
'node_id': 'node_1'
})]
story_services.update_story(
'user', story_id, change_list, 'Updated Node 1.')
question_id = question_services.get_new_question_id()
self.save_new_question(
question_id, 'user',
self._create_valid_question_data('ABC'), [self.skill_id])
question_id_2 = question_services.get_new_question_id()
self.save_new_question(
question_id_2, 'user',
self._create_valid_question_data('ABC'), [self.skill_id])
question_services.create_new_question_skill_link(
self.editor_id, question_id, self.skill_id, 0.3)
question_services.create_new_question_skill_link(
self.editor_id, question_id_2, self.skill_id, 0.5)
# Call the handler.
with self.swap(feconf, 'NUM_PRETEST_QUESTIONS', 1):
json_response_1 = self.get_json(
'%s/%s?story_id=%s&cursor=' % (
feconf.EXPLORATION_PRETESTS_URL_PREFIX, exp_id, story_id))
next_cursor = json_response_1['next_start_cursor']
self.assertEqual(len(json_response_1['pretest_question_dicts']), 1)
json_response_2 = self.get_json(
'%s/%s?story_id=%s&cursor=%s' % (
feconf.EXPLORATION_PRETESTS_URL_PREFIX, exp_id, story_id,
next_cursor))
self.assertEqual(len(json_response_2['pretest_question_dicts']), 1)
self.assertNotEqual(
json_response_1['pretest_question_dicts'][0]['id'],
json_response_2['pretest_question_dicts'][0]['id'])
self.get_json(
'%s/%s?story_id=%s' % (
feconf.EXPLORATION_PRETESTS_URL_PREFIX, exp_id_2, story_id),
expected_status_int=400)
self.get_json(
'%s/%s?story_id=%s' % (
feconf.EXPLORATION_PRETESTS_URL_PREFIX, exp_id_2, 'story'),
expected_status_int=400)
class QuestionsUnitTest(test_utils.GenericTestBase):
"""Test the handler for fetching questions."""
def setUp(self):
"""Before each individual test, initialize data."""
super(QuestionsUnitTest, self).setUp()
self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME)
self.editor_id = self.get_user_id_from_email(self.EDITOR_EMAIL)
self.skill_id = skill_services.get_new_skill_id()
self.save_new_skill(self.skill_id, 'user', 'Description')
self.question_id = question_services.get_new_question_id()
self.save_new_question(
self.question_id, 'user',
self._create_valid_question_data('ABC'), [self.skill_id])
question_services.create_new_question_skill_link(
self.editor_id, self.question_id, self.skill_id, 0.5)
self.question_id_2 = question_services.get_new_question_id()
self.save_new_question(
self.question_id_2, 'user',
self._create_valid_question_data('ABC'), [self.skill_id])
question_services.create_new_question_skill_link(
self.editor_id, self.question_id_2, self.skill_id, 0.5)
def test_questions_are_returned_successfully(self):
# Call the handler.
url = '%s?question_count=%s&skill_ids=%s' % (
feconf.QUESTIONS_URL_PREFIX, '1', self.skill_id)
json_response_1 = self.get_json(url)
self.assertEqual(len(json_response_1['question_dicts']), 1)
def test_question_count_more_than_available_returns_all_questions(self):
# Call the handler.
url = '%s?question_count=%s&skill_ids=%s' % (
feconf.QUESTIONS_URL_PREFIX, '5', self.skill_id)
json_response = self.get_json(url)
self.assertEqual(len(json_response['question_dicts']), 2)
def test_multiple_skill_id_returns_questions(self):
skill_id_2 = skill_services.get_new_skill_id()
self.save_new_skill(skill_id_2, 'user', 'Description')
question_id_3 = question_services.get_new_question_id()
self.save_new_question(
question_id_3, 'user',
self._create_valid_question_data('ABC'), [self.skill_id])
question_services.create_new_question_skill_link(
self.editor_id, question_id_3, skill_id_2, 0.5)
url = '%s?question_count=%s&skill_ids=%s,%s' % (
feconf.QUESTIONS_URL_PREFIX, '3', self.skill_id, skill_id_2)
json_response = self.get_json(url)
self.assertEqual(len(json_response['question_dicts']), 3)
question_ids = [data['id'] for data in json_response['question_dicts']]
self.assertItemsEqual(
[self.question_id, self.question_id_2, question_id_3], question_ids)
def test_invalid_skill_id_returns_no_questions(self):
# Call the handler.
url = '%s?question_count=%s&skill_ids=%s' % (
feconf.QUESTIONS_URL_PREFIX, '1', 'invalid_skill_id')
json_response = self.get_json(url)
self.assertEqual(len(json_response['question_dicts']), 0)
def test_question_count_zero_raises_invalid_input_exception(self):
# Call the handler.
url = '%s?question_count=%s&skill_ids=%s' % (
feconf.QUESTIONS_URL_PREFIX, '0', self.skill_id)
self.get_json(url, expected_status_int=400)
class ExplorationParametersUnitTests(test_utils.GenericTestBase):
"""Test methods relating to exploration parameters."""
def test_get_init_params(self):
"""Test the get_init_params() method."""
independent_pc = param_domain.ParamChange(
'a', 'Copier', {'value': 'firstValue', 'parse_with_jinja': False})
dependent_pc = param_domain.ParamChange(
'b', 'Copier', {'value': '{{a}}', 'parse_with_jinja': True})
exp_param_specs = {
'a': param_domain.ParamSpec('UnicodeString'),
'b': param_domain.ParamSpec('UnicodeString'),
}
new_params = self.get_updated_param_dict(
{}, [independent_pc, dependent_pc], exp_param_specs)
self.assertEqual(new_params, {'a': 'firstValue', 'b': 'firstValue'})
# Jinja string evaluation fails gracefully on dependencies that do not
# exist.
new_params = self.get_updated_param_dict(
{}, [dependent_pc, independent_pc], exp_param_specs)
self.assertEqual(new_params, {'a': 'firstValue', 'b': ''})
def test_update_learner_params(self):
"""Test the update_learner_params() method."""
independent_pc = param_domain.ParamChange(
'a', 'Copier', {'value': 'firstValue', 'parse_with_jinja': False})
dependent_pc = param_domain.ParamChange(
'b', 'Copier', {'value': '{{a}}', 'parse_with_jinja': True})
exp_param_specs = {
'a': param_domain.ParamSpec('UnicodeString'),
'b': param_domain.ParamSpec('UnicodeString'),
}
old_params = {}
new_params = self.get_updated_param_dict(
old_params, [independent_pc, dependent_pc], exp_param_specs)
self.assertEqual(new_params, {'a': 'firstValue', 'b': 'firstValue'})
self.assertEqual(old_params, {})
old_params = {'a': 'secondValue'}
new_params = self.get_updated_param_dict(
old_params, [dependent_pc], exp_param_specs)
self.assertEqual(new_params, {'a': 'secondValue', 'b': 'secondValue'})
self.assertEqual(old_params, {'a': 'secondValue'})
# Jinja string evaluation fails gracefully on dependencies that do not
# exist.
old_params = {}
new_params = self.get_updated_param_dict(
old_params, [dependent_pc], exp_param_specs)
self.assertEqual(new_params, {'b': ''})
self.assertEqual(old_params, {})
class RatingsIntegrationTests(test_utils.GenericTestBase):
"""Integration tests of ratings recording and display."""
EXP_ID = '0'
def setUp(self):
super(RatingsIntegrationTests, self).setUp()
exp_services.load_demo(self.EXP_ID)
def test_assign_and_read_ratings(self):
"""Test the PUT and GET methods for ratings."""
self.signup('user@example.com', 'user')
self.login('user@example.com')
csrf_token = self.get_new_csrf_token()
# User checks rating.
ratings = self.get_json('/explorehandler/rating/%s' % self.EXP_ID)
self.assertEqual(ratings['user_rating'], None)
self.assertEqual(
ratings['overall_ratings'],
{'1': 0, '2': 0, '3': 0, '4': 0, '5': 0})
# User rates and checks rating.
self.put_json(
'/explorehandler/rating/%s' % self.EXP_ID, {
'user_rating': 2
}, csrf_token=csrf_token
)
ratings = self.get_json('/explorehandler/rating/%s' % self.EXP_ID)
self.assertEqual(ratings['user_rating'], 2)
self.assertEqual(
ratings['overall_ratings'],
{'1': 0, '2': 1, '3': 0, '4': 0, '5': 0})
# User re-rates and checks rating.
self.login('user@example.com')
self.put_json(
'/explorehandler/rating/%s' % self.EXP_ID, {
'user_rating': 5
}, csrf_token=csrf_token
)
ratings = self.get_json('/explorehandler/rating/%s' % self.EXP_ID)
self.assertEqual(ratings['user_rating'], 5)
self.assertEqual(
ratings['overall_ratings'],
{'1': 0, '2': 0, '3': 0, '4': 0, '5': 1})
self.logout()
def test_non_logged_in_users_cannot_rate(self):
"""Check non logged-in users can view but not submit ratings."""
self.signup('user@example.com', 'user')
self.login('user@example.com')
csrf_token = self.get_new_csrf_token()
self.logout()
ratings = self.get_json('/explorehandler/rating/%s' % self.EXP_ID)
self.assertEqual(ratings['user_rating'], None)
self.assertEqual(
ratings['overall_ratings'],
{'1': 0, '2': 0, '3': 0, '4': 0, '5': 0})
self.put_json(
'/explorehandler/rating/%s' % self.EXP_ID, {
'user_rating': 1
}, csrf_token=csrf_token,
expected_status_int=401
)
def test_ratings_by_different_users(self):
"""Check that ratings by different users do not interfere."""
self.signup('a@example.com', 'a')
self.signup('b@example.com', 'b')
self.login('a@example.com')
csrf_token = self.get_new_csrf_token()
self.put_json(
'/explorehandler/rating/%s' % self.EXP_ID, {
'user_rating': 4
}, csrf_token=csrf_token
)
self.logout()
self.login('b@example.com')
csrf_token = self.get_new_csrf_token()
ratings = self.get_json('/explorehandler/rating/%s' % self.EXP_ID)
self.assertEqual(ratings['user_rating'], None)
self.put_json(
'/explorehandler/rating/%s' % self.EXP_ID, {
'user_rating': 4
}, csrf_token=csrf_token
)
ratings = self.get_json('/explorehandler/rating/%s' % self.EXP_ID)
self.assertEqual(ratings['user_rating'], 4)
self.assertEqual(
ratings['overall_ratings'],
{'1': 0, '2': 0, '3': 0, '4': 2, '5': 0})
self.logout()
class RecommendationsHandlerTests(test_utils.GenericTestBase):
"""Backend integration tests for recommended explorations for after an
exploration is completed.
"""
# Demo explorations.
EXP_ID_0 = '0'
EXP_ID_1 = '1'
EXP_ID_7 = '7'
EXP_ID_8 = '8'
# Explorations contained within the demo collection.
EXP_ID_19 = '19'
EXP_ID_20 = '20'
EXP_ID_21 = '21'
# Demo collection.
COL_ID = '0'
def setUp(self):
super(RecommendationsHandlerTests, self).setUp()
# Register users.
self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME)
        self.editor_id = self.get_user_id_from_email(self.EDITOR_EMAIL)
self.signup(self.NEW_USER_EMAIL, self.NEW_USER_USERNAME)
self.new_user_id = self.get_user_id_from_email(self.NEW_USER_EMAIL)
# Login and create activities.
self.login(self.EDITOR_EMAIL)
exp_services.load_demo(self.EXP_ID_0)
exp_services.load_demo(self.EXP_ID_1)
exp_services.load_demo(self.EXP_ID_7)
exp_services.load_demo(self.EXP_ID_8)
collection_services.load_demo(self.COL_ID)
self.logout()
def _get_exploration_ids_from_summaries(self, summaries):
"""Returns the sorted list of all the exploration ids from summaries."""
return sorted([summary['id'] for summary in summaries])
def _get_recommendation_ids(
self, exploration_id, collection_id=None,
include_system_recommendations=None,
author_recommended_ids_str='[]'):
"""Gets the recommended exploration ids from the summaries."""
collection_id_param = (
'&collection_id=%s' % collection_id
if collection_id is not None else '')
include_recommendations_param = (
'&include_system_recommendations=%s' % (
include_system_recommendations)
if include_system_recommendations is not None else '')
recommendations_url = (
'/explorehandler/recommendations/%s?'
'stringified_author_recommended_ids=%s%s%s' % (
exploration_id, author_recommended_ids_str, collection_id_param,
include_recommendations_param))
summaries = self.get_json(recommendations_url)['summaries']
return self._get_exploration_ids_from_summaries(summaries)
# TODO(bhenning): Add tests for ensuring system explorations are properly
# sampled when there are many matched for a given exploration ID.
# TODO(bhenning): Verify whether recommended author-specified explorations
# are also played within the context of collections, and whether that's
# desirable.
def _set_recommendations(self, exp_id, recommended_ids):
"""Sets the recommendations in the exploration corresponding to the
given exploration id.
"""
recommendations_services.set_recommendations(exp_id, recommended_ids)
def _complete_exploration_in_collection(self, exp_id):
"""Completes the exploration within the collection. Records that the
exploration has been played by the user in the context of the
collection.
"""
collection_services.record_played_exploration_in_collection_context(
self.new_user_id, self.COL_ID, exp_id)
def _complete_entire_collection_in_order(self):
"""Completes the entire collection in order."""
self._complete_exploration_in_collection(self.EXP_ID_19)
self._complete_exploration_in_collection(self.EXP_ID_20)
self._complete_exploration_in_collection(self.EXP_ID_21)
self._complete_exploration_in_collection(self.EXP_ID_0)
# Logged in standard viewer tests.
def test_logged_in_with_no_sysexps_no_authexps_no_col_has_no_exps(self):
"""Check there are no recommended explorations when a user is logged in,
finishes an exploration in-viewer, but there are no recommended
explorations and no author exploration IDs.
"""
self.login(self.NEW_USER_EMAIL)
recommendation_ids = self._get_recommendation_ids(
self.EXP_ID_0, include_system_recommendations=True)
self.assertEqual(recommendation_ids, [])
def test_logged_in_with_some_sysexps_no_authexps_no_col_has_some_exps(self):
"""Check there are recommended explorations when a user is logged in,
finishes an exploration in-viewer, and there are system recommendations.
"""
self.login(self.NEW_USER_EMAIL)
self._set_recommendations(self.EXP_ID_0, [self.EXP_ID_1, self.EXP_ID_8])
recommendation_ids = self._get_recommendation_ids(
self.EXP_ID_0, include_system_recommendations=True)
self.assertEqual(recommendation_ids, [self.EXP_ID_1, self.EXP_ID_8])
def test_logged_in_with_no_sysexps_some_authexps_no_col_has_some_exps(self):
"""Check there are some recommended explorations when a user is logged
in, finishes an exploration in-viewer, and there are author-specified
exploration IDs.
"""
self.login(self.NEW_USER_EMAIL)
recommendation_ids = self._get_recommendation_ids(
self.EXP_ID_0, include_system_recommendations=True,
author_recommended_ids_str='["7","8"]')
self.assertEqual(recommendation_ids, [self.EXP_ID_7, self.EXP_ID_8])
def test_logged_in_with_sysexps_and_authexps_no_col_has_some_exps(self):
"""Check there are recommended explorations when a user is logged in,
finishes an exploration in-viewer, and there are both author-specified
exploration IDs and recommendations from the system.
"""
self.login(self.NEW_USER_EMAIL)
self._set_recommendations(self.EXP_ID_0, [self.EXP_ID_1, self.EXP_ID_8])
recommendation_ids = self._get_recommendation_ids(
self.EXP_ID_0, include_system_recommendations=True,
author_recommended_ids_str='["7","8"]')
self.assertEqual(
recommendation_ids, [self.EXP_ID_1, self.EXP_ID_7, self.EXP_ID_8])
# Logged in in-editor tests.
def test_logged_in_preview_no_authexps_no_col_has_no_exps(self):
"""Check there are no recommended explorations when a user is logged in,
finishes an exploration in-editor (no system recommendations since it's
a preview of the exploration), and there are no author exploration IDs.
"""
self.login(self.NEW_USER_EMAIL)
recommendation_ids = self._get_recommendation_ids(self.EXP_ID_0)
self.assertEqual(recommendation_ids, [])
def test_logged_in_preview_with_authexps_no_col_has_some_exps(self):
"""Check there are some recommended explorations when a user is logged
in, finishes an exploration in-editor (no system recommendations), and
there are some author exploration IDs.
"""
self.login(self.NEW_USER_EMAIL)
recommendation_ids = self._get_recommendation_ids(
self.EXP_ID_0, author_recommended_ids_str='["7","8"]')
self.assertEqual(recommendation_ids, [self.EXP_ID_7, self.EXP_ID_8])
# Logged in collection tests.
def test_logged_in_no_sysexps_no_authexps_first_exp_in_col_has_exp(self):
"""Check there is a recommended exploration when a user is logged in
and completes the first exploration of a collection.
"""
self.login(self.NEW_USER_EMAIL)
self._complete_exploration_in_collection(self.EXP_ID_19)
recommendation_ids = self._get_recommendation_ids(
self.EXP_ID_19, collection_id=self.COL_ID)
# The next exploration in the collection should be recommended.
self.assertEqual(recommendation_ids, [self.EXP_ID_20])
def test_logged_in_no_sysexps_no_authexps_mid_exp_in_col_has_exps(self):
"""Check there are recommended explorations when a user is logged in
and completes a middle exploration of the collection (since more
explorations are needed to complete the collection).
"""
self.login(self.NEW_USER_EMAIL)
self._complete_exploration_in_collection(self.EXP_ID_20)
recommendation_ids = self._get_recommendation_ids(
self.EXP_ID_20, collection_id=self.COL_ID)
        # The first exploration that the user has not yet visited is
        # recommended. Since the collection is linear, the user will
        # eventually visit every node in the collection.
self.assertEqual(recommendation_ids, [self.EXP_ID_19])
def test_logged_in_no_sysexps_no_authexps_all_exps_in_col_has_no_exps(self):
"""Check there are not recommended explorations when a user is logged in
and completes all explorations of the collection.
"""
self.login(self.NEW_USER_EMAIL)
self._complete_entire_collection_in_order()
recommendation_ids = self._get_recommendation_ids(
self.EXP_ID_0, collection_id=self.COL_ID)
# No explorations are recommended since the collection was completed.
self.assertEqual(recommendation_ids, [])
def test_logged_in_with_sysexps_no_authexps_first_exp_in_col_has_exp(self):
"""Check there is a recommended exploration when a user is logged in
and completes the first exploration of a collection. Note that even
though the completed exploration has system recommendations, they are
ignored in favor of the collection's own recommendations.
"""
self.login(self.NEW_USER_EMAIL)
self._set_recommendations(
self.EXP_ID_19, [self.EXP_ID_1, self.EXP_ID_8])
self._complete_exploration_in_collection(self.EXP_ID_19)
recommendation_ids = self._get_recommendation_ids(
self.EXP_ID_19, collection_id=self.COL_ID,
include_system_recommendations=True)
# The next exploration in the collection should be recommended.
self.assertEqual(recommendation_ids, [self.EXP_ID_20])
def test_logged_in_with_sysexps_no_authexps_mid_exp_in_col_has_exps(self):
"""Check there are recommended explorations when a user is logged in
and completes a middle exploration of the collection (since more
explorations are needed to complete the collection). Note that even
though the completed exploration has system recommendations, they are
ignored in favor of the collection's own recommendations.
"""
self.login(self.NEW_USER_EMAIL)
self._set_recommendations(
self.EXP_ID_20, [self.EXP_ID_1, self.EXP_ID_8])
self._complete_exploration_in_collection(self.EXP_ID_20)
recommendation_ids = self._get_recommendation_ids(
self.EXP_ID_20, collection_id=self.COL_ID,
include_system_recommendations=True)
        # The first exploration that the user has not yet visited is
        # recommended. Since the collection is linear, the user will
        # eventually visit every node in the collection.
self.assertEqual(recommendation_ids, [self.EXP_ID_19])
def test_logged_in_sysexps_no_authexps_all_exps_in_col_has_no_exps(self):
"""Check there are not recommended explorations when a user is logged in
and completes all explorations of the collection. This is true even if
there are system recommendations for the last exploration.
"""
self.login(self.NEW_USER_EMAIL)
self._complete_entire_collection_in_order()
self._set_recommendations(
self.EXP_ID_0, [self.EXP_ID_1, self.EXP_ID_8])
recommendation_ids = self._get_recommendation_ids(
self.EXP_ID_0, collection_id=self.COL_ID,
include_system_recommendations=True)
# No explorations are recommended since the collection was completed.
self.assertEqual(recommendation_ids, [])
def test_logged_in_no_sysexps_with_authexps_first_exp_in_col_has_exps(self):
"""Check there is are recommended explorations when a user is logged in
and completes the first exploration of a collection where that
exploration also has author-specified explorations.
"""
self.login(self.NEW_USER_EMAIL)
self._complete_exploration_in_collection(self.EXP_ID_19)
recommendation_ids = self._get_recommendation_ids(
self.EXP_ID_19, collection_id=self.COL_ID,
author_recommended_ids_str='["7","8"]')
# The next exploration in the collection should be recommended along
# with author specified explorations.
self.assertEqual(
recommendation_ids, [self.EXP_ID_20, self.EXP_ID_7, self.EXP_ID_8])
def test_logged_in_no_sysexps_with_authexps_mid_exp_in_col_has_exps(self):
"""Check there are recommended explorations when a user is logged in
and completes a middle exploration of the collection, and that these
recommendations include author-specified explorations.
"""
self.login(self.NEW_USER_EMAIL)
self._complete_exploration_in_collection(self.EXP_ID_20)
recommendation_ids = self._get_recommendation_ids(
self.EXP_ID_20, collection_id=self.COL_ID,
author_recommended_ids_str='["7","21"]')
# The first & next explorations should be recommended, along with author
# specified explorations.
self.assertEqual(
recommendation_ids, [self.EXP_ID_19, self.EXP_ID_21, self.EXP_ID_7])
def test_logged_in_no_sysexps_authexps_all_exps_in_col_has_exps(self):
"""Check there are still recommended explorations when a user is logged
in and completes all explorations of the collection if the last
exploration has author-specified explorations.
"""
self.login(self.NEW_USER_EMAIL)
self._complete_entire_collection_in_order()
recommendation_ids = self._get_recommendation_ids(
self.EXP_ID_0, collection_id=self.COL_ID,
author_recommended_ids_str='["7","8"]')
# Only author specified explorations should be recommended since all
# others in the collection have been completed.
self.assertEqual(recommendation_ids, [self.EXP_ID_7, self.EXP_ID_8])
# Logged out standard viewer tests.
def test_logged_out_with_no_sysexps_no_authexps_no_col_has_no_exps(self):
"""Check there are no recommended explorations when a user is logged
out, finishes an exploration in-viewer, but there are no recommended
explorations and no author exploration IDs.
"""
recommendation_ids = self._get_recommendation_ids(
self.EXP_ID_0, include_system_recommendations=True)
self.assertEqual(recommendation_ids, [])
def test_logged_out_with_sysexps_no_authexps_no_col_has_some_exps(self):
"""Check there are recommended explorations when a user is logged out,
finishes an exploration in-viewer, and there are system recommendations.
"""
self._set_recommendations(self.EXP_ID_0, [self.EXP_ID_1, self.EXP_ID_8])
recommendation_ids = self._get_recommendation_ids(
self.EXP_ID_0, include_system_recommendations=True)
self.assertEqual(recommendation_ids, [self.EXP_ID_1, self.EXP_ID_8])
def test_logged_out_no_sysexps_some_authexps_no_col_has_some_exps(self):
"""Check there are some recommended explorations when a user is logged
out, finishes an exploration in-viewer, and there are author-specified
exploration IDs.
"""
recommendation_ids = self._get_recommendation_ids(
self.EXP_ID_0, include_system_recommendations=True,
author_recommended_ids_str='["7","8"]')
self.assertEqual(recommendation_ids, [self.EXP_ID_7, self.EXP_ID_8])
def test_logged_out_with_sysexps_and_authexps_no_col_has_some_exps(self):
"""Check there are recommended explorations when a user is logged in,
finishes an exploration in-viewer, and there are both author-specified
exploration IDs and recommendations from the system.
"""
self._set_recommendations(self.EXP_ID_0, [self.EXP_ID_1, self.EXP_ID_8])
recommendation_ids = self._get_recommendation_ids(
self.EXP_ID_0, include_system_recommendations=True,
author_recommended_ids_str='["7","8"]')
self.assertEqual(
recommendation_ids, [self.EXP_ID_1, self.EXP_ID_7, self.EXP_ID_8])
# Logged out collection tests.
def test_logged_out_no_sysexps_no_authexps_first_exp_in_col_has_exp(self):
"""Check there is a recommended exploration when a user is logged out
and completes the first exploration of a collection.
"""
recommendation_ids = self._get_recommendation_ids(
self.EXP_ID_19, collection_id=self.COL_ID)
# The next exploration in the collection should be recommended.
self.assertEqual(recommendation_ids, [self.EXP_ID_20])
def test_logged_out_no_sysexps_no_authexps_mid_exp_in_col_has_exp(self):
"""Check there is a recommended exploration when a user is logged out
and completes a middle exploration of the collection.
"""
recommendation_ids = self._get_recommendation_ids(
self.EXP_ID_20, collection_id=self.COL_ID)
# Only the last exploration should be recommended since logged out users
# follow a linear path through the collection.
self.assertEqual(recommendation_ids, [self.EXP_ID_21])
def test_logged_out_no_sysexps_no_authexps_last_exp_col_has_no_exps(self):
"""Check there are not recommended explorations when a user is logged
out and completes the last exploration in the collection.
"""
recommendation_ids = self._get_recommendation_ids(
self.EXP_ID_0, collection_id=self.COL_ID)
self.assertEqual(recommendation_ids, [])
def test_logged_out_with_sysexps_no_authexps_first_exp_in_col_has_exp(self):
"""Check there is a recommended exploration when a user is logged out
and completes the first exploration of a collection. Note that even
though the completed exploration has system recommendations, they are
ignored in favor of the collection's own recommendations.
"""
self._set_recommendations(
self.EXP_ID_19, [self.EXP_ID_1, self.EXP_ID_8])
recommendation_ids = self._get_recommendation_ids(
self.EXP_ID_19, collection_id=self.COL_ID,
include_system_recommendations=True)
# The next exploration in the collection should be recommended.
self.assertEqual(recommendation_ids, [self.EXP_ID_20])
def test_logged_out_with_sysexps_no_authexps_mid_exp_in_col_has_exp(self):
"""Check there is a recommended explorations when a user is logged out
and completes a middle exploration of the collection. Note that even
though the completed exploration has system recommendations, they are
ignored in favor of the collection's own recommendations.
"""
self._set_recommendations(
self.EXP_ID_20, [self.EXP_ID_1, self.EXP_ID_8])
recommendation_ids = self._get_recommendation_ids(
self.EXP_ID_20, collection_id=self.COL_ID,
include_system_recommendations=True)
# Only the last exploration should be recommended since logged out users
# follow a linear path through the collection.
self.assertEqual(recommendation_ids, [self.EXP_ID_21])
def test_logged_out_sysexps_no_authexps_last_exp_in_col_has_no_exps(self):
"""Check there are not recommended explorations when a user is logged
out and completes the last exploration of the collection. This is true
even if there are system recommendations for the last exploration.
"""
self._set_recommendations(
self.EXP_ID_0, [self.EXP_ID_1, self.EXP_ID_8])
recommendation_ids = self._get_recommendation_ids(
self.EXP_ID_0, collection_id=self.COL_ID,
include_system_recommendations=True)
# The collection is completed, so no other explorations should be
# recommended.
self.assertEqual(recommendation_ids, [])
def test_logged_out_no_sysexps_but_authexps_first_exp_in_col_has_exps(self):
"""Check there is are recommended explorations when a user is logged out
and completes the first exploration of a collection where that
exploration also has author-specified explorations.
"""
recommendation_ids = self._get_recommendation_ids(
self.EXP_ID_19, collection_id=self.COL_ID,
author_recommended_ids_str='["7","8"]')
# The next exploration in the collection should be recommended along
# with author specified explorations.
self.assertEqual(
recommendation_ids, [self.EXP_ID_20, self.EXP_ID_7, self.EXP_ID_8])
def test_logged_out_no_sysexps_with_authexps_mid_exp_in_col_has_exps(self):
"""Check there are recommended explorations when a user is logged out
and completes a middle exploration of the collection where that
exploration also has author-specified explorations.
"""
recommendation_ids = self._get_recommendation_ids(
self.EXP_ID_20, collection_id=self.COL_ID,
author_recommended_ids_str='["7"]')
# Both the next exploration & the author-specified explorations should
# be recommended.
self.assertEqual(recommendation_ids, [self.EXP_ID_21, self.EXP_ID_7])
def test_logged_out_no_sysexps_with_dup_authexps_mid_col_exp_has_exps(self):
"""test_logged_out_no_sysexps_with_authexps_mid_exp_in_col_has_exps but
also checks that exploration IDs are de-duped if the next exploration
overlaps with the author-specified explorations.
"""
recommendation_ids = self._get_recommendation_ids(
self.EXP_ID_20, collection_id=self.COL_ID,
author_recommended_ids_str='["7", "21"]')
# Both the next exploration & the author-specified explorations should
# be recommended.
self.assertEqual(recommendation_ids, [self.EXP_ID_21, self.EXP_ID_7])
def test_logged_out_no_sysexps_authexps_last_exp_in_col_has_exps(self):
"""Check there are still recommended explorations when a user is logged
out and completes all explorations of the collection if the last
exploration has author-specified explorations.
"""
recommendation_ids = self._get_recommendation_ids(
self.EXP_ID_0, collection_id=self.COL_ID,
author_recommended_ids_str='["7","8"]')
# Only author specified explorations should be recommended since all
# others in the collection have been completed.
self.assertEqual(recommendation_ids, [self.EXP_ID_7, self.EXP_ID_8])
def test_get_recommendation_ids_with_invalid_author_recommended_ids(self):
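        """Check that an invalid stringified_author_recommended_ids value
        results in a 404 error.
        """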
self.get_json(
'/explorehandler/recommendations/%s' % self.EXP_ID_1, params={
'collection_id': 'collection_id',
'include_system_recommendations': True,
'stringified_author_recommended_ids': 'invalid_type'
}, expected_status_int=404
)
class FlagExplorationHandlerTests(test_utils.GenericTestBase):
"""Backend integration tests for flagging an exploration."""
EXP_ID = '0'
REPORT_TEXT = 'AD'
def setUp(self):
super(FlagExplorationHandlerTests, self).setUp()
# Register users.
self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME)
self.signup(self.NEW_USER_EMAIL, self.NEW_USER_USERNAME)
self.signup(self.MODERATOR_EMAIL, self.MODERATOR_USERNAME)
self.editor_id = self.get_user_id_from_email(self.EDITOR_EMAIL)
self.new_user_id = self.get_user_id_from_email(self.NEW_USER_EMAIL)
self.moderator_id = self.get_user_id_from_email(self.MODERATOR_EMAIL)
self.set_moderators([self.MODERATOR_USERNAME])
self.editor = user_services.UserActionsInfo(self.editor_id)
        # Log in as the editor.
        self.login(self.EDITOR_EMAIL)
        # Create an exploration.
self.save_new_valid_exploration(
self.EXP_ID, self.editor_id,
title='Welcome to Oppia!',
category='This is just a spam category',
objective='Test a spam exploration.')
self.can_send_emails_ctx = self.swap(
feconf, 'CAN_SEND_EMAILS', True)
rights_manager.publish_exploration(self.editor, self.EXP_ID)
self.logout()
def test_that_emails_are_sent(self):
"""Check that emails are sent to moderaters when a logged-in
user reports.
"""
# Login and flag exploration.
self.login(self.NEW_USER_EMAIL)
csrf_token = self.get_new_csrf_token()
self.post_json(
'%s/%s' % (feconf.FLAG_EXPLORATION_URL_PREFIX, self.EXP_ID), {
'report_text': self.REPORT_TEXT,
}, csrf_token=csrf_token)
self.logout()
expected_email_html_body = (
'Hello Moderator,<br>'
'newuser has flagged exploration '
'"Welcome to Oppia!"'
' on the following grounds: <br>'
'AD .<br>'
'You can modify the exploration by clicking '
'<a href="https://www.oppia.org/create/0">'
'here</a>.<br>'
'<br>'
'Thanks!<br>'
'- The Oppia Team<br>'
'<br>'
'You can change your email preferences via the '
'<a href="https://www.example.com">Preferences</a> page.')
expected_email_text_body = (
'Hello Moderator,\n'
'newuser has flagged exploration '
'"Welcome to Oppia!"'
' on the following grounds: \n'
'AD .\n'
'You can modify the exploration by clicking here.\n'
'\n'
'Thanks!\n'
'- The Oppia Team\n'
'\n'
'You can change your email preferences via the Preferences page.')
with self.can_send_emails_ctx:
self.process_and_flush_pending_tasks()
messages = self.mail_stub.get_sent_messages(to=self.MODERATOR_EMAIL)
self.assertEqual(len(messages), 1)
self.assertEqual(
messages[0].html.decode(),
expected_email_html_body)
self.assertEqual(
messages[0].body.decode(),
expected_email_text_body)
def test_non_logged_in_users_cannot_report(self):
"""Check that non-logged in users cannot report."""
self.login(self.NEW_USER_EMAIL)
csrf_token = self.get_new_csrf_token()
self.logout()
# Create report for exploration.
self.post_json(
'%s/%s' % (feconf.FLAG_EXPLORATION_URL_PREFIX, self.EXP_ID), {
'report_text': self.REPORT_TEXT,
}, csrf_token=csrf_token,
expected_status_int=401)
class LearnerProgressTest(test_utils.GenericTestBase):
"""Tests for tracking learner progress."""
EXP_ID_0 = 'exp_0'
EXP_ID_1 = 'exp_1'
# The first number corresponds to the collection to which the exploration
# belongs. The second number corresponds to the exploration id.
EXP_ID_1_0 = 'exp_2'
EXP_ID_1_1 = 'exp_3'
COL_ID_0 = 'col_0'
COL_ID_1 = 'col_1'
USER_EMAIL = 'user@example.com'
USER_USERNAME = 'user'
def setUp(self):
super(LearnerProgressTest, self).setUp()
self.signup(self.USER_EMAIL, self.USER_USERNAME)
self.user_id = self.get_user_id_from_email(self.USER_EMAIL)
self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
self.owner = user_services.UserActionsInfo(self.owner_id)
# Save and publish explorations.
self.save_new_valid_exploration(
self.EXP_ID_0, self.owner_id, title='Bridges in England',
category='Architecture', language_code='en')
self.save_new_valid_exploration(
self.EXP_ID_1, self.owner_id, title='Welcome',
category='Architecture', language_code='fi')
self.save_new_valid_exploration(
self.EXP_ID_1_0, self.owner_id, title='Sillat Suomi',
category='Architecture', language_code='fi')
self.save_new_valid_exploration(
self.EXP_ID_1_1, self.owner_id,
title='Introduce Interactions in Oppia',
category='Welcome', language_code='en')
rights_manager.publish_exploration(self.owner, self.EXP_ID_0)
rights_manager.publish_exploration(self.owner, self.EXP_ID_1)
rights_manager.publish_exploration(self.owner, self.EXP_ID_1_0)
rights_manager.publish_exploration(self.owner, self.EXP_ID_1_1)
# Save a new collection.
self.save_new_default_collection(
self.COL_ID_0, self.owner_id, title='Welcome',
category='Architecture')
self.save_new_default_collection(
self.COL_ID_1, self.owner_id, title='Bridges in England',
category='Architecture')
# Add two explorations to the previously saved collection and publish
# it.
for exp_id in [self.EXP_ID_1_0, self.EXP_ID_1_1]:
collection_services.update_collection(
self.owner_id, self.COL_ID_1, [{
'cmd': collection_domain.CMD_ADD_COLLECTION_NODE,
'exploration_id': exp_id
}], 'Added new exploration')
# Publish the collections.
rights_manager.publish_collection(self.owner, self.COL_ID_0)
rights_manager.publish_collection(self.owner, self.COL_ID_1)
def test_independent_exp_complete_event_handler(self):
"""Test handler for completion of explorations not in the context of
collections.
"""
self.login(self.USER_EMAIL)
csrf_token = self.get_new_csrf_token()
payload = {
'client_time_spent_in_secs': 0,
'params': {},
'session_id': '1PZTCw9JY8y-8lqBeuoJS2ILZMxa5m8N',
'state_name': 'final',
'version': 1
}
# When an exploration is completed but is not in the context of a
# collection, it is just added to the completed explorations list.
self.post_json(
'/explorehandler/exploration_complete_event/%s' % self.EXP_ID_0,
payload, csrf_token=csrf_token)
self.assertEqual(learner_progress_services.get_all_completed_exp_ids(
self.user_id), [self.EXP_ID_0])
self.assertEqual(
learner_progress_services.get_all_incomplete_collection_ids(
self.user_id), [])
# Test another exploration.
self.post_json(
'/explorehandler/exploration_complete_event/%s' % self.EXP_ID_1_0,
payload, csrf_token=csrf_token)
self.assertEqual(learner_progress_services.get_all_completed_exp_ids(
self.user_id), [self.EXP_ID_0, self.EXP_ID_1_0])
self.assertEqual(
learner_progress_services.get_all_incomplete_collection_ids(
self.user_id), [])
def test_exp_complete_event_in_collection(self):
"""Test handler for completion of explorations in the context of
collections.
"""
self.login(self.USER_EMAIL)
csrf_token = self.get_new_csrf_token()
payload = {
'client_time_spent_in_secs': 0,
'collection_id': self.COL_ID_1,
'params': {},
'session_id': '1PZTCw9JY8y-8lqBeuoJS2ILZMxa5m8N',
'state_name': 'final',
'version': 1
}
# If the exploration is completed in the context of a collection,
# then in addition to the exploration being added to the completed
# list, the collection is also added to the incomplete/complete list
        # depending on whether there are more explorations left to complete.
# Here we test the case when the collection is partially completed.
self.post_json(
'/explorehandler/exploration_complete_event/%s' % self.EXP_ID_1_0,
payload, csrf_token=csrf_token)
self.assertEqual(
learner_progress_services.get_all_incomplete_collection_ids(
self.user_id), [self.COL_ID_1])
self.assertEqual(learner_progress_services.get_all_completed_exp_ids(
self.user_id), [self.EXP_ID_1_0])
# Now we test the case when the collection is completed.
self.post_json(
'/explorehandler/exploration_complete_event/%s' % self.EXP_ID_1_1,
payload, csrf_token=csrf_token)
self.assertEqual(
learner_progress_services.get_all_incomplete_collection_ids(
self.user_id), [])
self.assertEqual(
learner_progress_services.get_all_completed_collection_ids(
self.user_id), [self.COL_ID_1])
self.assertEqual(
learner_progress_services.get_all_completed_exp_ids(
self.user_id), [self.EXP_ID_1_0, self.EXP_ID_1_1])
def test_cannot_complete_exploration_with_no_version(self):
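        """Check that completing an exploration with a None version in the
        payload results in a 400 error.
        """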
self.login(self.USER_EMAIL)
csrf_token = self.get_new_csrf_token()
payload = {
'client_time_spent_in_secs': 0,
'collection_id': self.COL_ID_1,
'params': {},
'session_id': '1PZTCw9JY8y-8lqBeuoJS2ILZMxa5m8N',
'state_name': 'final',
'version': None
}
response = self.post_json(
'/explorehandler/exploration_complete_event/%s' % self.EXP_ID_1_0,
payload, csrf_token=csrf_token, expected_status_int=400)
self.assertEqual(
response['error'], 'NONE EXP VERSION: Exploration complete')
def test_exp_incomplete_event_handler(self):
"""Test handler for leaving an exploration incomplete."""
self.login(self.USER_EMAIL)
csrf_token = self.get_new_csrf_token()
payload = {
'client_time_spent_in_secs': 0,
'params': {},
'session_id': '1PZTCw9JY8y-8lqBeuoJS2ILZMxa5m8N',
'state_name': 'middle',
'version': 1
}
# Add the incomplete exploration id to the incomplete list.
self.post_json(
'/explorehandler/exploration_maybe_leave_event/%s' % self.EXP_ID_0,
payload, csrf_token=csrf_token)
self.assertEqual(
learner_progress_services.get_all_incomplete_exp_ids(
self.user_id), [self.EXP_ID_0])
# Adding the exploration again has no effect.
self.post_json(
'/explorehandler/exploration_maybe_leave_event/%s' % self.EXP_ID_0,
payload, csrf_token=csrf_token)
self.assertEqual(
learner_progress_services.get_all_incomplete_exp_ids(
self.user_id), [self.EXP_ID_0])
payload = {
'client_time_spent_in_secs': 0,
'collection_id': self.COL_ID_1,
'params': {},
'session_id': '1PZTCw9JY8y-8lqBeuoJS2ILZMxa5m8N',
'state_name': 'middle',
'version': 1
}
# If the exploration is played in the context of a collection, the
# collection is also added to the incomplete list.
self.post_json(
'/explorehandler/exploration_maybe_leave_event/%s' % self.EXP_ID_1_0, # pylint: disable=line-too-long
payload, csrf_token=csrf_token)
self.assertEqual(
learner_progress_services.get_all_incomplete_exp_ids(
self.user_id), [self.EXP_ID_0, self.EXP_ID_1_0])
self.assertEqual(
learner_progress_services.get_all_incomplete_collection_ids(
self.user_id), [self.COL_ID_1])
def test_exp_incomplete_event_handler_with_no_version_raises_error(self):
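        """Check that leaving an exploration without a version in the payload
        results in a 400 error.
        """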
self.login(self.USER_EMAIL)
csrf_token = self.get_new_csrf_token()
payload = {
'client_time_spent_in_secs': 0,
'params': {},
'session_id': '1PZTCw9JY8y-8lqBeuoJS2ILZMxa5m8N',
'state_name': 'middle',
}
response = self.post_json(
'/explorehandler/exploration_maybe_leave_event/%s' % self.EXP_ID_0,
payload, csrf_token=csrf_token, expected_status_int=400)
self.assertEqual(response['error'], 'NONE EXP VERSION: Maybe quit')
def test_remove_exp_from_incomplete_list_handler(self):
"""Test handler for removing explorations from the partially completed
list.
"""
self.login(self.USER_EMAIL)
state_name = 'state_name'
version = 1
# Add two explorations to the partially completed list.
learner_progress_services.mark_exploration_as_incomplete(
self.user_id, self.EXP_ID_0, state_name, version)
learner_progress_services.mark_exploration_as_incomplete(
self.user_id, self.EXP_ID_1, state_name, version)
self.assertEqual(
learner_progress_services.get_all_incomplete_exp_ids(
self.user_id), [self.EXP_ID_0, self.EXP_ID_1])
# Remove one exploration.
self.delete_json(str(
'%s/%s/%s' %
(
feconf.LEARNER_INCOMPLETE_ACTIVITY_DATA_URL,
constants.ACTIVITY_TYPE_EXPLORATION,
self.EXP_ID_0)))
self.assertEqual(
learner_progress_services.get_all_incomplete_exp_ids(
self.user_id), [self.EXP_ID_1])
# Remove another exploration.
self.delete_json(str(
'%s/%s/%s' %
(
feconf.LEARNER_INCOMPLETE_ACTIVITY_DATA_URL,
constants.ACTIVITY_TYPE_EXPLORATION,
self.EXP_ID_1)))
self.assertEqual(
learner_progress_services.get_all_incomplete_exp_ids(
self.user_id), [])
def test_remove_collection_from_incomplete_list_handler(self):
"""Test handler for removing collections from incomplete list."""
self.login(self.USER_EMAIL)
# Add two collections to incomplete list.
learner_progress_services.mark_collection_as_incomplete(
self.user_id, self.COL_ID_0)
learner_progress_services.mark_collection_as_incomplete(
self.user_id, self.COL_ID_1)
self.assertEqual(
learner_progress_services.get_all_incomplete_collection_ids(
self.user_id), [self.COL_ID_0, self.COL_ID_1])
# Remove one collection.
self.delete_json(str(
'%s/%s/%s' %
(
feconf.LEARNER_INCOMPLETE_ACTIVITY_DATA_URL,
constants.ACTIVITY_TYPE_COLLECTION,
self.COL_ID_0)))
self.assertEqual(
learner_progress_services.get_all_incomplete_collection_ids(
self.user_id), [self.COL_ID_1])
# Remove another collection.
self.delete_json(str(
'%s/%s/%s' %
(
feconf.LEARNER_INCOMPLETE_ACTIVITY_DATA_URL,
constants.ACTIVITY_TYPE_COLLECTION,
self.COL_ID_1)))
self.assertEqual(
learner_progress_services.get_all_incomplete_collection_ids(
self.user_id), [])
class StorePlaythroughHandlerTest(test_utils.GenericTestBase):
"""Tests for the handler that records playthroughs."""
def setUp(self):
super(StorePlaythroughHandlerTest, self).setUp()
self.exp_id = '15'
self.signup(self.VIEWER_EMAIL, self.VIEWER_USERNAME)
self.login(self.VIEWER_EMAIL)
exp_services.load_demo(self.exp_id)
self.exploration = exp_fetchers.get_exploration_by_id(self.exp_id)
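        # Seed the datastore with an existing EarlyQuit playthrough and a
        # matching unresolved issue, so that the tests below can exercise both
        # appending to an existing issue and creating new ones.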
playthrough_id = stats_models.PlaythroughModel.create(
self.exp_id, self.exploration.version, 'EarlyQuit',
{
'state_name': {
'value': 'state_name1'
},
'time_spent_in_exp_in_msecs': {
'value': 200
}
},
[{
'action_type': 'ExplorationStart',
'action_customization_args': {
'state_name': {
'value': 'state_name1'
}
},
'schema_version': 1
}])
stats_models.ExplorationIssuesModel.create(
self.exp_id, 1, [{
'issue_type': 'EarlyQuit',
'issue_customization_args': {
'state_name': {
'value': 'state_name1'
},
'time_spent_in_exp_in_msecs': {
'value': 200
}
},
'playthrough_ids': [playthrough_id],
'schema_version': 1,
'is_valid': True
}])
self.playthrough_data = {
'exp_id': self.exp_id,
'exp_version': self.exploration.version,
'issue_type': 'EarlyQuit',
'issue_customization_args': {
'state_name': {
'value': 'state_name1'
},
'time_spent_in_exp_in_msecs': {
'value': 250
}
},
'actions': [{
'action_type': 'ExplorationStart',
'action_customization_args': {
'state_name': {
'value': 'state_name1'
}
},
'schema_version': 1
}]
}
self.csrf_token = self.get_new_csrf_token()
def test_new_playthrough_gets_stored(self):
"""Test that a new playthrough gets created and is added to an existing
issue's list of playthrough IDs.
"""
self.post_json(
'/explorehandler/store_playthrough/%s' % (self.exp_id),
{
'playthrough_data': self.playthrough_data,
'issue_schema_version': 1,
'playthrough_id': None
}, csrf_token=self.csrf_token)
self.process_and_flush_pending_tasks()
model = stats_models.ExplorationIssuesModel.get_model(self.exp_id, 1)
self.assertEqual(len(model.unresolved_issues), 1)
self.assertEqual(len(model.unresolved_issues[0]['playthrough_ids']), 2)
def test_new_exp_issue_gets_created(self):
"""Test that a new playthrough gets created and a new issue is created
for it.
"""
self.playthrough_data['issue_customization_args']['state_name'][
'value'] = 'state_name2'
self.post_json(
'/explorehandler/store_playthrough/%s' % (self.exp_id),
{
'playthrough_data': self.playthrough_data,
'issue_schema_version': 1,
'playthrough_id': None
}, csrf_token=self.csrf_token)
self.process_and_flush_pending_tasks()
model = stats_models.ExplorationIssuesModel.get_model(self.exp_id, 1)
self.assertEqual(len(model.unresolved_issues), 2)
self.assertEqual(len(model.unresolved_issues[0]['playthrough_ids']), 1)
self.assertEqual(len(model.unresolved_issues[1]['playthrough_ids']), 1)
def test_playthrough_gets_added_to_cyclic_issues(self):
"""Test that a new cyclic playthrough gets added to the correct
cyclic issue when it exists.
"""
playthrough_id = stats_models.PlaythroughModel.create(
self.exp_id, self.exploration.version, 'CyclicStateTransitions',
{
'state_names': {
'value': ['state_name1', 'state_name2', 'state_name1']
},
},
[{
'action_type': 'ExplorationStart',
'action_customization_args': {
'state_name': {
'value': 'state_name1'
}
},
'schema_version': 1
}])
model = stats_models.ExplorationIssuesModel.get_model(self.exp_id, 1)
model.unresolved_issues.append({
'issue_type': 'CyclicStateTransitions',
'issue_customization_args': {
'state_names': {
'value': ['state_name1', 'state_name2', 'state_name1']
},
},
'playthrough_ids': [playthrough_id],
'schema_version': 1,
'is_valid': True
})
model.put()
self.playthrough_data = {
'exp_id': self.exp_id,
'exp_version': self.exploration.version,
'issue_type': 'CyclicStateTransitions',
'issue_customization_args': {
'state_names': {
'value': ['state_name1', 'state_name2', 'state_name1']
},
},
'actions': [{
'action_type': 'ExplorationStart',
'action_customization_args': {
'state_name': {
'value': 'state_name1'
}
},
'schema_version': 1
}],
}
self.post_json(
'/explorehandler/store_playthrough/%s' % (self.exp_id),
{
'playthrough_data': self.playthrough_data,
'issue_schema_version': 1,
'playthrough_id': None
}, csrf_token=self.csrf_token)
self.process_and_flush_pending_tasks()
model = stats_models.ExplorationIssuesModel.get_model(self.exp_id, 1)
self.assertEqual(len(model.unresolved_issues), 2)
self.assertEqual(len(model.unresolved_issues[0]['playthrough_ids']), 1)
self.assertEqual(len(model.unresolved_issues[1]['playthrough_ids']), 2)
def test_cyclic_issues_of_different_order_creates_new_issue(self):
"""Test that a cyclic issue with the same list of states, but in
        a different order, creates a new issue.
"""
playthrough_id = stats_models.PlaythroughModel.create(
self.exp_id, self.exploration.version, 'CyclicStateTransitions',
{
'state_names': {
'value': ['state_name1', 'state_name2', 'state_name1']
},
},
[{
'action_type': 'ExplorationStart',
'action_customization_args': {
'state_name': {
'value': 'state_name1'
}
},
'schema_version': 1
}])
model = stats_models.ExplorationIssuesModel.get_model(self.exp_id, 1)
model.unresolved_issues.append({
'issue_type': 'CyclicStateTransitions',
'issue_customization_args': {
'state_names': {
'value': ['state_name1', 'state_name2', 'state_name1']
},
},
'playthrough_ids': [playthrough_id],
'schema_version': 1,
'is_valid': True
})
model.put()
self.playthrough_data = {
'exp_id': self.exp_id,
'exp_version': self.exploration.version,
'issue_type': 'CyclicStateTransitions',
'issue_customization_args': {
'state_names': {
'value': ['state_name1', 'state_name1', 'state_name2']
},
},
'actions': [{
'action_type': 'ExplorationStart',
'action_customization_args': {
'state_name': {
'value': 'state_name1'
}
},
'schema_version': 1
}]
}
self.post_json(
'/explorehandler/store_playthrough/%s' % (self.exp_id),
{
'playthrough_data': self.playthrough_data,
'issue_schema_version': 1,
'playthrough_id': None
}, csrf_token=self.csrf_token)
self.process_and_flush_pending_tasks()
model = stats_models.ExplorationIssuesModel.get_model(self.exp_id, 1)
self.assertEqual(len(model.unresolved_issues), 3)
self.assertEqual(len(model.unresolved_issues[0]['playthrough_ids']), 1)
self.assertEqual(len(model.unresolved_issues[1]['playthrough_ids']), 1)
self.assertEqual(len(model.unresolved_issues[2]['playthrough_ids']), 1)
def test_playthrough_not_stored_at_limiting_value(self):
"""Test that a playthrough is not stored when the maximum number of
playthroughs per issue already exists.
"""
model = stats_models.ExplorationIssuesModel.get_model(self.exp_id, 1)
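        # Pre-fill the issue with five playthrough IDs; the handler appears to
        # cap the number of playthroughs stored per issue at five, so the new
        # playthrough submitted below should be dropped.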
model.unresolved_issues[0]['playthrough_ids'] = [
'id1', 'id2', 'id3', 'id4', 'id5']
model.put()
self.post_json(
'/explorehandler/store_playthrough/%s' % (self.exp_id),
{
'playthrough_data': self.playthrough_data,
'issue_schema_version': 1,
'playthrough_id': None
}, csrf_token=self.csrf_token)
self.process_and_flush_pending_tasks()
model = stats_models.ExplorationIssuesModel.get_model(self.exp_id, 1)
self.assertEqual(len(model.unresolved_issues), 1)
self.assertEqual(len(model.unresolved_issues[0]['playthrough_ids']), 5)
def test_error_without_schema_version_in_payload_dict(self):
"""Test that passing a payload without schema version raises an
exception.
"""
payload_dict_without_schema_version = {
'playthrough_data': self.playthrough_data,
'playthrough_id': None
}
self.post_json(
'/explorehandler/store_playthrough/%s' % (self.exp_id),
payload_dict_without_schema_version,
csrf_token=self.csrf_token,
expected_status_int=400)
def test_error_on_invalid_playthrough_dict(self):
"""Test that passing an invalid playthrough dict raises an exception."""
self.playthrough_data['issue_type'] = 'FakeIssueType'
self.post_json(
'/explorehandler/store_playthrough/%s' % (self.exp_id),
{
'playthrough_data': self.playthrough_data,
'issue_schema_version': 1,
'playthrough_id': None
}, csrf_token=self.csrf_token,
expected_status_int=400)
def test_playthrough_id_is_returned(self):
"""Test that playthrough ID is returned when it is stored for the first
time and the playthrough is updated from the next time.
"""
response = self.post_json(
'/explorehandler/store_playthrough/%s' % (self.exp_id),
{
'playthrough_data': self.playthrough_data,
'issue_schema_version': 1,
'playthrough_id': None
}, csrf_token=self.csrf_token)
self.process_and_flush_pending_tasks()
model = stats_models.ExplorationIssuesModel.get_model(self.exp_id, 1)
self.assertEqual(len(model.unresolved_issues), 1)
self.assertEqual(len(model.unresolved_issues[0]['playthrough_ids']), 2)
playthrough_id = model.unresolved_issues[0]['playthrough_ids'][1]
self.assertEqual(response['playthrough_id'], playthrough_id)
self.assertEqual(response['playthrough_stored'], True)
def test_playthrough_is_subsequently_updated(self):
"""Test that a playthrough is updated if the controller is called for
the second time.
"""
response = self.post_json(
'/explorehandler/store_playthrough/%s' % (self.exp_id),
{
'playthrough_data': self.playthrough_data,
'issue_schema_version': 1,
'playthrough_id': None
}, csrf_token=self.csrf_token)
self.process_and_flush_pending_tasks()
playthrough_id = response['playthrough_id']
self.playthrough_data['id'] = playthrough_id
self.playthrough_data['issue_customization_args'][
'time_spent_in_exp_in_msecs']['value'] = 150
self.post_json(
'/explorehandler/store_playthrough/%s' % (self.exp_id),
{
'playthrough_data': self.playthrough_data,
'issue_schema_version': 1,
'playthrough_id': playthrough_id
}, csrf_token=self.csrf_token)
model = stats_models.ExplorationIssuesModel.get_model(self.exp_id, 1)
self.assertEqual(len(model.unresolved_issues), 1)
self.assertEqual(len(model.unresolved_issues[0]['playthrough_ids']), 2)
playthrough_id = model.unresolved_issues[0]['playthrough_ids'][1]
playthrough = stats_services.get_playthrough_by_id(playthrough_id)
self.assertEqual(
playthrough.issue_customization_args['time_spent_in_exp_in_msecs'][
'value'], 150)
def test_updating_playthrough_issue(self):
"""Test that updating an existing playthrough's issue creates a new
issue if the issue doesn't exist.
"""
response = self.post_json(
'/explorehandler/store_playthrough/%s' % (self.exp_id),
{
'playthrough_data': self.playthrough_data,
'issue_schema_version': 1,
'playthrough_id': None
}, csrf_token=self.csrf_token)
self.process_and_flush_pending_tasks()
playthrough_id = response['playthrough_id']
self.playthrough_data['id'] = playthrough_id
self.playthrough_data['issue_type'] = 'CyclicStateTransitions'
self.playthrough_data['issue_customization_args'] = {
'state_names': {
'value': ['state1', 'state2', 'state1']
}
}
self.post_json(
'/explorehandler/store_playthrough/%s' % (self.exp_id),
{
'playthrough_data': self.playthrough_data,
'issue_schema_version': 1,
'playthrough_id': playthrough_id
}, csrf_token=self.csrf_token)
model = stats_models.ExplorationIssuesModel.get_model(self.exp_id, 1)
self.assertEqual(len(model.unresolved_issues), 2)
self.assertEqual(len(model.unresolved_issues[0]['playthrough_ids']), 1)
self.assertEqual(len(model.unresolved_issues[1]['playthrough_ids']), 1)
playthrough_id = model.unresolved_issues[1]['playthrough_ids'][0]
playthrough = stats_services.get_playthrough_by_id(playthrough_id)
self.assertEqual(playthrough.issue_type, 'CyclicStateTransitions')
self.assertEqual(
playthrough.issue_customization_args['state_names'][
'value'], ['state1', 'state2', 'state1'])
def test_cannot_update_playthrough_issue_with_no_playthrough_id(self):
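        """Check that omitting playthrough_id from the payload results in a
        400 error.
        """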
response = self.post_json(
'/explorehandler/store_playthrough/%s' % (self.exp_id),
{
'playthrough_data': self.playthrough_data,
'issue_schema_version': 1
}, csrf_token=self.csrf_token, expected_status_int=400)
self.assertEqual(response['error'], u'\'playthrough_id\'')
def test_move_playthrough_to_correct_issue(self):
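        """Check that a playthrough whose issue type is updated is associated
        with the matching existing issue.
        """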
playthrough_id = stats_models.PlaythroughModel.create(
self.exp_id, 1, 'MultipleIncorrectSubmissions',
{
'state_name': {
'value': 'state_name1'
},
'num_times_answered_incorrectly': {
'value': 7
}
},
[{
'action_type': 'ExplorationStart',
'action_customization_args': {
'state_name': {
'value': 'state_name1'
}
},
'schema_version': 1
}])
stats_models.ExplorationIssuesModel.create(
self.exp_id, 1, [{
'issue_type': 'MultipleIncorrectSubmissions',
'issue_customization_args': {
'state_name': {
'value': 'state_name1'
},
'num_times_answered_incorrectly': {
'value': 7
}
},
'playthrough_ids': [playthrough_id],
'schema_version': 1,
'is_valid': True
}])
response = self.post_json(
'/explorehandler/store_playthrough/%s' % (self.exp_id),
{
'playthrough_data': self.playthrough_data,
'issue_schema_version': 1,
'playthrough_id': None
}, csrf_token=self.csrf_token)
self.playthrough_data['id'] = playthrough_id
self.playthrough_data['issue_type'] = 'MultipleIncorrectSubmissions'
self.playthrough_data['issue_customization_args'] = {
'state_name': {
'value': 'state_name1'
}
}
response = self.post_json(
'/explorehandler/store_playthrough/%s' % (self.exp_id),
{
'playthrough_data': self.playthrough_data,
'issue_schema_version': 1,
'playthrough_id': response['playthrough_id']
}, csrf_token=self.csrf_token)
playthrough = stats_services.get_playthrough_by_id(playthrough_id)
self.assertEqual(playthrough.issue_type, 'MultipleIncorrectSubmissions')
self.assertEqual(
playthrough.issue_customization_args['state_name'][
'value'], 'state_name1')
class StatsEventHandlerTest(test_utils.GenericTestBase):
"""Tests for all the statistics event models recording handlers."""
def setUp(self):
super(StatsEventHandlerTest, self).setUp()
self.exp_id = '15'
        self.signup(self.VIEWER_EMAIL, self.VIEWER_USERNAME)
        self.login(self.VIEWER_EMAIL)
exp_services.load_demo(self.exp_id)
exploration = exp_fetchers.get_exploration_by_id(self.exp_id)
self.exp_version = exploration.version
self.state_name = 'Home'
self.session_id = 'session_id1'
state_stats_mapping = {
self.state_name: stats_domain.StateStats.create_default()
}
exploration_stats = stats_domain.ExplorationStats(
self.exp_id, self.exp_version, 0, 0, 0, 0, 0, 0,
state_stats_mapping)
stats_services.create_stats_model(exploration_stats)
self.aggregated_stats = {
'num_starts': 1,
'num_actual_starts': 1,
'num_completions': 1,
'state_stats_mapping': {
'Home': {
'total_hit_count': 1,
'first_hit_count': 1,
'total_answers_count': 1,
'useful_feedback_count': 1,
'num_times_solution_viewed': 1,
'num_completions': 1
}
}
}
def test_none_version_raises_exception(self):
"""Test that error is raised on None exp_version."""
self.post_json(
'/explorehandler/stats_events/%s' % (
self.exp_id), {
'aggregated_stats': self.aggregated_stats,
'exp_version': None},
expected_status_int=400)
def test_stats_events_handler(self):
"""Test the handler for handling batched events."""
self.post_json('/explorehandler/stats_events/%s' % (
self.exp_id), {
'aggregated_stats': self.aggregated_stats,
'exp_version': self.exp_version})
self.assertEqual(self.count_jobs_in_taskqueue(
taskqueue_services.QUEUE_NAME_STATS), 1)
self.process_and_flush_pending_tasks()
# Check that the models are updated.
exploration_stats = stats_services.get_exploration_stats_by_id(
self.exp_id, self.exp_version)
self.assertEqual(exploration_stats.num_starts_v2, 1)
self.assertEqual(exploration_stats.num_actual_starts_v2, 1)
self.assertEqual(exploration_stats.num_completions_v2, 1)
self.assertEqual(
exploration_stats.state_stats_mapping[
self.state_name].total_hit_count_v2, 1)
self.assertEqual(
exploration_stats.state_stats_mapping[
self.state_name].first_hit_count_v2, 1)
self.assertEqual(
exploration_stats.state_stats_mapping[
self.state_name].total_answers_count_v2, 1)
self.assertEqual(
exploration_stats.state_stats_mapping[
self.state_name].useful_feedback_count_v2, 1)
self.assertEqual(
exploration_stats.state_stats_mapping[
self.state_name].num_completions_v2, 1)
self.assertEqual(
exploration_stats.state_stats_mapping[
self.state_name].num_times_solution_viewed_v2, 1)
def test_stats_events_handler_raises_error_with_invalid_exp_stats_property(
self):
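        """Check that a missing exploration-level stats property (here,
        num_starts) is logged as an error.
        """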
observed_log_messages = []
def _mock_logging_function(msg):
"""Mocks logging.error()."""
observed_log_messages.append(msg)
self.aggregated_stats.pop('num_starts')
with self.swap(logging, 'error', _mock_logging_function):
self.post_json('/explorehandler/stats_events/%s' % (
self.exp_id), {
'aggregated_stats': self.aggregated_stats,
'exp_version': self.exp_version})
self.assertEqual(len(observed_log_messages), 1)
self.assertIn(
'num_starts not in aggregated stats dict.',
observed_log_messages[0])
def test_stats_events_handler_raise_error_with_invalid_state_stats_property(
self):
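        """Check that a missing state-level stats property (here,
        total_hit_count) is logged as an error.
        """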
observed_log_messages = []
def _mock_logging_function(msg):
"""Mocks logging.error()."""
observed_log_messages.append(msg)
self.aggregated_stats['state_stats_mapping']['Home'].pop(
'total_hit_count')
with self.swap(logging, 'error', _mock_logging_function):
self.post_json('/explorehandler/stats_events/%s' % (
self.exp_id), {
'aggregated_stats': self.aggregated_stats,
'exp_version': self.exp_version})
self.assertEqual(len(observed_log_messages), 1)
self.assertIn(
'total_hit_count not in state stats mapping '
'of Home in aggregated stats dict.',
observed_log_messages[0])
class AnswerSubmittedEventHandlerTest(test_utils.GenericTestBase):
"""Tests for the answer submitted event handler."""
def test_submit_answer_for_exploration(self):
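        """Check that a submitted answer is recorded in the state answers of
        the exploration.
        """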
# Load demo exploration.
exp_id = '6'
exp_services.delete_demo(exp_id)
exp_services.load_demo(exp_id)
version = 1
self.signup(self.VIEWER_EMAIL, self.VIEWER_USERNAME)
self.login(self.VIEWER_EMAIL)
exploration_dict = self.get_json(
'%s/%s' % (feconf.EXPLORATION_INIT_URL_PREFIX, exp_id))
state_name_1 = exploration_dict['exploration']['init_state_name']
self.post_json(
'/explorehandler/answer_submitted_event/%s' % exp_id,
{
'old_state_name': state_name_1,
'answer': 'This is an answer.',
'version': version,
'client_time_spent_in_secs': 0,
'session_id': '1PZTCw9JY8y-8lqBeuoJS2ILZMxa5m8N',
'answer_group_index': 0,
'rule_spec_index': 0,
'classification_categorization': (
exp_domain.EXPLICIT_CLASSIFICATION),
}
)
submitted_answer = stats_services.get_state_answers(
exp_id, version, state_name_1)
self.assertEqual(
len(submitted_answer.get_submitted_answer_dict_list()), 1)
self.assertEqual(
submitted_answer.get_submitted_answer_dict_list()[0]['answer'],
'This is an answer.'
)
self.logout()
def test_submit_answer_for_exploration_raises_error_with_no_version(self):
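        """Check that submitting an answer with a None exploration version
        results in a 400 error.
        """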
exp_id = '6'
exp_services.delete_demo(exp_id)
exp_services.load_demo(exp_id)
self.signup(self.VIEWER_EMAIL, self.VIEWER_USERNAME)
self.login(self.VIEWER_EMAIL)
exploration_dict = self.get_json(
'%s/%s' % (feconf.EXPLORATION_INIT_URL_PREFIX, exp_id))
state_name_1 = exploration_dict['exploration']['init_state_name']
response = self.post_json(
'/explorehandler/answer_submitted_event/%s' % exp_id,
{
'old_state_name': state_name_1,
'answer': 'This is an answer.',
'version': None,
'client_time_spent_in_secs': 0,
'session_id': '1PZTCw9JY8y-8lqBeuoJS2ILZMxa5m8N',
'answer_group_index': 0,
'rule_spec_index': 0,
'classification_categorization': (
exp_domain.EXPLICIT_CLASSIFICATION),
}, expected_status_int=400
)
self.assertEqual(response['error'], 'NONE EXP VERSION: Answer Submit')
class StateHitEventHandlerTests(test_utils.GenericTestBase):
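    """Tests for the state hit event handler."""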
def setUp(self):
super(StateHitEventHandlerTests, self).setUp()
self.signup(self.VIEWER_EMAIL, self.VIEWER_USERNAME)
def test_hitting_new_state(self):
self.login(self.VIEWER_EMAIL)
# Load demo exploration.
exp_id = '6'
exp_services.delete_demo(exp_id)
exp_services.load_demo(exp_id)
exploration_version = 1
all_models = (
stats_models.StateHitEventLogEntryModel.get_all())
self.assertEqual(all_models.count(), 0)
self.post_json(
'/explorehandler/state_hit_event/%s' % exp_id,
{
'new_state_name': 'new_state',
'exploration_version': exploration_version,
'client_time_spent_in_secs': 0,
'session_id': 'session_id',
'old_params': {}
}
)
all_models = (
stats_models.StateHitEventLogEntryModel.get_all())
self.assertEqual(all_models.count(), 1)
model = all_models.get()
self.assertEqual(model.exploration_id, exp_id)
self.assertEqual(model.state_name, 'new_state')
self.assertEqual(model.session_id, 'session_id')
self.assertEqual(model.exploration_version, exploration_version)
self.assertEqual(model.params, {})
self.assertEqual(model.play_type, feconf.PLAY_TYPE_NORMAL)
self.logout()
def test_cannot_hit_new_state_with_no_exploration_version(self):
self.login(self.VIEWER_EMAIL)
# Load demo exploration.
exp_id = '6'
exp_services.delete_demo(exp_id)
exp_services.load_demo(exp_id)
all_models = (
stats_models.StateHitEventLogEntryModel.get_all())
self.assertEqual(all_models.count(), 0)
response = self.post_json(
'/explorehandler/state_hit_event/%s' % exp_id,
{
'new_state_name': 'new_state',
'exploration_version': None,
'client_time_spent_in_secs': 0,
'session_id': 'session_id',
'old_params': {}
}, expected_status_int=400
)
self.assertEqual(response['error'], 'NONE EXP VERSION: State hit')
self.logout()
def test_cannot_hit_new_state_with_no_new_state_name(self):
self.login(self.VIEWER_EMAIL)
observed_log_messages = []
def _mock_logging_function(msg):
"""Mocks logging.error()."""
observed_log_messages.append(msg)
# Load demo exploration.
exp_id = '6'
exp_services.delete_demo(exp_id)
exp_services.load_demo(exp_id)
exploration_version = 1
all_models = (
stats_models.StateHitEventLogEntryModel.get_all())
self.assertEqual(all_models.count(), 0)
with self.swap(logging, 'error', _mock_logging_function):
self.post_json(
'/explorehandler/state_hit_event/%s' % exp_id,
{
'new_state_name': None,
'exploration_version': exploration_version,
'client_time_spent_in_secs': 0,
'session_id': 'session_id',
'old_params': {}
}
)
self.assertEqual(
observed_log_messages,
['Unexpected StateHit event for the END state.'])
self.logout()
class StateCompleteEventHandlerTests(test_utils.GenericTestBase):
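    """Tests for the state complete event handler."""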
def setUp(self):
super(StateCompleteEventHandlerTests, self).setUp()
self.signup(self.VIEWER_EMAIL, self.VIEWER_USERNAME)
def test_completing_a_state(self):
self.login(self.VIEWER_EMAIL)
# Load demo exploration.
exp_id = '6'
exp_services.delete_demo(exp_id)
exp_services.load_demo(exp_id)
exp_version = 1
all_models = (
stats_models.StateCompleteEventLogEntryModel.get_all())
self.assertEqual(all_models.count(), 0)
self.post_json(
'/explorehandler/state_complete_event/%s' % exp_id,
{
'state_name': 'state_name',
'exp_version': exp_version,
'time_spent_in_state_secs': 2.0,
'session_id': 'session_id'
}
)
all_models = (
stats_models.StateCompleteEventLogEntryModel.get_all())
self.assertEqual(all_models.count(), 1)
model = all_models.get()
self.assertEqual(model.exp_id, exp_id)
self.assertEqual(model.state_name, 'state_name')
self.assertEqual(model.session_id, 'session_id')
self.assertEqual(model.exp_version, exp_version)
self.assertEqual(model.time_spent_in_state_secs, 2.0)
self.logout()
def test_cannot_complete_state_with_no_exploration_version(self):
self.login(self.VIEWER_EMAIL)
# Load demo exploration.
exp_id = '6'
exp_services.delete_demo(exp_id)
exp_services.load_demo(exp_id)
all_models = (
stats_models.StateCompleteEventLogEntryModel.get_all())
self.assertEqual(all_models.count(), 0)
response = self.post_json(
'/explorehandler/state_complete_event/%s' % exp_id,
{
'state_name': 'state_name',
'time_spent_in_state_secs': 2.0,
'session_id': 'session_id'
}, expected_status_int=400
)
self.assertEqual(response['error'], 'NONE EXP VERSION: State Complete')
self.logout()
class LeaveForRefresherExpEventHandlerTests(test_utils.GenericTestBase):
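    """Tests for the leave-for-refresher-exploration event handler."""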
def setUp(self):
super(LeaveForRefresherExpEventHandlerTests, self).setUp()
self.signup(self.VIEWER_EMAIL, self.VIEWER_USERNAME)
def test_leaving_an_exploration(self):
self.login(self.VIEWER_EMAIL)
# Load demo exploration.
exp_id = '6'
exp_services.delete_demo(exp_id)
exp_services.load_demo(exp_id)
exp_version = 1
all_models = (
stats_models.LeaveForRefresherExplorationEventLogEntryModel
.get_all())
self.assertEqual(all_models.count(), 0)
self.post_json(
'/explorehandler/leave_for_refresher_exp_event/%s' % exp_id,
{
'state_name': 'state_name',
'exp_version': exp_version,
'time_spent_in_state_secs': 2.0,
'session_id': 'session_id',
'refresher_exp_id': 'refresher_exp_id'
}
)
all_models = (
stats_models.LeaveForRefresherExplorationEventLogEntryModel
.get_all())
self.assertEqual(all_models.count(), 1)
model = all_models.get()
self.assertEqual(model.exp_id, exp_id)
self.assertEqual(model.refresher_exp_id, 'refresher_exp_id')
self.assertEqual(model.state_name, 'state_name')
self.assertEqual(model.session_id, 'session_id')
self.assertEqual(model.exp_version, exp_version)
self.assertEqual(model.time_spent_in_state_secs, 2.0)
self.logout()
class ExplorationStartEventHandlerTests(test_utils.GenericTestBase):
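    """Tests for the exploration start event handler."""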
def setUp(self):
super(ExplorationStartEventHandlerTests, self).setUp()
self.signup(self.VIEWER_EMAIL, self.VIEWER_USERNAME)
def test_starting_a_state(self):
self.login(self.VIEWER_EMAIL)
# Load demo exploration.
exp_id = '6'
exp_services.delete_demo(exp_id)
exp_services.load_demo(exp_id)
version = 1
all_models = (
stats_models.StartExplorationEventLogEntryModel.get_all())
self.assertEqual(all_models.count(), 0)
self.post_json(
'/explorehandler/exploration_start_event/%s' % exp_id,
{
'state_name': 'state_name',
'version': version,
'params': {},
'session_id': 'session_id'
}
)
all_models = (
stats_models.StartExplorationEventLogEntryModel.get_all())
self.assertEqual(all_models.count(), 1)
model = all_models.get()
self.assertEqual(model.exploration_id, exp_id)
self.assertEqual(model.state_name, 'state_name')
self.assertEqual(model.session_id, 'session_id')
self.assertEqual(model.exploration_version, version)
self.assertEqual(model.params, {})
self.assertEqual(model.play_type, feconf.PLAY_TYPE_NORMAL)
self.logout()
def test_cannot_start_a_state_with_no_exploration_version(self):
self.login(self.VIEWER_EMAIL)
# Load demo exploration.
exp_id = '6'
exp_services.delete_demo(exp_id)
exp_services.load_demo(exp_id)
all_models = (
stats_models.StartExplorationEventLogEntryModel.get_all())
self.assertEqual(all_models.count(), 0)
response = self.post_json(
'/explorehandler/exploration_start_event/%s' % exp_id,
{
'state_name': 'state_name',
'params': {},
'session_id': 'session_id'
}, expected_status_int=400
)
self.assertEqual(
response['error'], 'NONE EXP VERSION: Exploration start')
self.logout()
class ExplorationActualStartEventHandlerTests(test_utils.GenericTestBase):
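    """Tests for the exploration actual start event handler."""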
def setUp(self):
super(ExplorationActualStartEventHandlerTests, self).setUp()
self.signup(self.VIEWER_EMAIL, self.VIEWER_USERNAME)
def test_actually_starting_a_state(self):
self.login(self.VIEWER_EMAIL)
# Load demo exploration.
exp_id = '6'
exp_services.delete_demo(exp_id)
exp_services.load_demo(exp_id)
version = 1
all_models = (
stats_models.ExplorationActualStartEventLogEntryModel.get_all())
self.assertEqual(all_models.count(), 0)
self.post_json(
'/explorehandler/exploration_actual_start_event/%s' % exp_id,
{
'state_name': 'state_name',
'exploration_version': version,
'session_id': 'session_id'
}
)
all_models = (
stats_models.ExplorationActualStartEventLogEntryModel.get_all())
self.assertEqual(all_models.count(), 1)
model = all_models.get()
self.assertEqual(model.exp_id, exp_id)
self.assertEqual(model.state_name, 'state_name')
self.assertEqual(model.session_id, 'session_id')
self.assertEqual(model.exp_version, version)
self.logout()
def test_cannot_actually_start_a_state_with_no_exploration_version(self):
self.login(self.VIEWER_EMAIL)
# Load demo exploration.
exp_id = '6'
exp_services.delete_demo(exp_id)
exp_services.load_demo(exp_id)
all_models = (
stats_models.ExplorationActualStartEventLogEntryModel.get_all())
self.assertEqual(all_models.count(), 0)
response = self.post_json(
'/explorehandler/exploration_actual_start_event/%s' % exp_id,
{
'state_name': 'state_name',
'session_id': 'session_id'
}, expected_status_int=400
)
self.assertEqual(response['error'], 'NONE EXP VERSION: Actual Start')
self.logout()
class SolutionHitEventHandlerTests(test_utils.GenericTestBase):
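    """Tests for the solution hit event handler."""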
def setUp(self):
super(SolutionHitEventHandlerTests, self).setUp()
self.signup(self.VIEWER_EMAIL, self.VIEWER_USERNAME)
def test_viewing_solution(self):
self.login(self.VIEWER_EMAIL)
# Load demo exploration.
exp_id = '6'
exp_services.delete_demo(exp_id)
exp_services.load_demo(exp_id)
version = 1
all_models = (
stats_models.SolutionHitEventLogEntryModel.get_all())
self.assertEqual(all_models.count(), 0)
self.post_json(
'/explorehandler/solution_hit_event/%s' % exp_id,
{
'state_name': 'state_name',
'exploration_version': version,
'session_id': 'session_id',
'time_spent_in_state_secs': 2.0
}
)
all_models = (
stats_models.SolutionHitEventLogEntryModel.get_all())
self.assertEqual(all_models.count(), 1)
model = all_models.get()
self.assertEqual(model.exp_id, exp_id)
self.assertEqual(model.state_name, 'state_name')
self.assertEqual(model.session_id, 'session_id')
self.assertEqual(model.exp_version, version)
self.assertEqual(model.time_spent_in_state_secs, 2.0)
self.logout()
def test_cannot_view_solution_with_no_exploration_version(self):
self.login(self.VIEWER_EMAIL)
# Load demo exploration.
exp_id = '6'
exp_services.delete_demo(exp_id)
exp_services.load_demo(exp_id)
all_models = (
stats_models.SolutionHitEventLogEntryModel.get_all())
self.assertEqual(all_models.count(), 0)
response = self.post_json(
'/explorehandler/solution_hit_event/%s' % exp_id,
{
'state_name': 'state_name',
'session_id': 'session_id',
'time_spent_in_state_secs': 2.0
}, expected_status_int=400
)
self.assertEqual(response['error'], 'NONE EXP VERSION: Solution hit')
self.logout()
class ExplorationEmbedPageTests(test_utils.GenericTestBase):
COL_ID = 'col_id'
EXP_ID = 'exp_id'
def setUp(self):
super(ExplorationEmbedPageTests, self).setUp()
self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
def test_exploration_embed_page(self):
self.login(self.OWNER_EMAIL)
self.save_new_valid_collection(self.COL_ID, self.owner_id)
exploration = self.save_new_valid_exploration(
self.EXP_ID, self.owner_id)
response = self.get_html_response(
'%s/%s' % (feconf.EXPLORATION_URL_EMBED_PREFIX, self.EXP_ID),
params={
'v': exploration.version,
'collection_id': self.COL_ID
}
)
self.assertIn(
'<exploration-player-page></exploration-player-page>',
response.body)
self.logout()
def test_handler_raises_error_with_invalid_exploration_id(self):
self.login(self.OWNER_EMAIL)
self.save_new_valid_collection(self.COL_ID, self.owner_id)
self.get_html_response(
'%s/invalid_exp_id' % (feconf.EXPLORATION_URL_EMBED_PREFIX),
params={
'collection_id': self.COL_ID
}, expected_status_int=404
)
self.logout()
def test_handler_raises_error_with_invalid_collection_id(self):
self.login(self.OWNER_EMAIL)
exploration = self.save_new_valid_exploration(
self.EXP_ID, self.owner_id)
self.get_html_response(
'%s/%s' % (feconf.EXPLORATION_URL_EMBED_PREFIX, self.EXP_ID),
params={
'v': exploration.version,
'collection_id': 'invalid_collection_id'
}, expected_status_int=404
)
self.logout()
def test_handler_raises_error_with_invalid_version(self):
self.login(self.OWNER_EMAIL)
self.save_new_valid_exploration(self.EXP_ID, self.owner_id)
self.save_new_valid_collection(self.COL_ID, self.owner_id)
self.get_html_response(
'%s/%s' % (feconf.EXPLORATION_URL_EMBED_PREFIX, self.EXP_ID),
params={
'v': '10',
'collection_id': self.COL_ID
}, expected_status_int=404
)
self.logout()
class LearnerAnswerDetailsSubmissionHandlerTests(test_utils.GenericTestBase):
"""Tests for learner answer info handler tests."""
def test_submit_learner_answer_details_for_exploration_states(self):
self.signup(self.VIEWER_EMAIL, self.VIEWER_USERNAME)
self.login(self.VIEWER_EMAIL)
exp_id = '6'
exp_services.delete_demo(exp_id)
exp_services.load_demo(exp_id)
entity_type = feconf.ENTITY_TYPE_EXPLORATION
csrf_token = self.get_new_csrf_token()
self.put_json(
'%s/%s/%s' % (
feconf.LEARNER_ANSWER_DETAILS_SUBMIT_URL, entity_type, exp_id),
{
'state_name': 'abc',
'interaction_id': 'TextInput',
'answer': 'This is an answer.',
'answer_details': 'This is an answer details.',
}, csrf_token=csrf_token, expected_status_int=404)
with self.swap(
constants, 'ENABLE_SOLICIT_ANSWER_DETAILS_FEATURE', True):
exploration_dict = self.get_json(
'%s/%s' % (feconf.EXPLORATION_INIT_URL_PREFIX, exp_id))
state_name = exploration_dict['exploration']['init_state_name']
interaction_id = exploration_dict['exploration'][
'states'][state_name]['interaction']['id']
state_reference = (
stats_models.LearnerAnswerDetailsModel
.get_state_reference_for_exploration(exp_id, state_name))
self.assertEqual(state_name, 'Sentence')
self.assertEqual(interaction_id, 'TextInput')
self.put_json(
'%s/%s/%s' % (
feconf.LEARNER_ANSWER_DETAILS_SUBMIT_URL,
entity_type, exp_id),
{
'state_name': state_name,
'interaction_id': interaction_id,
'answer': 'This is an answer.',
'answer_details': 'This is an answer details.',
}, csrf_token=csrf_token)
learner_answer_details = stats_services.get_learner_answer_details(
entity_type, state_reference)
self.assertEqual(
learner_answer_details.state_reference, state_reference)
self.assertEqual(
learner_answer_details.interaction_id, interaction_id)
self.assertEqual(
len(learner_answer_details.learner_answer_info_list), 1)
self.assertEqual(
learner_answer_details.learner_answer_info_list[0].answer,
'This is an answer.')
self.assertEqual(
learner_answer_details.learner_answer_info_list[0]
.answer_details,
'This is an answer details.')
self.put_json(
'%s/%s/%s' % (
feconf.LEARNER_ANSWER_DETAILS_SUBMIT_URL,
entity_type, exp_id),
{
'state_name': state_name,
'interaction_id': 'GraphInput',
'answer': 'This is an answer.',
'answer_details': 'This is an answer details.',
}, csrf_token=csrf_token, expected_status_int=500)
def test_submit_learner_answer_details_for_question(self):
self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME)
self.login(self.EDITOR_EMAIL)
csrf_token = self.get_new_csrf_token()
editor_id = self.get_user_id_from_email(
self.EDITOR_EMAIL)
question_id = question_services.get_new_question_id()
self.save_new_question(
question_id, editor_id,
self._create_valid_question_data('ABC'), ['skill_1'])
with self.swap(
constants, 'ENABLE_SOLICIT_ANSWER_DETAILS_FEATURE', True):
state_reference = (
stats_models.LearnerAnswerDetailsModel
.get_state_reference_for_question(question_id))
self.assertEqual(state_reference, question_id)
self.put_json(
'%s/%s/%s' % (
feconf.LEARNER_ANSWER_DETAILS_SUBMIT_URL,
feconf.ENTITY_TYPE_QUESTION, question_id),
{
'interaction_id': 'TextInput',
'answer': 'This is an answer.',
'answer_details': 'This is an answer details.',
}, csrf_token=csrf_token)
learner_answer_details = stats_services.get_learner_answer_details(
feconf.ENTITY_TYPE_QUESTION, state_reference)
self.assertEqual(
learner_answer_details.state_reference, state_reference)
self.put_json(
'%s/%s/%s' % (
feconf.LEARNER_ANSWER_DETAILS_SUBMIT_URL,
feconf.ENTITY_TYPE_QUESTION, question_id),
{
'interaction_id': 'TextInput',
'answer': 'This is an answer.',
'answer_details': 'This is an answer details.',
}, csrf_token=csrf_token)
self.put_json(
'%s/%s/%s' % (
feconf.LEARNER_ANSWER_DETAILS_SUBMIT_URL,
feconf.ENTITY_TYPE_QUESTION, question_id),
{
'interaction_id': 'GraphInput',
'answer': 'This is an answer.',
'answer_details': 'This is an answer details.',
}, csrf_token=csrf_token, expected_status_int=500)
|
souravbadami/oppia
|
core/controllers/reader_test.py
|
Python
|
apache-2.0
| 110,025
|
[
"VisIt"
] |
8af0058f6c2fb29c1d4f3d824724eb05bd24ae3caf60e43ae125e6e10c0b3cf4
|
import numpy as np
import pytest
from elfi.methods import mcmc
# construct a covariance matrix and calculate the precision matrix
n = 5
true_cov = np.random.rand(n, n) * 0.5
true_cov += true_cov.T
true_cov += n * np.eye(n)
prec = np.linalg.inv(true_cov)
# log multivariate Gaussian pdf
def log_pdf(x):
return -0.5 * x.dot(prec).dot(x)
# gradient of log multivariate Gaussian pdf
def grad_log_pdf(x):
return -x.dot(prec)
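# A minimal sanity-check sketch, not part of the original test module: for the
# quadratic log_pdf above, the analytic gradient can be compared against a
# central finite-difference approximation. The helper name and tolerance are
# illustrative additions, not ELFI API.
def _check_grad_log_pdf(eps=1e-6):
    x = np.random.rand(n)
    numeric = np.array([(log_pdf(x + eps * e) - log_pdf(x - eps * e)) / (2 * eps)
                        for e in np.eye(n)])
    assert np.allclose(numeric, grad_log_pdf(x), atol=1e-5)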
class TestMetropolis():
def test_metropolis(self):
n_samples = 200000
x_init = np.random.rand(n)
sigma = np.ones(n)
samples = mcmc.metropolis(n_samples, x_init, log_pdf, sigma)
assert samples.shape == (n_samples, n)
cov = np.cov(samples[100000:, :].T)
assert np.allclose(cov, true_cov, atol=0.3, rtol=0.1)
@pytest.mark.slowtest
class TestNUTS():
def test_nuts(self):
n_samples = 100000
n_adapt = 10000
x_init = np.random.rand(n)
samples = mcmc.nuts(n_samples, x_init, log_pdf, grad_log_pdf, n_adapt=n_adapt)
assert samples.shape == (n_samples, n)
cov = np.cov(samples[n_adapt:, :].T)
assert np.allclose(cov, true_cov, atol=0.1, rtol=0.1)
# some data generated in PyStan
chains_Stan = np.array([[0.2955857, 1.27937191, 1.05884099, 0.91236858],
                        [0.38128885, 1.34242613, 0.49102573, 0.76061715],
                        [0.38128885, 1.18404563, 0.49102573, 0.78910512],
                        [0.38128885, 0.72150199, 0.49102573, 1.13845618],
                        [0.38128885, 0.72150199, 0.38102685, 0.81298041],
                        [0.26917982, 0.72150199, 0.38102685, 0.81298041],
                        [0.26917982, 0.68149163, 0.45830605, 0.86364605],
                        [0.51213898, 0.68149163, 0.29170172, 0.80734373],
                        [0.51213898, 0.85560228, 0.29170172, 0.48134129],
                        [0.22711558, 0.85560228, 0.29170172, 0.48134129]]).T
ess_Stan = 4.09
Rhat_Stan = 1.714
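# Note (added for clarity, not in the original source): chains_Stan holds 4
# chains of 10 samples each; ess_Stan and Rhat_Stan are the effective sample
# size and the Gelman-Rubin potential scale reduction factor computed for the
# same data in PyStan, used below as reference values.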
def test_ESS():
assert np.isclose(mcmc.eff_sample_size(chains_Stan), ess_Stan, atol=0.01)
def test_Rhat():
assert np.isclose(mcmc.gelman_rubin_statistic(chains_Stan), Rhat_Stan, atol=0.01)
|
elfi-dev/elfi
|
tests/unit/test_mcmc.py
|
Python
|
bsd-3-clause
| 2,150
|
[
"Gaussian"
] |
9c07e093051141ba531774a82b8764565cb4f180b8ed94ee77a4c80b788da968
|
#!/usr/bin/env python
"""
Copyright (c) 2006-2014 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""
class PRIORITY:
LOWEST = -100
LOWER = -50
LOW = -10
NORMAL = 0
HIGH = 10
HIGHER = 50
HIGHEST = 100
class SORT_ORDER:
FIRST = 0
SECOND = 1
THIRD = 2
FOURTH = 3
FIFTH = 4
LAST = 100
class DBMS:
ACCESS = "Microsoft Access"
DB2 = "IBM DB2"
FIREBIRD = "Firebird"
MAXDB = "SAP MaxDB"
MSSQL = "Microsoft SQL Server"
MYSQL = "MySQL"
ORACLE = "Oracle"
PGSQL = "PostgreSQL"
SQLITE = "SQLite"
SYBASE = "Sybase"
HSQLDB = "HSQLDB"
class DBMS_DIRECTORY_NAME:
ACCESS = "access"
DB2 = "db2"
FIREBIRD = "firebird"
MAXDB = "maxdb"
MSSQL = "mssqlserver"
MYSQL = "mysql"
ORACLE = "oracle"
PGSQL = "postgresql"
SQLITE = "sqlite"
SYBASE = "sybase"
HSQLDB = "hsqldb"
class CUSTOM_LOGGING:
PAYLOAD = 9
TRAFFIC_OUT = 8
TRAFFIC_IN = 7
class OS:
LINUX = "Linux"
WINDOWS = "Windows"
class PLACE:
GET = "GET"
POST = "POST"
URI = "URI"
COOKIE = "Cookie"
USER_AGENT = "User-Agent"
REFERER = "Referer"
HOST = "Host"
CUSTOM_POST = "(custom) POST"
CUSTOM_HEADER = "(custom) HEADER"
class POST_HINT:
SOAP = "SOAP"
JSON = "JSON"
JSON_LIKE = "JSON-like"
MULTIPART = "MULTIPART"
XML = "XML (generic)"
ARRAY_LIKE = "Array-like"
class HTTPMETHOD:
GET = "GET"
POST = "POST"
HEAD = "HEAD"
PUT = "PUT"
DELETE = "DETELE"
TRACE = "TRACE"
OPTIONS = "OPTIONS"
CONNECT = "CONNECT"
PATCH = "PATCH"
class NULLCONNECTION:
HEAD = "HEAD"
RANGE = "Range"
SKIP_READ = "skip-read"
class REFLECTIVE_COUNTER:
MISS = "MISS"
HIT = "HIT"
class CHARSET_TYPE:
BINARY = 1
DIGITS = 2
HEXADECIMAL = 3
ALPHA = 4
ALPHANUM = 5
class HEURISTIC_TEST:
CASTED = 1
NEGATIVE = 2
POSITIVE = 3
class HASH:
MYSQL = r'(?i)\A\*[0-9a-f]{40}\Z'
MYSQL_OLD = r'(?i)\A(?![0-9]+\Z)[0-9a-f]{16}\Z'
POSTGRES = r'(?i)\Amd5[0-9a-f]{32}\Z'
MSSQL = r'(?i)\A0x0100[0-9a-f]{8}[0-9a-f]{40}\Z'
MSSQL_OLD = r'(?i)\A0x0100[0-9a-f]{8}[0-9a-f]{80}\Z'
MSSQL_NEW = r'(?i)\A0x0200[0-9a-f]{8}[0-9a-f]{128}\Z'
ORACLE = r'(?i)\As:[0-9a-f]{60}\Z'
ORACLE_OLD = r'(?i)\A[01-9a-f]{16}\Z'
MD5_GENERIC = r'(?i)\A[0-9a-f]{32}\Z'
SHA1_GENERIC = r'(?i)\A[0-9a-f]{40}\Z'
    SHA224_GENERIC = r'(?i)\A[0-9a-f]{56}\Z'
    SHA384_GENERIC = r'(?i)\A[0-9a-f]{96}\Z'
    SHA512_GENERIC = r'(?i)\A[0-9a-f]{128}\Z'
CRYPT_GENERIC = r'(?i)\A(?!\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}\Z)(?![0-9]+\Z)[./0-9A-Za-z]{13}\Z'
WORDPRESS = r'(?i)\A\$P\$[./0-9A-Za-z]{31}\Z'
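# Illustrative examples (added comment, not in the original source): a string
# such as "*0123456789ABCDEF0123456789ABCDEF01234567" matches HASH.MYSQL
# ('*' followed by 40 hex characters), while any bare 32-character hex string
# like "5f4dcc3b5aa765d61d8327deb882cf99" matches HASH.MD5_GENERIC.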
# Reference: http://www.zytrax.com/tech/web/mobile_ids.html
class MOBILES:
BLACKBERRY = ("BlackBerry 9900", "Mozilla/5.0 (BlackBerry; U; BlackBerry 9900; en) AppleWebKit/534.11+ (KHTML, like Gecko) Version/7.1.0.346 Mobile Safari/534.11+")
GALAXY = ("Samsung Galaxy S", "Mozilla/5.0 (Linux; U; Android 2.2; en-US; SGH-T959D Build/FROYO) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1")
HP = ("HP iPAQ 6365", "Mozilla/4.0 (compatible; MSIE 4.01; Windows CE; PPC; 240x320; HP iPAQ h6300)")
HTC = ("HTC Sensation", "Mozilla/5.0 (Linux; U; Android 4.0.3; de-ch; HTC Sensation Build/IML74K) AppleWebKit/534.30 (KHTML, like Gecko) Version/4.0 Mobile Safari/534.30")
IPHONE = ("Apple iPhone 4s", "Mozilla/5.0 (iPhone; CPU iPhone OS 5_1 like Mac OS X) AppleWebKit/534.46 (KHTML, like Gecko) Version/5.1 Mobile/9B179 Safari/7534.48.3")
NEXUS = ("Google Nexus 7", "Mozilla/5.0 (Linux; Android 4.1.1; Nexus 7 Build/JRO03D) AppleWebKit/535.19 (KHTML, like Gecko) Chrome/18.0.1025.166 Safari/535.19")
NOKIA = ("Nokia N97", "Mozilla/5.0 (SymbianOS/9.4; Series60/5.0 NokiaN97-1/10.0.012; Profile/MIDP-2.1 Configuration/CLDC-1.1; en-us) AppleWebKit/525 (KHTML, like Gecko) WicKed/7.1.12344")
class PROXY_TYPE:
HTTP = "HTTP"
HTTPS = "HTTPS"
SOCKS4 = "SOCKS4"
SOCKS5 = "SOCKS5"
class REGISTRY_OPERATION:
READ = "read"
ADD = "add"
DELETE = "delete"
class DUMP_FORMAT:
CSV = "CSV"
HTML = "HTML"
SQLITE = "SQLITE"
class HTTP_HEADER:
ACCEPT = "Accept"
ACCEPT_CHARSET = "Accept-Charset"
ACCEPT_ENCODING = "Accept-Encoding"
ACCEPT_LANGUAGE = "Accept-Language"
AUTHORIZATION = "Authorization"
CACHE_CONTROL = "Cache-Control"
CONNECTION = "Connection"
CONTENT_ENCODING = "Content-Encoding"
CONTENT_LENGTH = "Content-Length"
CONTENT_RANGE = "Content-Range"
CONTENT_TYPE = "Content-Type"
COOKIE = "Cookie"
SET_COOKIE = "Set-Cookie"
HOST = "Host"
LOCATION = "Location"
PRAGMA = "Pragma"
PROXY_AUTHORIZATION = "Proxy-Authorization"
PROXY_CONNECTION = "Proxy-Connection"
RANGE = "Range"
REFERER = "Referer"
SERVER = "Server"
USER_AGENT = "User-Agent"
TRANSFER_ENCODING = "Transfer-Encoding"
URI = "URI"
VIA = "Via"
class EXPECTED:
BOOL = "bool"
INT = "int"
class OPTION_TYPE:
BOOLEAN = "boolean"
INTEGER = "integer"
FLOAT = "float"
STRING = "string"
class HASHDB_KEYS:
DBMS = "DBMS"
CONF_TMP_PATH = "CONF_TMP_PATH"
KB_ABS_FILE_PATHS = "KB_ABS_FILE_PATHS"
KB_BRUTE_COLUMNS = "KB_BRUTE_COLUMNS"
KB_BRUTE_TABLES = "KB_BRUTE_TABLES"
KB_CHARS = "KB_CHARS"
KB_DYNAMIC_MARKINGS = "KB_DYNAMIC_MARKINGS"
KB_INJECTIONS = "KB_INJECTIONS"
KB_XP_CMDSHELL_AVAILABLE = "KB_XP_CMDSHELL_AVAILABLE"
OS = "OS"
class REDIRECTION:
YES = "Y"
NO = "N"
class PAYLOAD:
SQLINJECTION = {
1: "boolean-based blind",
2: "error-based",
3: "UNION query",
4: "stacked queries",
5: "AND/OR time-based blind",
6: "inline query",
}
PARAMETER = {
1: "Unescaped numeric",
2: "Single quoted string",
3: "LIKE single quoted string",
4: "Double quoted string",
5: "LIKE double quoted string",
}
RISK = {
0: "No risk",
1: "Low risk",
2: "Medium risk",
3: "High risk",
}
CLAUSE = {
0: "Always",
1: "WHERE",
2: "GROUP BY",
3: "ORDER BY",
4: "LIMIT",
5: "OFFSET",
6: "TOP",
7: "Table name",
8: "Column name",
}
class METHOD:
COMPARISON = "comparison"
GREP = "grep"
TIME = "time"
UNION = "union"
class TECHNIQUE:
BOOLEAN = 1
ERROR = 2
UNION = 3
STACKED = 4
TIME = 5
QUERY = 6
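# Note (added comment, not in the original source): the numeric TECHNIQUE
# values appear to line up with the keys of PAYLOAD.SQLINJECTION above
# (e.g. TECHNIQUE.BOOLEAN == 1 -> "boolean-based blind",
# TECHNIQUE.TIME == 5 -> "AND/OR time-based blind").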
class WHERE:
ORIGINAL = 1
NEGATIVE = 2
REPLACE = 3
class WIZARD:
BASIC = ("getBanner", "getCurrentUser", "getCurrentDb", "isDba")
INTERMEDIATE = ("getBanner", "getCurrentUser", "getCurrentDb", "isDba", "getUsers", "getDbs", "getTables", "getSchema", "excludeSysDbs")
ALL = ("getBanner", "getCurrentUser", "getCurrentDb", "isDba", "getHostname", "getUsers", "getPasswordHashes", "getPrivileges", "getRoles", "dumpAll")
class ADJUST_TIME_DELAY:
DISABLE = -1
NO = 0
YES = 1
class WEB_API:
PHP = "php"
ASP = "asp"
ASPX = "aspx"
JSP = "jsp"
class CONTENT_TYPE:
TECHNIQUES = 0
DBMS_FINGERPRINT = 1
BANNER = 2
CURRENT_USER = 3
CURRENT_DB = 4
HOSTNAME = 5
IS_DBA = 6
USERS = 7
PASSWORDS = 8
PRIVILEGES = 9
ROLES = 10
DBS = 11
TABLES = 12
COLUMNS = 13
SCHEMA = 14
COUNT = 15
DUMP_TABLE = 16
SEARCH = 17
SQL_QUERY = 18
COMMON_TABLES = 19
COMMON_COLUMNS = 20
FILE_READ = 21
FILE_WRITE = 22
OS_CMD = 23
REG_READ = 24
PART_RUN_CONTENT_TYPES = {
"checkDbms": CONTENT_TYPE.TECHNIQUES,
"getFingerprint": CONTENT_TYPE.DBMS_FINGERPRINT,
"getBanner": CONTENT_TYPE.BANNER,
"getCurrentUser": CONTENT_TYPE.CURRENT_USER,
"getCurrentDb": CONTENT_TYPE.CURRENT_DB,
"getHostname": CONTENT_TYPE.HOSTNAME,
"isDba": CONTENT_TYPE.IS_DBA,
"getUsers": CONTENT_TYPE.USERS,
"getPasswordHashes": CONTENT_TYPE.PASSWORDS,
"getPrivileges": CONTENT_TYPE.PRIVILEGES,
"getRoles": CONTENT_TYPE.ROLES,
"getDbs": CONTENT_TYPE.DBS,
"getTables": CONTENT_TYPE.TABLES,
"getColumns": CONTENT_TYPE.COLUMNS,
"getSchema": CONTENT_TYPE.SCHEMA,
"getCount": CONTENT_TYPE.COUNT,
"dumpTable": CONTENT_TYPE.DUMP_TABLE,
"search": CONTENT_TYPE.SEARCH,
"sqlQuery": CONTENT_TYPE.SQL_QUERY,
"tableExists": CONTENT_TYPE.COMMON_TABLES,
"columnExists": CONTENT_TYPE.COMMON_COLUMNS,
"readFile": CONTENT_TYPE.FILE_READ,
"writeFile": CONTENT_TYPE.FILE_WRITE,
"osCmd": CONTENT_TYPE.OS_CMD,
"regRead": CONTENT_TYPE.REG_READ
}
class CONTENT_STATUS:
IN_PROGRESS = 0
COMPLETE = 1
class AUTH_TYPE:
BASIC = "basic"
DIGEST = "digest"
NTLM = "ntlm"
PKI = "pki"
class AUTOCOMPLETE_TYPE:
SQL = 0
OS = 1
SQLMAP = 2
|
Snifer/BurpSuite-Plugins
|
Sqlmap/lib/core/enums.py
|
Python
|
gpl-2.0
| 9,285
|
[
"Galaxy"
] |
e0176cb8cc94c983790c92f89750afdb9a58aa23d7ea806a208337b4bd607bd3
|
""" # lint-amnesty, pylint: disable=django-not-configured
Instructor Dashboard Views
"""
import datetime
import logging
import uuid
from functools import reduce
from unittest.mock import patch
import pytz
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.http import Http404, HttpResponseServerError
from django.urls import reverse
from django.utils.html import escape
from django.utils.translation import ugettext as _
from django.utils.translation import ugettext_noop
from django.views.decorators.cache import cache_control
from django.views.decorators.csrf import ensure_csrf_cookie
from django.views.decorators.http import require_POST
from edx_proctoring.api import does_backend_support_onboarding
from edx_when.api import is_enabled_for_course
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
from six.moves.urllib.parse import urljoin # lint-amnesty, pylint: disable=unused-import
from xblock.field_data import DictFieldData
from xblock.fields import ScopeIds
from common.djangoapps.course_modes.models import CourseMode, CourseModesArchive
from common.djangoapps.edxmako.shortcuts import render_to_response
from common.djangoapps.student.models import CourseEnrollment
from common.djangoapps.student.roles import (
CourseFinanceAdminRole,
CourseInstructorRole,
CourseSalesAdminRole,
CourseStaffRole
)
from common.djangoapps.util.json_request import JsonResponse
from lms.djangoapps.bulk_email.api import is_bulk_email_feature_enabled
from lms.djangoapps.certificates import api as certs_api
from lms.djangoapps.certificates.models import (
CertificateGenerationConfiguration,
CertificateGenerationHistory,
CertificateInvalidation,
CertificateStatuses,
CertificateWhitelist,
GeneratedCertificate
)
from lms.djangoapps.courseware.access import has_access
from lms.djangoapps.courseware.courses import get_course_by_id, get_studio_url
from lms.djangoapps.courseware.module_render import get_module_by_usage_id
from lms.djangoapps.courseware.toggles import EXAM_RESUME_PROCTORING_IMPROVEMENTS
from lms.djangoapps.discussion.django_comment_client.utils import available_division_schemes, has_forum_access
from lms.djangoapps.grades.api import is_writable_gradebook_enabled
from openedx.core.djangoapps.course_groups.cohorts import DEFAULT_COHORT_NAME, get_course_cohorts, is_course_cohorted
from openedx.core.djangoapps.django_comment_common.models import FORUM_ROLE_ADMINISTRATOR, CourseDiscussionSettings
from openedx.core.djangoapps.site_configuration import helpers as configuration_helpers
from openedx.core.djangoapps.verified_track_content.models import VerifiedTrackCohortedCourse
from openedx.core.djangolib.markup import HTML, Text
from openedx.core.lib.url_utils import quote_slashes
from openedx.core.lib.xblock_utils import wrap_xblock
from xmodule.html_module import HtmlBlock
from xmodule.modulestore.django import modulestore
from xmodule.tabs import CourseTab
from .. import permissions
from ..toggles import data_download_v2_is_enabled
from .tools import get_units_with_due_date, title_or_url
log = logging.getLogger(__name__)
class InstructorDashboardTab(CourseTab):
"""
Defines the Instructor Dashboard view type that is shown as a course tab.
"""
type = "instructor"
title = ugettext_noop('Instructor')
view_name = "instructor_dashboard"
is_dynamic = True # The "Instructor" tab is instead dynamically added when it is enabled
@classmethod
def is_enabled(cls, course, user=None):
"""
Returns true if the specified user has staff access.
"""
return bool(user and user.is_authenticated and user.has_perm(permissions.VIEW_DASHBOARD, course.id))
def show_analytics_dashboard_message(course_key):
"""
Defines whether or not the analytics dashboard URL should be displayed.
Arguments:
course_key (CourseLocator): The course locator to display the analytics dashboard message on.
"""
if hasattr(course_key, 'ccx'):
ccx_analytics_enabled = settings.FEATURES.get('ENABLE_CCX_ANALYTICS_DASHBOARD_URL', False)
return settings.ANALYTICS_DASHBOARD_URL and ccx_analytics_enabled
return settings.ANALYTICS_DASHBOARD_URL
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
def instructor_dashboard_2(request, course_id): # lint-amnesty, pylint: disable=too-many-statements
""" Display the instructor dashboard for a course. """
try:
course_key = CourseKey.from_string(course_id)
except InvalidKeyError:
log.error("Unable to find course with course key %s while loading the Instructor Dashboard.", course_id)
return HttpResponseServerError()
course = get_course_by_id(course_key, depth=0)
access = {
'admin': request.user.is_staff,
'instructor': bool(has_access(request.user, 'instructor', course)),
'finance_admin': CourseFinanceAdminRole(course_key).has_user(request.user),
'sales_admin': CourseSalesAdminRole(course_key).has_user(request.user),
'staff': bool(has_access(request.user, 'staff', course)),
'forum_admin': has_forum_access(request.user, course_key, FORUM_ROLE_ADMINISTRATOR),
'data_researcher': request.user.has_perm(permissions.CAN_RESEARCH, course_key),
}
if not request.user.has_perm(permissions.VIEW_DASHBOARD, course_key):
raise Http404()
is_white_label = CourseMode.is_white_label(course_key) # lint-amnesty, pylint: disable=unused-variable
reports_enabled = configuration_helpers.get_value('SHOW_ECOMMERCE_REPORTS', False) # lint-amnesty, pylint: disable=unused-variable
sections = []
if access['staff']:
sections.extend([
_section_course_info(course, access),
_section_membership(course, access),
_section_cohort_management(course, access),
_section_discussions_management(course, access),
_section_student_admin(course, access),
])
if access['data_researcher']:
sections.append(_section_data_download(course, access))
analytics_dashboard_message = None
if show_analytics_dashboard_message(course_key) and (access['staff'] or access['instructor']):
# Construct a URL to the external analytics dashboard
analytics_dashboard_url = '{}/courses/{}'.format(settings.ANALYTICS_DASHBOARD_URL, str(course_key))
link_start = HTML("<a href=\"{}\" rel=\"noopener\" target=\"_blank\">").format(analytics_dashboard_url)
analytics_dashboard_message = _(
"To gain insights into student enrollment and participation {link_start}"
"visit {analytics_dashboard_name}, our new course analytics product{link_end}."
)
analytics_dashboard_message = Text(analytics_dashboard_message).format(
link_start=link_start, link_end=HTML("</a>"), analytics_dashboard_name=settings.ANALYTICS_DASHBOARD_NAME)
# Temporarily show the "Analytics" section until we have a better way of linking to Insights
sections.append(_section_analytics(course, access))
# Check if there is corresponding entry in the CourseMode Table related to the Instructor Dashboard course
course_mode_has_price = False # lint-amnesty, pylint: disable=unused-variable
paid_modes = CourseMode.paid_modes_for_course(course_key)
if len(paid_modes) == 1:
course_mode_has_price = True
elif len(paid_modes) > 1:
log.error(
"Course %s has %s course modes with payment options. Course must only have "
"one paid course mode to enable eCommerce options.",
str(course_key), len(paid_modes)
)
if access['instructor'] and is_enabled_for_course(course_key):
sections.insert(3, _section_extensions(course))
# Gate access to course email by feature flag & by course-specific authorization
if is_bulk_email_feature_enabled(course_key) and (access['staff'] or access['instructor']):
sections.append(_section_send_email(course, access))
# Gate access to Special Exam tab depending if either timed exams or proctored exams
# are enabled in the course
user_has_access = any([
request.user.is_staff,
CourseStaffRole(course_key).has_user(request.user),
CourseInstructorRole(course_key).has_user(request.user)
])
course_has_special_exams = course.enable_proctored_exams or course.enable_timed_exams
can_see_special_exams = course_has_special_exams and user_has_access and settings.FEATURES.get(
'ENABLE_SPECIAL_EXAMS', False)
if can_see_special_exams:
sections.append(_section_special_exams(course, access))
# Certificates panel
# This is used to generate example certificates
# and enable self-generated certificates for a course.
# Note: This is hidden for all CCXs
certs_enabled = CertificateGenerationConfiguration.current().enabled and not hasattr(course_key, 'ccx')
if certs_enabled and access['admin']:
sections.append(_section_certificates(course))
openassessment_blocks = modulestore().get_items(
course_key, qualifiers={'category': 'openassessment'}
)
# filter out orphaned openassessment blocks
openassessment_blocks = [
block for block in openassessment_blocks if block.parent is not None
]
if len(openassessment_blocks) > 0 and access['staff']:
sections.append(_section_open_response_assessment(request, course, openassessment_blocks, access))
disable_buttons = not CourseEnrollment.objects.is_small_course(course_key)
certificate_white_list = CertificateWhitelist.get_certificate_white_list(course_key)
generate_certificate_exceptions_url = reverse(
'generate_certificate_exceptions',
kwargs={'course_id': str(course_key), 'generate_for': ''}
)
generate_bulk_certificate_exceptions_url = reverse(
'generate_bulk_certificate_exceptions',
kwargs={'course_id': str(course_key)}
)
certificate_exception_view_url = reverse(
'certificate_exception_view',
kwargs={'course_id': str(course_key)}
)
certificate_invalidation_view_url = reverse(
'certificate_invalidation_view',
kwargs={'course_id': str(course_key)}
)
certificate_invalidations = CertificateInvalidation.get_certificate_invalidations(course_key)
context = {
'course': course,
'studio_url': get_studio_url(course, 'course'),
'sections': sections,
'disable_buttons': disable_buttons,
'analytics_dashboard_message': analytics_dashboard_message,
'certificate_white_list': certificate_white_list,
'certificate_invalidations': certificate_invalidations,
'generate_certificate_exceptions_url': generate_certificate_exceptions_url,
'generate_bulk_certificate_exceptions_url': generate_bulk_certificate_exceptions_url,
'certificate_exception_view_url': certificate_exception_view_url,
'certificate_invalidation_view_url': certificate_invalidation_view_url,
'xqa_server': settings.FEATURES.get('XQA_SERVER', "http://your_xqa_server.com"),
}
return render_to_response('instructor/instructor_dashboard_2/instructor_dashboard_2.html', context)
## Section functions starting with _section return a dictionary of section data.
## The dictionary must include at least {
## 'section_key': 'circus_expo'
## 'section_display_name': 'Circus Expo'
## }
## section_key will be used as a css attribute, javascript tie-in, and template import filename.
## section_display_name will be used to generate link titles in the nav bar.
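## A minimal sketch of that contract (illustrative only, not a real dashboard
## section in this module):
##
##     def _section_example(course, access):
##         return {
##             'section_key': 'example_section',
##             'section_display_name': _('Example Section'),
##             'access': access,
##             'course_id': str(course.id),
##         }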
def _section_special_exams(course, access):
""" Provide data for the corresponding dashboard section """
course_key = str(course.id)
proctoring_provider = course.proctoring_provider
escalation_email = None
if proctoring_provider == 'proctortrack':
escalation_email = course.proctoring_escalation_email
from edx_proctoring.api import is_backend_dashboard_available
section_data = {
'section_key': 'special_exams',
'section_display_name': _('Special Exams'),
'access': access,
'course_id': course_key,
'escalation_email': escalation_email,
'show_dashboard': is_backend_dashboard_available(course_key),
'show_onboarding': does_backend_support_onboarding(course.proctoring_provider),
'enable_exam_resume_proctoring_improvements': EXAM_RESUME_PROCTORING_IMPROVEMENTS.is_enabled(course.id),
}
return section_data
def _section_certificates(course):
"""Section information for the certificates panel.
The certificates panel allows global staff to generate
example certificates and enable self-generated certificates
for a course.
Arguments:
course (Course)
Returns:
dict
"""
example_cert_status = None
html_cert_enabled = certs_api.has_html_certificates_enabled(course)
if html_cert_enabled:
can_enable_for_course = True
else:
example_cert_status = certs_api.example_certificates_status(course.id)
# Allow the user to enable self-generated certificates for students
# *only* once a set of example certificates has been successfully generated.
# If certificates have been misconfigured for the course (for example, if
# the PDF template hasn't been uploaded yet), then we don't want
# to turn on self-generated certificates for students!
can_enable_for_course = (
example_cert_status is not None and
all(
cert_status['status'] == 'success'
for cert_status in example_cert_status
)
)
instructor_generation_enabled = settings.FEATURES.get('CERTIFICATES_INSTRUCTOR_GENERATION', False)
certificate_statuses_with_count = {
certificate['status']: certificate['count']
for certificate in GeneratedCertificate.get_unique_statuses(course_key=course.id)
}
return {
'section_key': 'certificates',
'section_display_name': _('Certificates'),
'example_certificate_status': example_cert_status,
'can_enable_for_course': can_enable_for_course,
'enabled_for_course': certs_api.cert_generation_enabled(course.id),
'is_self_paced': course.self_paced,
'instructor_generation_enabled': instructor_generation_enabled,
'html_cert_enabled': html_cert_enabled,
'active_certificate': certs_api.get_active_web_certificate(course),
'certificate_statuses_with_count': certificate_statuses_with_count,
'status': CertificateStatuses,
'certificate_generation_history':
CertificateGenerationHistory.objects.filter(course_id=course.id).order_by("-created"),
'urls': {
'generate_example_certificates': reverse(
'generate_example_certificates',
kwargs={'course_id': course.id}
),
'enable_certificate_generation': reverse(
'enable_certificate_generation',
kwargs={'course_id': course.id}
),
'start_certificate_generation': reverse(
'start_certificate_generation',
kwargs={'course_id': course.id}
),
'start_certificate_regeneration': reverse(
'start_certificate_regeneration',
kwargs={'course_id': course.id}
),
'list_instructor_tasks_url': reverse(
'list_instructor_tasks',
kwargs={'course_id': course.id}
),
}
}
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@require_POST
@login_required
def set_course_mode_price(request, course_id):
"""
set the new course price and add new entry in the CourseModesArchive Table
"""
try:
course_price = int(request.POST['course_price'])
except ValueError:
return JsonResponse(
{'message': _("Please Enter the numeric value for the course price")},
status=400) # status code 400: Bad Request
currency = request.POST['currency']
course_key = CourseKey.from_string(course_id)
course_honor_mode = CourseMode.objects.filter(mode_slug='honor', course_id=course_key)
if not course_honor_mode:
return JsonResponse(
{'message': _("CourseMode with the mode slug({mode_slug}) DoesNotExist").format(mode_slug='honor')},
status=400) # status code 400: Bad Request
CourseModesArchive.objects.create(
course_id=course_id, mode_slug='honor', mode_display_name='Honor Code Certificate',
min_price=course_honor_mode[0].min_price, currency=course_honor_mode[0].currency,
expiration_datetime=datetime.datetime.now(pytz.utc), expiration_date=datetime.date.today()
)
course_honor_mode.update(
min_price=course_price,
currency=currency
)
return JsonResponse({'message': _("CourseMode price updated successfully")})
def _section_course_info(course, access):
""" Provide data for the corresponding dashboard section """
course_key = course.id
section_data = {
'section_key': 'course_info',
'section_display_name': _('Course Info'),
'access': access,
'course_id': course_key,
'course_display_name': course.display_name_with_default,
'course_org': course.display_org_with_default,
'course_number': course.display_number_with_default,
'has_started': course.has_started(),
'has_ended': course.has_ended(),
'start_date': course.start,
'end_date': course.end,
'num_sections': len(course.children),
'list_instructor_tasks_url': reverse('list_instructor_tasks', kwargs={'course_id': str(course_key)}),
}
if settings.FEATURES.get('DISPLAY_ANALYTICS_ENROLLMENTS'):
section_data['enrollment_count'] = CourseEnrollment.objects.enrollment_counts(course_key)
if show_analytics_dashboard_message(course_key):
# dashboard_link is already made safe in _get_dashboard_link
dashboard_link = _get_dashboard_link(course_key)
# so we can use Text() here so it's not double-escaped and rendering HTML on the front-end
message = Text(
_("Enrollment data is now available in {dashboard_link}.")
).format(dashboard_link=dashboard_link)
section_data['enrollment_message'] = message
if settings.FEATURES.get('ENABLE_SYSADMIN_DASHBOARD'):
section_data['detailed_gitlogs_url'] = reverse(
'gitlogs_detail',
kwargs={'course_id': str(course_key)}
)
try:
sorted_cutoffs = sorted(list(course.grade_cutoffs.items()), key=lambda i: i[1], reverse=True)
advance = lambda memo, letter_score_tuple: "{}: {}, ".format(letter_score_tuple[0], letter_score_tuple[1]) \
+ memo
section_data['grade_cutoffs'] = reduce(advance, sorted_cutoffs, "")[:-2]
except Exception: # pylint: disable=broad-except
section_data['grade_cutoffs'] = "Not Available"
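    # Worked example (added comment, not in the original source): with
    # course.grade_cutoffs == {'A': 0.9, 'B': 0.8, 'C': 0.7}, the reduce above
    # prepends each entry, so section_data['grade_cutoffs'] becomes
    # "C: 0.7, B: 0.8, A: 0.9".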
try:
section_data['course_errors'] = [(escape(a), '') for (a, _unused) in modulestore().get_course_errors(course.id)]
except Exception: # pylint: disable=broad-except
section_data['course_errors'] = [('Error fetching errors', '')]
return section_data
def _section_membership(course, access):
""" Provide data for the corresponding dashboard section """
course_key = course.id
ccx_enabled = settings.FEATURES.get('CUSTOM_COURSES_EDX', False) and course.enable_ccx
enrollment_role_choices = configuration_helpers.get_value('MANUAL_ENROLLMENT_ROLE_CHOICES',
settings.MANUAL_ENROLLMENT_ROLE_CHOICES)
section_data = {
'section_key': 'membership',
'section_display_name': _('Membership'),
'access': access,
'ccx_is_enabled': ccx_enabled,
'enroll_button_url': reverse('students_update_enrollment', kwargs={'course_id': str(course_key)}),
'unenroll_button_url': reverse('students_update_enrollment', kwargs={'course_id': str(course_key)}),
'upload_student_csv_button_url': reverse(
'register_and_enroll_students',
kwargs={'course_id': str(course_key)}
),
'modify_beta_testers_button_url': reverse(
'bulk_beta_modify_access',
kwargs={'course_id': str(course_key)}
),
'list_course_role_members_url': reverse(
'list_course_role_members',
kwargs={'course_id': str(course_key)}
),
'modify_access_url': reverse('modify_access', kwargs={'course_id': str(course_key)}),
'list_forum_members_url': reverse('list_forum_members', kwargs={'course_id': str(course_key)}),
'update_forum_role_membership_url': reverse(
'update_forum_role_membership',
kwargs={'course_id': str(course_key)}
),
'enrollment_role_choices': enrollment_role_choices,
'is_reason_field_enabled': configuration_helpers.get_value('ENABLE_MANUAL_ENROLLMENT_REASON_FIELD', False)
}
return section_data
def _section_cohort_management(course, access):
""" Provide data for the corresponding cohort management section """
course_key = course.id
ccx_enabled = hasattr(course_key, 'ccx')
section_data = {
'section_key': 'cohort_management',
'section_display_name': _('Cohorts'),
'access': access,
'ccx_is_enabled': ccx_enabled,
'course_cohort_settings_url': reverse(
'course_cohort_settings',
kwargs={'course_key_string': str(course_key)}
),
'cohorts_url': reverse('cohorts', kwargs={'course_key_string': str(course_key)}),
'upload_cohorts_csv_url': reverse('add_users_to_cohorts', kwargs={'course_id': str(course_key)}),
'verified_track_cohorting_url': reverse(
'verified_track_cohorting', kwargs={'course_key_string': str(course_key)}
),
}
return section_data
def _section_discussions_management(course, access): # lint-amnesty, pylint: disable=unused-argument
""" Provide data for the corresponding discussion management section """
course_key = course.id
enrollment_track_schemes = available_division_schemes(course_key)
section_data = {
'section_key': 'discussions_management',
'section_display_name': _('Discussions'),
'is_hidden': (not is_course_cohorted(course_key) and
CourseDiscussionSettings.ENROLLMENT_TRACK not in enrollment_track_schemes),
'discussion_topics_url': reverse('discussion_topics', kwargs={'course_key_string': str(course_key)}),
'course_discussion_settings': reverse(
'course_discussions_settings',
kwargs={'course_key_string': str(course_key)}
),
}
return section_data
def _section_student_admin(course, access):
""" Provide data for the corresponding dashboard section """
course_key = course.id
is_small_course = CourseEnrollment.objects.is_small_course(course_key)
section_data = {
'section_key': 'student_admin',
'section_display_name': _('Student Admin'),
'access': access,
'is_small_course': is_small_course,
'get_student_enrollment_status_url': reverse(
'get_student_enrollment_status',
kwargs={'course_id': str(course_key)}
),
'get_student_progress_url_url': reverse(
'get_student_progress_url',
kwargs={'course_id': str(course_key)}
),
'enrollment_url': reverse('students_update_enrollment', kwargs={'course_id': str(course_key)}),
'reset_student_attempts_url': reverse(
'reset_student_attempts',
kwargs={'course_id': str(course_key)}
),
'reset_student_attempts_for_entrance_exam_url': reverse(
'reset_student_attempts_for_entrance_exam',
kwargs={'course_id': str(course_key)},
),
'rescore_problem_url': reverse('rescore_problem', kwargs={'course_id': str(course_key)}),
'override_problem_score_url': reverse(
'override_problem_score',
kwargs={'course_id': str(course_key)}
),
'rescore_entrance_exam_url': reverse('rescore_entrance_exam', kwargs={'course_id': str(course_key)}),
'student_can_skip_entrance_exam_url': reverse(
'mark_student_can_skip_entrance_exam',
kwargs={'course_id': str(course_key)},
),
'list_instructor_tasks_url': reverse('list_instructor_tasks', kwargs={'course_id': str(course_key)}),
'list_entrace_exam_instructor_tasks_url': reverse(
'list_entrance_exam_instructor_tasks',
kwargs={'course_id': str(course_key)}
),
'spoc_gradebook_url': reverse('spoc_gradebook', kwargs={'course_id': str(course_key)}),
}
if is_writable_gradebook_enabled(course_key) and settings.WRITABLE_GRADEBOOK_URL:
section_data['writable_gradebook_url'] = '{}/{}'.format(settings.WRITABLE_GRADEBOOK_URL, str(course_key))
return section_data
def _section_extensions(course):
""" Provide data for the corresponding dashboard section """
section_data = {
'section_key': 'extensions',
'section_display_name': _('Extensions'),
'units_with_due_dates': [(title_or_url(unit), str(unit.location))
for unit in get_units_with_due_date(course)],
'change_due_date_url': reverse('change_due_date', kwargs={'course_id': str(course.id)}),
'reset_due_date_url': reverse('reset_due_date', kwargs={'course_id': str(course.id)}),
'show_unit_extensions_url': reverse('show_unit_extensions', kwargs={'course_id': str(course.id)}),
'show_student_extensions_url': reverse(
'show_student_extensions',
kwargs={'course_id': str(course.id)}
),
}
return section_data
def _section_data_download(course, access):
""" Provide data for the corresponding dashboard section """
course_key = course.id
show_proctored_report_button = (
settings.FEATURES.get('ENABLE_SPECIAL_EXAMS', False) and
course.enable_proctored_exams
)
section_key = 'data_download_2' if data_download_v2_is_enabled() else 'data_download'
section_data = {
'section_key': section_key,
'section_display_name': _('Data Download'),
'access': access,
'show_generate_proctored_exam_report_button': show_proctored_report_button,
'get_problem_responses_url': reverse('get_problem_responses', kwargs={'course_id': str(course_key)}),
'get_grading_config_url': reverse('get_grading_config', kwargs={'course_id': str(course_key)}),
'get_students_features_url': reverse('get_students_features', kwargs={'course_id': str(course_key)}),
'get_issued_certificates_url': reverse(
'get_issued_certificates', kwargs={'course_id': str(course_key)}
),
'get_students_who_may_enroll_url': reverse(
'get_students_who_may_enroll', kwargs={'course_id': str(course_key)}
),
'get_anon_ids_url': reverse('get_anon_ids', kwargs={'course_id': str(course_key)}),
'list_proctored_results_url': reverse(
'get_proctored_exam_results', kwargs={'course_id': str(course_key)}
),
'list_instructor_tasks_url': reverse('list_instructor_tasks', kwargs={'course_id': str(course_key)}),
'list_report_downloads_url': reverse('list_report_downloads', kwargs={'course_id': str(course_key)}),
'calculate_grades_csv_url': reverse('calculate_grades_csv', kwargs={'course_id': str(course_key)}),
'problem_grade_report_url': reverse('problem_grade_report', kwargs={'course_id': str(course_key)}),
'course_has_survey': True if course.course_survey_name else False, # lint-amnesty, pylint: disable=simplifiable-if-expression
'course_survey_results_url': reverse(
'get_course_survey_results', kwargs={'course_id': str(course_key)}
),
'export_ora2_data_url': reverse('export_ora2_data', kwargs={'course_id': str(course_key)}),
'export_ora2_submission_files_url': reverse(
'export_ora2_submission_files', kwargs={'course_id': str(course_key)}
),
'export_ora2_summary_url': reverse('export_ora2_summary', kwargs={'course_id': str(course_key)}),
}
if not access.get('data_researcher'):
section_data['is_hidden'] = True
return section_data
def null_applicable_aside_types(block): # pylint: disable=unused-argument
"""
get_aside method for monkey-patching into applicable_aside_types
while rendering an HtmlBlock for email text editing. This returns
an empty list.
"""
return []
def _section_send_email(course, access):
""" Provide data for the corresponding bulk email section """
course_key = course.id
# Monkey-patch applicable_aside_types to return no asides for the duration of this render
with patch.object(course.runtime, 'applicable_aside_types', null_applicable_aside_types):
# This HtmlBlock is only being used to generate a nice text editor.
html_module = HtmlBlock(
course.system,
DictFieldData({'data': ''}),
ScopeIds(None, None, None, course_key.make_usage_key('html', 'fake'))
)
fragment = course.system.render(html_module, 'studio_view')
fragment = wrap_xblock(
'LmsRuntime', html_module, 'studio_view', fragment, None,
extra_data={"course-id": str(course_key)},
usage_id_serializer=lambda usage_id: quote_slashes(str(usage_id)),
# Generate a new request_token here at random, because this module isn't connected to any other
# xblock rendering.
request_token=uuid.uuid1().hex
)
cohorts = []
if is_course_cohorted(course_key):
cohorts = get_course_cohorts(course)
course_modes = []
if not VerifiedTrackCohortedCourse.is_verified_track_cohort_enabled(course_key):
course_modes = CourseMode.modes_for_course(course_key, include_expired=True, only_selectable=False)
email_editor = fragment.content
section_data = {
'section_key': 'send_email',
'section_display_name': _('Email'),
'access': access,
'send_email': reverse('send_email', kwargs={'course_id': str(course_key)}),
'editor': email_editor,
'cohorts': cohorts,
'course_modes': course_modes,
'default_cohort_name': DEFAULT_COHORT_NAME,
'list_instructor_tasks_url': reverse(
'list_instructor_tasks', kwargs={'course_id': str(course_key)}
),
'email_background_tasks_url': reverse(
'list_background_email_tasks', kwargs={'course_id': str(course_key)}
),
'email_content_history_url': reverse(
'list_email_content', kwargs={'course_id': str(course_key)}
),
}
return section_data
def _get_dashboard_link(course_key):
""" Construct a URL to the external analytics dashboard """
analytics_dashboard_url = '{}/courses/{}'.format(settings.ANALYTICS_DASHBOARD_URL, str(course_key))
link = HTML("<a href=\"{0}\" rel=\"noopener\" target=\"_blank\">{1}</a>").format(
analytics_dashboard_url, settings.ANALYTICS_DASHBOARD_NAME
)
return link
def _section_analytics(course, access):
""" Provide data for the corresponding dashboard section """
section_data = {
'section_key': 'instructor_analytics',
'section_display_name': _('Analytics'),
'access': access,
'course_id': str(course.id),
}
return section_data
def _section_open_response_assessment(request, course, openassessment_blocks, access):
"""Provide data for the corresponding dashboard section """
course_key = course.id
ora_items = []
parents = {}
for block in openassessment_blocks:
block_parent_id = str(block.parent)
result_item_id = str(block.location)
if block_parent_id not in parents:
parents[block_parent_id] = modulestore().get_item(block.parent)
assessment_name = _("Team") + " : " + block.display_name if block.teams_enabled else block.display_name
ora_items.append({
'id': result_item_id,
'name': assessment_name,
'parent_id': block_parent_id,
'parent_name': parents[block_parent_id].display_name,
'staff_assessment': 'staff-assessment' in block.assessment_steps,
'url_base': reverse('xblock_view', args=[course.id, block.location, 'student_view']),
'url_grade_available_responses': reverse('xblock_view', args=[course.id, block.location,
'grade_available_responses_view']),
})
openassessment_block = openassessment_blocks[0]
block, __ = get_module_by_usage_id(
request, str(course_key), str(openassessment_block.location),
disable_staff_debug_info=True, course=course
)
section_data = {
'fragment': block.render('ora_blocks_listing_view', context={
'ora_items': ora_items,
'ora_item_view_enabled': settings.FEATURES.get('ENABLE_XBLOCK_VIEW_ENDPOINT', False)
}),
'section_key': 'open_response_assessment',
'section_display_name': _('Open Responses'),
'access': access,
'course_id': str(course_key),
}
return section_data
def is_ecommerce_course(course_key):
"""
Checks if the given course is an e-commerce course or not, by checking its SKU value from
CourseMode records for the course
"""
sku_count = len([mode.sku for mode in CourseMode.modes_for_course(course_key) if mode.sku])
return sku_count > 0
|
stvstnfrd/edx-platform
|
lms/djangoapps/instructor/views/instructor_dashboard.py
|
Python
|
agpl-3.0
| 34,219
|
[
"VisIt"
] |
aa2825c6bfe771cb635b28939d8c195127c1abb928b6219d9453e358273f96b6
|
from __future__ import absolute_import, print_function
import BaseHTTPServer
import SimpleHTTPServer
import base64
import io
import json
import threading
import time
import hashlib
import os
import sys
import urllib2
from decimal import Decimal
from optparse import OptionParser
from twisted.internet import reactor
# https://stackoverflow.com/questions/2801882/generating-a-png-with-matplotlib-when-display-is-undefined
try:
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
except:
print("matplotlib not found; do `pip install matplotlib`"
"in the joinmarket virtualenv.")
sys.exit(0)
from jmclient import jm_single, load_program_config, get_log, calc_cj_fee, get_irc_mchannels
from jmdaemon import OrderbookWatch, MessageChannelCollection, IRCMessageChannel
#TODO this is only for base58, find a solution for a client without jmbitcoin
import jmbitcoin as btc
from jmdaemon.protocol import *
log = get_log()
#Initial state: allow only SW offer types
swoffers = filter(lambda x: x[0:2] == 'sw', offername_list)
pkoffers = filter(lambda x: x[0:2] != 'sw', offername_list)
filtered_offername_list = swoffers
shutdownform = '<form action="shutdown" method="post"><input type="submit" value="Shutdown" /></form>'
shutdownpage = '<html><body><center><h1>Successfully Shut down</h1></center></body></html>'
toggleSWform = '<form action="toggleSW" method="post"><input type="submit" value="Toggle non-segwit" /></form>'
refresh_orderbook_form = '<form action="refreshorderbook" method="post"><input type="submit" value="Check for timed-out counterparties" /></form>'
sorted_units = ('BTC', 'mBTC', 'μBTC', 'satoshi')
unit_to_power = {'BTC': 8, 'mBTC': 5, 'μBTC': 2, 'satoshi': 0}
sorted_rel_units = ('%', '‱', 'ppm')
rel_unit_to_factor = {'%': 100, '‱': 1e4, 'ppm': 1e6}
def calc_depth_data(db, value):
pass
def create_depth_chart(db, cj_amount, args=None):
if args is None:
args = {}
rows = db.execute('SELECT * FROM orderbook;').fetchall()
sqlorders = [o for o in rows if o["ordertype"] in filtered_offername_list]
orderfees = sorted([calc_cj_fee(o['ordertype'], o['cjfee'], cj_amount) / 1e8
for o in sqlorders
if o['minsize'] <= cj_amount <= o[
'maxsize']])
if len(orderfees) == 0:
return 'No orders at amount ' + str(cj_amount / 1e8)
fig = plt.figure()
scale = args.get("scale")
if (scale is not None) and (scale[0] == "log"):
orderfees = [float(fee) for fee in orderfees]
if orderfees[0] > 0:
ratio = orderfees[-1] / orderfees[0]
step = ratio ** 0.0333 # 1/30
bins = [orderfees[0] * (step ** i) for i in range(30)]
else:
ratio = orderfees[-1] / 1e-8 # single satoshi placeholder
step = ratio ** 0.0333 # 1/30
bins = [1e-8 * (step ** i) for i in range(30)]
bins[0] = orderfees[0] # replace placeholder
plt.xscale('log')
else:
bins = 30
if len(orderfees) == 1: # these days we have liquidity, but just in case...
plt.hist(orderfees, bins, rwidth=0.8, range=(0, orderfees[0] * 2))
else:
plt.hist(orderfees, bins, rwidth=0.8)
plt.grid()
plt.title('CoinJoin Orderbook Depth Chart for amount=' + str(cj_amount /
1e8) + 'btc')
plt.xlabel('CoinJoin Fee / btc')
plt.ylabel('Frequency')
return get_graph_html(fig)
def create_size_histogram(db, args):
rows = db.execute('SELECT maxsize, ordertype FROM orderbook;').fetchall()
rows = [o for o in rows if o["ordertype"] in filtered_offername_list]
ordersizes = sorted([r['maxsize'] / 1e8 for r in rows])
fig = plt.figure()
scale = args.get("scale")
if (scale is not None) and (scale[0] == "log"):
ratio = ordersizes[-1] / ordersizes[0]
step = ratio ** 0.0333 # 1/30
bins = [ordersizes[0] * (step ** i) for i in range(30)]
else:
bins = 30
plt.hist(ordersizes, bins, histtype='bar', rwidth=0.8)
    if bins != 30:
fig.axes[0].set_xscale('log')
plt.grid()
plt.xlabel('Order sizes / btc')
plt.ylabel('Frequency')
return get_graph_html(fig) + ("<br/><a href='?scale=log'>log scale</a>" if
bins == 30 else "<br/><a href='?'>linear</a>")
def get_graph_html(fig):
imbuf = io.BytesIO()
fig.savefig(imbuf, format='png')
b64 = base64.b64encode(imbuf.getvalue())
return '<img src="data:image/png;base64,' + b64 + '" />'
# callback functions for displaying order data
def do_nothing(arg, order, btc_unit, rel_unit):
return arg
def ordertype_display(ordertype, order, btc_unit, rel_unit):
ordertypes = {'swabsoffer': 'SW Absolute Fee', 'swreloffer': 'SW Relative Fee',
'absoffer': 'Absolute Fee', 'reloffer': 'Relative Fee'}
return ordertypes[ordertype]
def cjfee_display(cjfee, order, btc_unit, rel_unit):
if order['ordertype'] in ['absoffer', 'swabsoffer']:
return satoshi_to_unit(cjfee, order, btc_unit, rel_unit)
elif order['ordertype'] in ['reloffer', 'swreloffer']:
return str(float(cjfee) * rel_unit_to_factor[rel_unit]) + rel_unit
def satoshi_to_unit(sat, order, btc_unit, rel_unit):
power = unit_to_power[btc_unit]
return ("%." + str(power) + "f") % float(
Decimal(sat) / Decimal(10 ** power))
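# Illustrative examples (added comment, not in the original source):
#   satoshi_to_unit(100000000, None, 'BTC', '%')   -> '1.00000000'
#   satoshi_to_unit(150000, None, 'mBTC', '%')     -> '1.50000'
#   satoshi_to_unit(1500, None, 'satoshi', '%')    -> '1500'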
def order_str(s, order, btc_unit, rel_unit):
return str(s)
def create_orderbook_table(db, btc_unit, rel_unit):
result = ''
rows = db.execute('SELECT * FROM orderbook;').fetchall()
if not rows:
return 0, result
#print("len rows before filter: " + str(len(rows)))
rows = [o for o in rows if o["ordertype"] in filtered_offername_list]
order_keys_display = (('ordertype', ordertype_display),
('counterparty', do_nothing), ('oid', order_str),
('cjfee', cjfee_display), ('txfee', satoshi_to_unit),
('minsize', satoshi_to_unit),
('maxsize', satoshi_to_unit))
# somewhat complex sorting to sort by cjfee but with swabsoffers on top
def orderby_cmp(x, y):
if x['ordertype'] == y['ordertype']:
return cmp(Decimal(x['cjfee']), Decimal(y['cjfee']))
return cmp(offername_list.index(x['ordertype']),
offername_list.index(y['ordertype']))
for o in sorted(rows, cmp=orderby_cmp):
result += ' <tr>\n'
for key, displayer in order_keys_display:
result += ' <td>' + displayer(o[key], o, btc_unit,
rel_unit) + '</td>\n'
result += ' </tr>\n'
return len(rows), result
def create_table_heading(btc_unit, rel_unit):
col = ' <th>{1}</th>\n' # .format(field,label)
tableheading = '<table class="tftable sortable" border="1">\n <tr>' + ''.join(
[
col.format('ordertype', 'Type'), col.format(
'counterparty', 'Counterparty'),
col.format('oid', 'Order ID'),
col.format('cjfee', 'Fee'), col.format(
'txfee', 'Miner Fee Contribution / ' + btc_unit),
col.format(
'minsize', 'Minimum Size / ' + btc_unit), col.format(
'maxsize', 'Maximum Size / ' + btc_unit)
]) + ' </tr>'
return tableheading
def create_choose_units_form(selected_btc, selected_rel):
choose_units_form = (
'<form method="get" action="">' +
'<select name="btcunit" onchange="this.form.submit();">' +
''.join(('<option>' + u + ' </option>' for u in sorted_units)) +
'</select><select name="relunit" onchange="this.form.submit();">' +
''.join(('<option>' + u + ' </option>' for u in sorted_rel_units)) +
'</select></form>')
choose_units_form = choose_units_form.replace(
'<option>' + selected_btc,
'<option selected="selected">' + selected_btc)
choose_units_form = choose_units_form.replace(
'<option>' + selected_rel,
'<option selected="selected">' + selected_rel)
return choose_units_form
class OrderbookPageRequestHeader(SimpleHTTPServer.SimpleHTTPRequestHandler):
def __init__(self, request, client_address, base_server):
self.taker = base_server.taker
self.base_server = base_server
SimpleHTTPServer.SimpleHTTPRequestHandler.__init__(
self, request, client_address, base_server)
def create_orderbook_obj(self):
rows = self.taker.db.execute('SELECT * FROM orderbook;').fetchall()
if not rows:
return []
result = []
for row in rows:
o = dict(row)
if 'cjfee' in o:
o['cjfee'] = int(o['cjfee']) if o['ordertype']\
== 'swabsoffer' else float(o['cjfee'])
result.append(o)
return result
def get_counterparty_count(self):
counterparties = self.taker.db.execute(
'SELECT DISTINCT counterparty FROM orderbook WHERE ordertype=? OR ordertype=?;',
filtered_offername_list).fetchall()
return str(len(counterparties))
def do_GET(self):
# SimpleHTTPServer.SimpleHTTPRequestHandler.do_GET(self)
# print 'httpd received ' + self.path + ' request'
self.path, query = self.path.split('?', 1) if '?' in self.path else (
self.path, '')
args = urllib2.urlparse.parse_qs(query)
pages = ['/', '/ordersize', '/depth', '/orderbook.json']
if self.path not in pages:
return
fd = open('orderbook.html', 'r')
orderbook_fmt = fd.read()
fd.close()
alert_msg = ''
if jm_single().joinmarket_alert[0]:
alert_msg = '<br />JoinMarket Alert Message:<br />' + \
jm_single().joinmarket_alert[0]
if self.path == '/':
btc_unit = args['btcunit'][
0] if 'btcunit' in args else sorted_units[0]
rel_unit = args['relunit'][
0] if 'relunit' in args else sorted_rel_units[0]
if btc_unit not in sorted_units:
btc_unit = sorted_units[0]
if rel_unit not in sorted_rel_units:
rel_unit = sorted_rel_units[0]
ordercount, ordertable = create_orderbook_table(
self.taker.db, btc_unit, rel_unit)
choose_units_form = create_choose_units_form(btc_unit, rel_unit)
table_heading = create_table_heading(btc_unit, rel_unit)
replacements = {
'PAGETITLE': 'JoinMarket Browser Interface',
'MAINHEADING': 'JoinMarket Orderbook',
'SECONDHEADING':
(str(ordercount) + ' orders found by ' +
self.get_counterparty_count() + ' counterparties' + alert_msg),
'MAINBODY': (
toggleSWform + refresh_orderbook_form + choose_units_form +
table_heading + ordertable + '</table>\n')
}
elif self.path == '/ordersize':
replacements = {
'PAGETITLE': 'JoinMarket Browser Interface',
'MAINHEADING': 'Order Sizes',
'SECONDHEADING': 'Order Size Histogram' + alert_msg,
'MAINBODY': create_size_histogram(self.taker.db, args)
}
elif self.path.startswith('/depth'):
# if self.path[6] == '?':
# quantity =
cj_amounts = [10 ** cja for cja in range(4, 12, 1)]
mainbody = [create_depth_chart(self.taker.db, cja, args) \
for cja in cj_amounts] + \
["<br/><a href='?'>linear</a>" if args.get("scale") \
else "<br/><a href='?scale=log'>log scale</a>"]
replacements = {
'PAGETITLE': 'JoinMarket Browser Interface',
'MAINHEADING': 'Depth Chart',
'SECONDHEADING': 'Orderbook Depth' + alert_msg,
'MAINBODY': '<br />'.join(mainbody)
}
elif self.path == '/orderbook.json':
replacements = {}
orderbook_fmt = json.dumps(self.create_orderbook_obj())
orderbook_page = orderbook_fmt
for key, rep in replacements.iteritems():
orderbook_page = orderbook_page.replace(key, rep)
self.send_response(200)
if self.path.endswith('.json'):
self.send_header('Content-Type', 'application/json')
else:
self.send_header('Content-Type', 'text/html')
self.send_header('Content-Length', len(orderbook_page))
self.end_headers()
self.wfile.write(orderbook_page)
def do_POST(self):
global filtered_offername_list
pages = ['/shutdown', '/refreshorderbook', '/toggleSW']
if self.path not in pages:
return
if self.path == '/shutdown':
self.taker.msgchan.shutdown()
self.send_response(200)
self.send_header('Content-Type', 'text/html')
self.send_header('Content-Length', len(shutdownpage))
self.end_headers()
self.wfile.write(shutdownpage)
self.base_server.__shutdown_request = True
elif self.path == '/refreshorderbook':
self.taker.msgchan.request_orderbook()
time.sleep(5)
self.path = '/'
self.do_GET()
elif self.path == '/toggleSW':
if filtered_offername_list == swoffers:
filtered_offername_list = pkoffers
else:
filtered_offername_list = swoffers
self.path = '/'
self.do_GET()
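# Daemon thread that serves the orderbook pages over HTTP until the process exits.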
class HTTPDThread(threading.Thread):
def __init__(self, taker, hostport):
threading.Thread.__init__(self, name='HTTPDThread')
self.daemon = True
self.taker = taker
self.hostport = hostport
def run(self):
# hostport = ('localhost', 62601)
httpd = BaseHTTPServer.HTTPServer(self.hostport,
OrderbookPageRequestHeader)
httpd.taker = self.taker
print('\nstarted http server, visit http://{0}:{1}/\n'.format(
*self.hostport))
httpd.serve_forever()
class ObBasic(OrderbookWatch):
"""Dummy orderbook watch class
with hooks for triggering orderbook request"""
def __init__(self, msgchan, hostport):
self.hostport = hostport
self.set_msgchan(msgchan)
def on_welcome(self):
"""TODO: It will probably be a bit
simpler, and more consistent, to use
a twisted http server here instead
of a thread."""
HTTPDThread(self, self.hostport).start()
self.request_orderbook()
def request_orderbook(self):
self.msgchan.request_orderbook()
class ObIRCMessageChannel(IRCMessageChannel):
"""A customisation of the message channel
to allow receipt of privmsgs without the
verification hooks in client-daemon communication."""
def on_privmsg(self, nick, message):
if len(message) < 2:
return
if message[0] != COMMAND_PREFIX:
log.debug('message not a cmd')
return
cmd_string = message[1:].split(' ')[0]
if cmd_string not in offername_list:
log.debug('non-offer ignored')
return
#Ignore sigs (TODO better to include check)
sig = message[1:].split(' ')[-2:]
#reconstruct original message without cmd pref
rawmessage = ' '.join(message[1:].split(' ')[:-2])
for command in rawmessage.split(COMMAND_PREFIX):
_chunks = command.split(" ")
try:
self.check_for_orders(nick, _chunks)
except:
pass
def get_dummy_nick():
"""In Joinmarket-CS nick creation is negotiated
between client and server/daemon so as to allow
client to sign for messages; here we only ever publish
an orderbook request, so no such need, but for better
privacy, a conformant nick is created based on a random
pseudo-pubkey."""
import binascii
nick_pkh_raw = hashlib.sha256(os.urandom(10)).digest()[:NICK_HASH_LENGTH]
nick_pkh = btc.changebase(nick_pkh_raw, 256, 58)
#right pad to maximum possible; b58 is not fixed length.
#Use 'O' as one of the 4 not included chars in base58.
nick_pkh += 'O' * (NICK_MAX_ENCODED - len(nick_pkh))
#The constructed length will be 1 + 1 + NICK_MAX_ENCODED
nick = JOINMARKET_NICK_HEADER + str(JM_VERSION) + nick_pkh
jm_single().nickname = nick
return nick
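# Entry point: parse host/port options, connect to the configured IRC message
# channels under the dummy nick, and start the orderbook-watching HTTP service.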
def main():
load_program_config(config_path='..')
parser = OptionParser(
usage='usage: %prog [options]',
description='Runs a webservice which shows the orderbook.')
parser.add_option('-H',
'--host',
action='store',
type='string',
dest='host',
default='localhost',
help='hostname or IP to bind to, default=localhost')
parser.add_option('-p',
'--port',
action='store',
type='int',
dest='port',
help='port to listen on, default=62601',
default=62601)
(options, args) = parser.parse_args()
hostport = (options.host, options.port)
mcs = [ObIRCMessageChannel(c) for c in get_irc_mchannels()]
mcc = MessageChannelCollection(mcs)
mcc.set_nick(get_dummy_nick())
taker = ObBasic(mcc, hostport)
log.info("Starting ob-watcher")
mcc.run()
if __name__ == "__main__":
main()
reactor.run()
print('done')
| chris-belcher/joinmarket-clientserver | scripts/obwatch/ob-watcher.py | Python | gpl-3.0 | 18,029 | ["VisIt"] | 06f035fc4071fd4e2e10510d164281cc7e57667e8b4cb6bba68f468930b5ba55 |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import mock
import grpc
from grpc.experimental import aio
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from google.api_core import client_options
from google.api_core import exceptions as core_exceptions
from google.api_core import future
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.api_core import operation
from google.api_core import operation_async # type: ignore
from google.api_core import operations_v1
from google.api_core import path_template
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.dialogflowcx_v3beta1.services.versions import VersionsAsyncClient
from google.cloud.dialogflowcx_v3beta1.services.versions import VersionsClient
from google.cloud.dialogflowcx_v3beta1.services.versions import pagers
from google.cloud.dialogflowcx_v3beta1.services.versions import transports
from google.cloud.dialogflowcx_v3beta1.types import flow
from google.cloud.dialogflowcx_v3beta1.types import version
from google.cloud.dialogflowcx_v3beta1.types import version as gcdc_version
from google.longrunning import operations_pb2
from google.oauth2 import service_account
from google.protobuf import field_mask_pb2 # type: ignore
from google.protobuf import struct_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
import google.auth
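# Dummy client certificate source used by the mTLS tests below; returns fake cert/key bytes.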
def client_cert_source_callback():
return b"cert bytes", b"key bytes"
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
return (
"foo.googleapis.com"
if ("localhost" in client.DEFAULT_ENDPOINT)
else client.DEFAULT_ENDPOINT
)
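# _get_default_mtls_endpoint should map *.googleapis.com hosts to their mTLS
# counterparts and leave non-Google hosts unchanged.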
def test__get_default_mtls_endpoint():
api_endpoint = "example.googleapis.com"
api_mtls_endpoint = "example.mtls.googleapis.com"
sandbox_endpoint = "example.sandbox.googleapis.com"
sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
non_googleapi = "api.example.com"
assert VersionsClient._get_default_mtls_endpoint(None) is None
assert VersionsClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint
assert (
VersionsClient._get_default_mtls_endpoint(api_mtls_endpoint)
== api_mtls_endpoint
)
assert (
VersionsClient._get_default_mtls_endpoint(sandbox_endpoint)
== sandbox_mtls_endpoint
)
assert (
VersionsClient._get_default_mtls_endpoint(sandbox_mtls_endpoint)
== sandbox_mtls_endpoint
)
assert VersionsClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi
@pytest.mark.parametrize("client_class", [VersionsClient, VersionsAsyncClient,])
def test_versions_client_from_service_account_info(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_info"
) as factory:
factory.return_value = creds
info = {"valid": True}
client = client_class.from_service_account_info(info)
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == "dialogflow.googleapis.com:443"
@pytest.mark.parametrize(
"transport_class,transport_name",
[
(transports.VersionsGrpcTransport, "grpc"),
(transports.VersionsGrpcAsyncIOTransport, "grpc_asyncio"),
],
)
def test_versions_client_service_account_always_use_jwt(
transport_class, transport_name
):
with mock.patch.object(
service_account.Credentials, "with_always_use_jwt_access", create=True
) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=True)
use_jwt.assert_called_once_with(True)
with mock.patch.object(
service_account.Credentials, "with_always_use_jwt_access", create=True
) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=False)
use_jwt.assert_not_called()
@pytest.mark.parametrize("client_class", [VersionsClient, VersionsAsyncClient,])
def test_versions_client_from_service_account_file(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_file"
) as factory:
factory.return_value = creds
client = client_class.from_service_account_file("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
client = client_class.from_service_account_json("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == "dialogflow.googleapis.com:443"
def test_versions_client_get_transport_class():
transport = VersionsClient.get_transport_class()
available_transports = [
transports.VersionsGrpcTransport,
]
assert transport in available_transports
transport = VersionsClient.get_transport_class("grpc")
assert transport == transports.VersionsGrpcTransport
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(VersionsClient, transports.VersionsGrpcTransport, "grpc"),
(VersionsAsyncClient, transports.VersionsGrpcAsyncIOTransport, "grpc_asyncio"),
],
)
@mock.patch.object(
VersionsClient, "DEFAULT_ENDPOINT", modify_default_endpoint(VersionsClient)
)
@mock.patch.object(
VersionsAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(VersionsAsyncClient),
)
def test_versions_client_client_options(client_class, transport_class, transport_name):
# Check that if channel is provided we won't create a new one.
with mock.patch.object(VersionsClient, "get_transport_class") as gtc:
transport = transport_class(credentials=ga_credentials.AnonymousCredentials())
client = client_class(transport=transport)
gtc.assert_not_called()
# Check that if channel is provided via str we will create a new one.
with mock.patch.object(VersionsClient, "get_transport_class") as gtc:
client = client_class(transport=transport_name)
gtc.assert_called()
# Check the case api_endpoint is provided.
options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name, client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_MTLS_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
# unsupported value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
with pytest.raises(MutualTLSChannelError):
client = client_class(transport=transport_name)
# Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
):
with pytest.raises(ValueError):
client = client_class(transport=transport_name)
# Check the case quota_project_id is provided
options = client_options.ClientOptions(quota_project_id="octopus")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id="octopus",
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,use_client_cert_env",
[
(VersionsClient, transports.VersionsGrpcTransport, "grpc", "true"),
(
VersionsAsyncClient,
transports.VersionsGrpcAsyncIOTransport,
"grpc_asyncio",
"true",
),
(VersionsClient, transports.VersionsGrpcTransport, "grpc", "false"),
(
VersionsAsyncClient,
transports.VersionsGrpcAsyncIOTransport,
"grpc_asyncio",
"false",
),
],
)
@mock.patch.object(
VersionsClient, "DEFAULT_ENDPOINT", modify_default_endpoint(VersionsClient)
)
@mock.patch.object(
VersionsAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(VersionsAsyncClient),
)
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_versions_client_mtls_env_auto(
client_class, transport_class, transport_name, use_client_cert_env
):
# This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
# mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
# Check the case client_cert_source is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
options = client_options.ClientOptions(
client_cert_source=client_cert_source_callback
)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
if use_client_cert_env == "false":
expected_client_cert_source = None
expected_host = client.DEFAULT_ENDPOINT
else:
expected_client_cert_source = client_cert_source_callback
expected_host = client.DEFAULT_MTLS_ENDPOINT
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case ADC client cert is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=True,
):
with mock.patch(
"google.auth.transport.mtls.default_client_cert_source",
return_value=client_cert_source_callback,
):
if use_client_cert_env == "false":
expected_host = client.DEFAULT_ENDPOINT
expected_client_cert_source = None
else:
expected_host = client.DEFAULT_MTLS_ENDPOINT
expected_client_cert_source = client_cert_source_callback
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case client_cert_source and ADC client cert are not provided.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=False,
):
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize("client_class", [VersionsClient, VersionsAsyncClient])
@mock.patch.object(
VersionsClient, "DEFAULT_ENDPOINT", modify_default_endpoint(VersionsClient)
)
@mock.patch.object(
VersionsAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(VersionsAsyncClient),
)
def test_versions_client_get_mtls_endpoint_and_cert_source(client_class):
mock_client_cert_source = mock.Mock()
# Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
mock_api_endpoint = "foo"
options = client_options.ClientOptions(
client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
)
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
options
)
assert api_endpoint == mock_api_endpoint
assert cert_source == mock_client_cert_source
# Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}):
mock_client_cert_source = mock.Mock()
mock_api_endpoint = "foo"
options = client_options.ClientOptions(
client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
)
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
options
)
assert api_endpoint == mock_api_endpoint
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=False,
):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=True,
):
with mock.patch(
"google.auth.transport.mtls.default_client_cert_source",
return_value=mock_client_cert_source,
):
(
api_endpoint,
cert_source,
) = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
assert cert_source == mock_client_cert_source
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(VersionsClient, transports.VersionsGrpcTransport, "grpc"),
(VersionsAsyncClient, transports.VersionsGrpcAsyncIOTransport, "grpc_asyncio"),
],
)
def test_versions_client_client_options_scopes(
client_class, transport_class, transport_name
):
# Check the case scopes are provided.
options = client_options.ClientOptions(scopes=["1", "2"],)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=["1", "2"],
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,grpc_helpers",
[
(VersionsClient, transports.VersionsGrpcTransport, "grpc", grpc_helpers),
(
VersionsAsyncClient,
transports.VersionsGrpcAsyncIOTransport,
"grpc_asyncio",
grpc_helpers_async,
),
],
)
def test_versions_client_client_options_credentials_file(
client_class, transport_class, transport_name, grpc_helpers
):
# Check the case credentials file is provided.
options = client_options.ClientOptions(credentials_file="credentials.json")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file="credentials.json",
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
def test_versions_client_client_options_from_dict():
with mock.patch(
"google.cloud.dialogflowcx_v3beta1.services.versions.transports.VersionsGrpcTransport.__init__"
) as grpc_transport:
grpc_transport.return_value = None
client = VersionsClient(client_options={"api_endpoint": "squid.clam.whelk"})
grpc_transport.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,grpc_helpers",
[
(VersionsClient, transports.VersionsGrpcTransport, "grpc", grpc_helpers),
(
VersionsAsyncClient,
transports.VersionsGrpcAsyncIOTransport,
"grpc_asyncio",
grpc_helpers_async,
),
],
)
def test_versions_client_create_channel_credentials_file(
client_class, transport_class, transport_name, grpc_helpers
):
# Check the case credentials file is provided.
options = client_options.ClientOptions(credentials_file="credentials.json")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file="credentials.json",
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# test that the credentials from file are saved and used as the credentials.
with mock.patch.object(
google.auth, "load_credentials_from_file", autospec=True
) as load_creds, mock.patch.object(
google.auth, "default", autospec=True
) as adc, mock.patch.object(
grpc_helpers, "create_channel"
) as create_channel:
creds = ga_credentials.AnonymousCredentials()
file_creds = ga_credentials.AnonymousCredentials()
load_creds.return_value = (file_creds, None)
adc.return_value = (creds, None)
client = client_class(client_options=options, transport=transport_name)
create_channel.assert_called_with(
"dialogflow.googleapis.com:443",
credentials=file_creds,
credentials_file=None,
quota_project_id=None,
default_scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/dialogflow",
),
scopes=None,
default_host="dialogflow.googleapis.com",
ssl_credentials=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
@pytest.mark.parametrize("request_type", [version.ListVersionsRequest, dict,])
def test_list_versions(request_type, transport: str = "grpc"):
client = VersionsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_versions), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = version.ListVersionsResponse(
next_page_token="next_page_token_value",
)
response = client.list_versions(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == version.ListVersionsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListVersionsPager)
assert response.next_page_token == "next_page_token_value"
def test_list_versions_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = VersionsClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_versions), "__call__") as call:
client.list_versions()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == version.ListVersionsRequest()
@pytest.mark.asyncio
async def test_list_versions_async(
transport: str = "grpc_asyncio", request_type=version.ListVersionsRequest
):
client = VersionsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_versions), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
version.ListVersionsResponse(next_page_token="next_page_token_value",)
)
response = await client.list_versions(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == version.ListVersionsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListVersionsAsyncPager)
assert response.next_page_token == "next_page_token_value"
@pytest.mark.asyncio
async def test_list_versions_async_from_dict():
await test_list_versions_async(request_type=dict)
def test_list_versions_field_headers():
client = VersionsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = version.ListVersionsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_versions), "__call__") as call:
call.return_value = version.ListVersionsResponse()
client.list_versions(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_list_versions_field_headers_async():
client = VersionsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = version.ListVersionsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_versions), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
version.ListVersionsResponse()
)
await client.list_versions(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_list_versions_flattened():
client = VersionsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_versions), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = version.ListVersionsResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.list_versions(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
def test_list_versions_flattened_error():
client = VersionsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.list_versions(
version.ListVersionsRequest(), parent="parent_value",
)
@pytest.mark.asyncio
async def test_list_versions_flattened_async():
client = VersionsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_versions), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = version.ListVersionsResponse()
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
version.ListVersionsResponse()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.list_versions(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_list_versions_flattened_error_async():
client = VersionsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.list_versions(
version.ListVersionsRequest(), parent="parent_value",
)
def test_list_versions_pager(transport_name: str = "grpc"):
client = VersionsClient(
credentials=ga_credentials.AnonymousCredentials, transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_versions), "__call__") as call:
# Set the response to a series of pages.
call.side_effect = (
version.ListVersionsResponse(
versions=[version.Version(), version.Version(), version.Version(),],
next_page_token="abc",
),
version.ListVersionsResponse(versions=[], next_page_token="def",),
version.ListVersionsResponse(
versions=[version.Version(),], next_page_token="ghi",
),
version.ListVersionsResponse(
versions=[version.Version(), version.Version(),],
),
RuntimeError,
)
metadata = ()
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
)
pager = client.list_versions(request={})
assert pager._metadata == metadata
results = [i for i in pager]
assert len(results) == 6
assert all(isinstance(i, version.Version) for i in results)
def test_list_versions_pages(transport_name: str = "grpc"):
client = VersionsClient(
credentials=ga_credentials.AnonymousCredentials, transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_versions), "__call__") as call:
# Set the response to a series of pages.
call.side_effect = (
version.ListVersionsResponse(
versions=[version.Version(), version.Version(), version.Version(),],
next_page_token="abc",
),
version.ListVersionsResponse(versions=[], next_page_token="def",),
version.ListVersionsResponse(
versions=[version.Version(),], next_page_token="ghi",
),
version.ListVersionsResponse(
versions=[version.Version(), version.Version(),],
),
RuntimeError,
)
pages = list(client.list_versions(request={}).pages)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_versions_async_pager():
client = VersionsAsyncClient(credentials=ga_credentials.AnonymousCredentials,)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_versions), "__call__", new_callable=mock.AsyncMock
) as call:
# Set the response to a series of pages.
call.side_effect = (
version.ListVersionsResponse(
versions=[version.Version(), version.Version(), version.Version(),],
next_page_token="abc",
),
version.ListVersionsResponse(versions=[], next_page_token="def",),
version.ListVersionsResponse(
versions=[version.Version(),], next_page_token="ghi",
),
version.ListVersionsResponse(
versions=[version.Version(), version.Version(),],
),
RuntimeError,
)
async_pager = await client.list_versions(request={},)
assert async_pager.next_page_token == "abc"
responses = []
async for response in async_pager:
responses.append(response)
assert len(responses) == 6
assert all(isinstance(i, version.Version) for i in responses)
@pytest.mark.asyncio
async def test_list_versions_async_pages():
client = VersionsAsyncClient(credentials=ga_credentials.AnonymousCredentials,)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_versions), "__call__", new_callable=mock.AsyncMock
) as call:
# Set the response to a series of pages.
call.side_effect = (
version.ListVersionsResponse(
versions=[version.Version(), version.Version(), version.Version(),],
next_page_token="abc",
),
version.ListVersionsResponse(versions=[], next_page_token="def",),
version.ListVersionsResponse(
versions=[version.Version(),], next_page_token="ghi",
),
version.ListVersionsResponse(
versions=[version.Version(), version.Version(),],
),
RuntimeError,
)
pages = []
async for page_ in (await client.list_versions(request={})).pages:
pages.append(page_)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@pytest.mark.parametrize("request_type", [version.GetVersionRequest, dict,])
def test_get_version(request_type, transport: str = "grpc"):
client = VersionsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_version), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = version.Version(
name="name_value",
display_name="display_name_value",
description="description_value",
state=version.Version.State.RUNNING,
)
response = client.get_version(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == version.GetVersionRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, version.Version)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.description == "description_value"
assert response.state == version.Version.State.RUNNING
def test_get_version_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = VersionsClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_version), "__call__") as call:
client.get_version()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == version.GetVersionRequest()
@pytest.mark.asyncio
async def test_get_version_async(
transport: str = "grpc_asyncio", request_type=version.GetVersionRequest
):
client = VersionsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_version), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
version.Version(
name="name_value",
display_name="display_name_value",
description="description_value",
state=version.Version.State.RUNNING,
)
)
response = await client.get_version(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == version.GetVersionRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, version.Version)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.description == "description_value"
assert response.state == version.Version.State.RUNNING
@pytest.mark.asyncio
async def test_get_version_async_from_dict():
await test_get_version_async(request_type=dict)
def test_get_version_field_headers():
client = VersionsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = version.GetVersionRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_version), "__call__") as call:
call.return_value = version.Version()
client.get_version(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_get_version_field_headers_async():
client = VersionsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = version.GetVersionRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_version), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(version.Version())
await client.get_version(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_get_version_flattened():
client = VersionsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_version), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = version.Version()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.get_version(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
def test_get_version_flattened_error():
client = VersionsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.get_version(
version.GetVersionRequest(), name="name_value",
)
@pytest.mark.asyncio
async def test_get_version_flattened_async():
client = VersionsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_version), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = version.Version()
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(version.Version())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.get_version(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_get_version_flattened_error_async():
client = VersionsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.get_version(
version.GetVersionRequest(), name="name_value",
)
@pytest.mark.parametrize("request_type", [gcdc_version.CreateVersionRequest, dict,])
def test_create_version(request_type, transport: str = "grpc"):
client = VersionsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_version), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
response = client.create_version(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == gcdc_version.CreateVersionRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
def test_create_version_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = VersionsClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_version), "__call__") as call:
client.create_version()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == gcdc_version.CreateVersionRequest()
@pytest.mark.asyncio
async def test_create_version_async(
transport: str = "grpc_asyncio", request_type=gcdc_version.CreateVersionRequest
):
client = VersionsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_version), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
response = await client.create_version(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == gcdc_version.CreateVersionRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_create_version_async_from_dict():
await test_create_version_async(request_type=dict)
def test_create_version_field_headers():
client = VersionsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = gcdc_version.CreateVersionRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_version), "__call__") as call:
call.return_value = operations_pb2.Operation(name="operations/op")
client.create_version(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_create_version_field_headers_async():
client = VersionsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = gcdc_version.CreateVersionRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_version), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
)
await client.create_version(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_create_version_flattened():
client = VersionsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_version), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.create_version(
parent="parent_value", version=gcdc_version.Version(name="name_value"),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].version
mock_val = gcdc_version.Version(name="name_value")
assert arg == mock_val
def test_create_version_flattened_error():
client = VersionsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.create_version(
gcdc_version.CreateVersionRequest(),
parent="parent_value",
version=gcdc_version.Version(name="name_value"),
)
@pytest.mark.asyncio
async def test_create_version_flattened_async():
client = VersionsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_version), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.create_version(
parent="parent_value", version=gcdc_version.Version(name="name_value"),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].version
mock_val = gcdc_version.Version(name="name_value")
assert arg == mock_val
@pytest.mark.asyncio
async def test_create_version_flattened_error_async():
client = VersionsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.create_version(
gcdc_version.CreateVersionRequest(),
parent="parent_value",
version=gcdc_version.Version(name="name_value"),
)
@pytest.mark.parametrize("request_type", [gcdc_version.UpdateVersionRequest, dict,])
def test_update_version(request_type, transport: str = "grpc"):
client = VersionsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_version), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = gcdc_version.Version(
name="name_value",
display_name="display_name_value",
description="description_value",
state=gcdc_version.Version.State.RUNNING,
)
response = client.update_version(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == gcdc_version.UpdateVersionRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gcdc_version.Version)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.description == "description_value"
assert response.state == gcdc_version.Version.State.RUNNING
def test_update_version_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = VersionsClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_version), "__call__") as call:
client.update_version()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == gcdc_version.UpdateVersionRequest()
@pytest.mark.asyncio
async def test_update_version_async(
transport: str = "grpc_asyncio", request_type=gcdc_version.UpdateVersionRequest
):
client = VersionsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_version), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gcdc_version.Version(
name="name_value",
display_name="display_name_value",
description="description_value",
state=gcdc_version.Version.State.RUNNING,
)
)
response = await client.update_version(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == gcdc_version.UpdateVersionRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gcdc_version.Version)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.description == "description_value"
assert response.state == gcdc_version.Version.State.RUNNING
@pytest.mark.asyncio
async def test_update_version_async_from_dict():
await test_update_version_async(request_type=dict)
def test_update_version_field_headers():
client = VersionsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = gcdc_version.UpdateVersionRequest()
request.version.name = "version.name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_version), "__call__") as call:
call.return_value = gcdc_version.Version()
client.update_version(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "version.name=version.name/value",) in kw[
"metadata"
]
@pytest.mark.asyncio
async def test_update_version_field_headers_async():
client = VersionsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = gcdc_version.UpdateVersionRequest()
request.version.name = "version.name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_version), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gcdc_version.Version()
)
await client.update_version(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "version.name=version.name/value",) in kw[
"metadata"
]
def test_update_version_flattened():
client = VersionsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_version), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = gcdc_version.Version()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.update_version(
version=gcdc_version.Version(name="name_value"),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].version
mock_val = gcdc_version.Version(name="name_value")
assert arg == mock_val
arg = args[0].update_mask
mock_val = field_mask_pb2.FieldMask(paths=["paths_value"])
assert arg == mock_val
def test_update_version_flattened_error():
client = VersionsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.update_version(
gcdc_version.UpdateVersionRequest(),
version=gcdc_version.Version(name="name_value"),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
@pytest.mark.asyncio
async def test_update_version_flattened_async():
client = VersionsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_version), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = gcdc_version.Version()
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gcdc_version.Version()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.update_version(
version=gcdc_version.Version(name="name_value"),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].version
mock_val = gcdc_version.Version(name="name_value")
assert arg == mock_val
arg = args[0].update_mask
mock_val = field_mask_pb2.FieldMask(paths=["paths_value"])
assert arg == mock_val
@pytest.mark.asyncio
async def test_update_version_flattened_error_async():
client = VersionsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.update_version(
gcdc_version.UpdateVersionRequest(),
version=gcdc_version.Version(name="name_value"),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
@pytest.mark.parametrize("request_type", [version.DeleteVersionRequest, dict,])
def test_delete_version(request_type, transport: str = "grpc"):
client = VersionsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_version), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = None
response = client.delete_version(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == version.DeleteVersionRequest()
# Establish that the response is the type that we expect.
assert response is None
def test_delete_version_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = VersionsClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_version), "__call__") as call:
client.delete_version()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == version.DeleteVersionRequest()
@pytest.mark.asyncio
async def test_delete_version_async(
transport: str = "grpc_asyncio", request_type=version.DeleteVersionRequest
):
client = VersionsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_version), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
response = await client.delete_version(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == version.DeleteVersionRequest()
# Establish that the response is the type that we expect.
assert response is None
@pytest.mark.asyncio
async def test_delete_version_async_from_dict():
await test_delete_version_async(request_type=dict)
def test_delete_version_field_headers():
client = VersionsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = version.DeleteVersionRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_version), "__call__") as call:
call.return_value = None
client.delete_version(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_delete_version_field_headers_async():
client = VersionsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = version.DeleteVersionRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_version), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
await client.delete_version(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_delete_version_flattened():
client = VersionsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_version), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = None
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.delete_version(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
def test_delete_version_flattened_error():
client = VersionsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.delete_version(
version.DeleteVersionRequest(), name="name_value",
)
@pytest.mark.asyncio
async def test_delete_version_flattened_async():
client = VersionsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_version), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.delete_version(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_delete_version_flattened_error_async():
client = VersionsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.delete_version(
version.DeleteVersionRequest(), name="name_value",
)
@pytest.mark.parametrize("request_type", [version.LoadVersionRequest, dict,])
def test_load_version(request_type, transport: str = "grpc"):
client = VersionsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.load_version), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
response = client.load_version(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == version.LoadVersionRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
def test_load_version_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = VersionsClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.load_version), "__call__") as call:
client.load_version()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == version.LoadVersionRequest()
@pytest.mark.asyncio
async def test_load_version_async(
transport: str = "grpc_asyncio", request_type=version.LoadVersionRequest
):
client = VersionsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.load_version), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
response = await client.load_version(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == version.LoadVersionRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_load_version_async_from_dict():
await test_load_version_async(request_type=dict)
def test_load_version_field_headers():
client = VersionsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = version.LoadVersionRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.load_version), "__call__") as call:
call.return_value = operations_pb2.Operation(name="operations/op")
client.load_version(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_load_version_field_headers_async():
client = VersionsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = version.LoadVersionRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.load_version), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
)
await client.load_version(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_load_version_flattened():
client = VersionsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.load_version), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.load_version(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
def test_load_version_flattened_error():
client = VersionsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.load_version(
version.LoadVersionRequest(), name="name_value",
)
@pytest.mark.asyncio
async def test_load_version_flattened_async():
client = VersionsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.load_version), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.load_version(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_load_version_flattened_error_async():
client = VersionsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.load_version(
version.LoadVersionRequest(), name="name_value",
)
@pytest.mark.parametrize("request_type", [version.CompareVersionsRequest, dict,])
def test_compare_versions(request_type, transport: str = "grpc"):
client = VersionsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.compare_versions), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = version.CompareVersionsResponse(
base_version_content_json="base_version_content_json_value",
target_version_content_json="target_version_content_json_value",
)
response = client.compare_versions(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == version.CompareVersionsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, version.CompareVersionsResponse)
assert response.base_version_content_json == "base_version_content_json_value"
assert response.target_version_content_json == "target_version_content_json_value"
def test_compare_versions_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = VersionsClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.compare_versions), "__call__") as call:
client.compare_versions()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == version.CompareVersionsRequest()
@pytest.mark.asyncio
async def test_compare_versions_async(
transport: str = "grpc_asyncio", request_type=version.CompareVersionsRequest
):
client = VersionsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.compare_versions), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
version.CompareVersionsResponse(
base_version_content_json="base_version_content_json_value",
target_version_content_json="target_version_content_json_value",
)
)
response = await client.compare_versions(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == version.CompareVersionsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, version.CompareVersionsResponse)
assert response.base_version_content_json == "base_version_content_json_value"
assert response.target_version_content_json == "target_version_content_json_value"
@pytest.mark.asyncio
async def test_compare_versions_async_from_dict():
await test_compare_versions_async(request_type=dict)
def test_compare_versions_field_headers():
client = VersionsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = version.CompareVersionsRequest()
request.base_version = "base_version/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.compare_versions), "__call__") as call:
call.return_value = version.CompareVersionsResponse()
client.compare_versions(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "base_version=base_version/value",) in kw[
"metadata"
]
@pytest.mark.asyncio
async def test_compare_versions_field_headers_async():
client = VersionsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = version.CompareVersionsRequest()
request.base_version = "base_version/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.compare_versions), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
version.CompareVersionsResponse()
)
await client.compare_versions(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "base_version=base_version/value",) in kw[
"metadata"
]
def test_compare_versions_flattened():
client = VersionsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.compare_versions), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = version.CompareVersionsResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.compare_versions(base_version="base_version_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].base_version
mock_val = "base_version_value"
assert arg == mock_val
def test_compare_versions_flattened_error():
client = VersionsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.compare_versions(
version.CompareVersionsRequest(), base_version="base_version_value",
)
@pytest.mark.asyncio
async def test_compare_versions_flattened_async():
client = VersionsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.compare_versions), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
version.CompareVersionsResponse()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.compare_versions(base_version="base_version_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].base_version
mock_val = "base_version_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_compare_versions_flattened_error_async():
client = VersionsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.compare_versions(
version.CompareVersionsRequest(), base_version="base_version_value",
)
def test_credentials_transport_error():
# It is an error to provide credentials and a transport instance.
transport = transports.VersionsGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = VersionsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# It is an error to provide a credentials file and a transport instance.
transport = transports.VersionsGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = VersionsClient(
client_options={"credentials_file": "credentials.json"},
transport=transport,
)
# It is an error to provide an api_key and a transport instance.
transport = transports.VersionsGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
options = client_options.ClientOptions()
options.api_key = "api_key"
with pytest.raises(ValueError):
client = VersionsClient(client_options=options, transport=transport,)
# It is an error to provide an api_key and a credential.
options = mock.Mock()
options.api_key = "api_key"
with pytest.raises(ValueError):
client = VersionsClient(
client_options=options, credentials=ga_credentials.AnonymousCredentials()
)
# It is an error to provide scopes and a transport instance.
transport = transports.VersionsGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = VersionsClient(
client_options={"scopes": ["1", "2"]}, transport=transport,
)
def test_transport_instance():
# A client may be instantiated with a custom transport instance.
transport = transports.VersionsGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
client = VersionsClient(transport=transport)
assert client.transport is transport
def test_transport_get_channel():
# A client may be instantiated with a custom transport instance.
transport = transports.VersionsGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
transport = transports.VersionsGrpcAsyncIOTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
@pytest.mark.parametrize(
"transport_class",
[transports.VersionsGrpcTransport, transports.VersionsGrpcAsyncIOTransport,],
)
def test_transport_adc(transport_class):
# Test default credentials are used if not provided.
with mock.patch.object(google.auth, "default") as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class()
adc.assert_called_once()
def test_transport_grpc_default():
# A client should use the gRPC transport by default.
client = VersionsClient(credentials=ga_credentials.AnonymousCredentials(),)
assert isinstance(client.transport, transports.VersionsGrpcTransport,)
def test_versions_base_transport_error():
# Passing both a credentials object and credentials_file should raise an error
with pytest.raises(core_exceptions.DuplicateCredentialArgs):
transport = transports.VersionsTransport(
credentials=ga_credentials.AnonymousCredentials(),
credentials_file="credentials.json",
)
def test_versions_base_transport():
# Instantiate the base transport.
with mock.patch(
"google.cloud.dialogflowcx_v3beta1.services.versions.transports.VersionsTransport.__init__"
) as Transport:
Transport.return_value = None
transport = transports.VersionsTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
# Every method on the transport should just blindly
# raise NotImplementedError.
methods = (
"list_versions",
"get_version",
"create_version",
"update_version",
"delete_version",
"load_version",
"compare_versions",
)
for method in methods:
with pytest.raises(NotImplementedError):
getattr(transport, method)(request=object())
with pytest.raises(NotImplementedError):
transport.close()
# Additionally, the LRO client (a property) should
# also raise NotImplementedError
with pytest.raises(NotImplementedError):
transport.operations_client
def test_versions_base_transport_with_credentials_file():
# Instantiate the base transport with a credentials file
with mock.patch.object(
google.auth, "load_credentials_from_file", autospec=True
) as load_creds, mock.patch(
"google.cloud.dialogflowcx_v3beta1.services.versions.transports.VersionsTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.VersionsTransport(
credentials_file="credentials.json", quota_project_id="octopus",
)
load_creds.assert_called_once_with(
"credentials.json",
scopes=None,
default_scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/dialogflow",
),
quota_project_id="octopus",
)
def test_versions_base_transport_with_adc():
# Test the default credentials are used if credentials and credentials_file are None.
with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch(
"google.cloud.dialogflowcx_v3beta1.services.versions.transports.VersionsTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.VersionsTransport()
adc.assert_called_once()
def test_versions_auth_adc():
# If no credentials are provided, we should use ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
VersionsClient()
adc.assert_called_once_with(
scopes=None,
default_scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/dialogflow",
),
quota_project_id=None,
)
@pytest.mark.parametrize(
"transport_class",
[transports.VersionsGrpcTransport, transports.VersionsGrpcAsyncIOTransport,],
)
def test_versions_transport_auth_adc(transport_class):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class(quota_project_id="octopus", scopes=["1", "2"])
adc.assert_called_once_with(
scopes=["1", "2"],
default_scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/dialogflow",
),
quota_project_id="octopus",
)
@pytest.mark.parametrize(
"transport_class,grpc_helpers",
[
(transports.VersionsGrpcTransport, grpc_helpers),
(transports.VersionsGrpcAsyncIOTransport, grpc_helpers_async),
],
)
def test_versions_transport_create_channel(transport_class, grpc_helpers):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(
google.auth, "default", autospec=True
) as adc, mock.patch.object(
grpc_helpers, "create_channel", autospec=True
) as create_channel:
creds = ga_credentials.AnonymousCredentials()
adc.return_value = (creds, None)
transport_class(quota_project_id="octopus", scopes=["1", "2"])
create_channel.assert_called_with(
"dialogflow.googleapis.com:443",
credentials=creds,
credentials_file=None,
quota_project_id="octopus",
default_scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/dialogflow",
),
scopes=["1", "2"],
default_host="dialogflow.googleapis.com",
ssl_credentials=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
@pytest.mark.parametrize(
"transport_class",
[transports.VersionsGrpcTransport, transports.VersionsGrpcAsyncIOTransport],
)
def test_versions_grpc_transport_client_cert_source_for_mtls(transport_class):
cred = ga_credentials.AnonymousCredentials()
# Check ssl_channel_credentials is used if provided.
with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
mock_ssl_channel_creds = mock.Mock()
transport_class(
host="squid.clam.whelk",
credentials=cred,
ssl_channel_credentials=mock_ssl_channel_creds,
)
mock_create_channel.assert_called_once_with(
"squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_channel_creds,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
# is used.
with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
transport_class(
credentials=cred,
client_cert_source_for_mtls=client_cert_source_callback,
)
expected_cert, expected_key = client_cert_source_callback()
mock_ssl_cred.assert_called_once_with(
certificate_chain=expected_cert, private_key=expected_key
)
def test_versions_host_no_port():
client = VersionsClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="dialogflow.googleapis.com"
),
)
assert client.transport._host == "dialogflow.googleapis.com:443"
def test_versions_host_with_port():
client = VersionsClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="dialogflow.googleapis.com:8000"
),
)
assert client.transport._host == "dialogflow.googleapis.com:8000"
def test_versions_grpc_transport_channel():
channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials())
# Check that channel is used if provided.
transport = transports.VersionsGrpcTransport(
host="squid.clam.whelk", channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
assert transport._ssl_channel_credentials is None
def test_versions_grpc_asyncio_transport_channel():
channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials())
# Check that channel is used if provided.
transport = transports.VersionsGrpcAsyncIOTransport(
host="squid.clam.whelk", channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
assert transport._ssl_channel_credentials is None
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
"transport_class",
[transports.VersionsGrpcTransport, transports.VersionsGrpcAsyncIOTransport],
)
def test_versions_transport_channel_mtls_with_client_cert_source(transport_class):
with mock.patch(
"grpc.ssl_channel_credentials", autospec=True
) as grpc_ssl_channel_cred:
with mock.patch.object(
transport_class, "create_channel"
) as grpc_create_channel:
mock_ssl_cred = mock.Mock()
grpc_ssl_channel_cred.return_value = mock_ssl_cred
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
cred = ga_credentials.AnonymousCredentials()
with pytest.warns(DeprecationWarning):
with mock.patch.object(google.auth, "default") as adc:
adc.return_value = (cred, None)
transport = transport_class(
host="squid.clam.whelk",
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=client_cert_source_callback,
)
adc.assert_called_once()
grpc_ssl_channel_cred.assert_called_once_with(
certificate_chain=b"cert bytes", private_key=b"key bytes"
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
assert transport._ssl_channel_credentials == mock_ssl_cred
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
"transport_class",
[transports.VersionsGrpcTransport, transports.VersionsGrpcAsyncIOTransport],
)
def test_versions_transport_channel_mtls_with_adc(transport_class):
mock_ssl_cred = mock.Mock()
with mock.patch.multiple(
"google.auth.transport.grpc.SslCredentials",
__init__=mock.Mock(return_value=None),
ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
):
with mock.patch.object(
transport_class, "create_channel"
) as grpc_create_channel:
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
mock_cred = mock.Mock()
with pytest.warns(DeprecationWarning):
transport = transport_class(
host="squid.clam.whelk",
credentials=mock_cred,
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=None,
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=mock_cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
def test_versions_grpc_lro_client():
client = VersionsClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
transport = client.transport
# Ensure that we have an api-core operations client.
assert isinstance(transport.operations_client, operations_v1.OperationsClient,)
# Ensure that subsequent calls to the property send the exact same object.
assert transport.operations_client is transport.operations_client
def test_versions_grpc_lro_async_client():
client = VersionsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio",
)
transport = client.transport
# Ensure that we have an api-core operations client.
assert isinstance(transport.operations_client, operations_v1.OperationsAsyncClient,)
# Ensure that subsequent calls to the property send the exact same object.
assert transport.operations_client is transport.operations_client
def test_version_path():
project = "squid"
location = "clam"
agent = "whelk"
flow = "octopus"
version = "oyster"
expected = "projects/{project}/locations/{location}/agents/{agent}/flows/{flow}/versions/{version}".format(
project=project, location=location, agent=agent, flow=flow, version=version,
)
actual = VersionsClient.version_path(project, location, agent, flow, version)
assert expected == actual
def test_parse_version_path():
expected = {
"project": "nudibranch",
"location": "cuttlefish",
"agent": "mussel",
"flow": "winkle",
"version": "nautilus",
}
path = VersionsClient.version_path(**expected)
# Check that the path construction is reversible.
actual = VersionsClient.parse_version_path(path)
assert expected == actual
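# For reference, a minimal sketch of how the path helper exercised above
# expands and parses (the identifiers are made-up examples, not values from
# this test suite):
#
#     VersionsClient.version_path("my-project", "global", "my-agent", "my-flow", "1")
#     # -> "projects/my-project/locations/global/agents/my-agent/flows/my-flow/versions/1"
#
#     VersionsClient.parse_version_path(
#         "projects/my-project/locations/global/agents/my-agent/flows/my-flow/versions/1"
#     )
#     # -> {"project": "my-project", "location": "global", "agent": "my-agent",
#     #     "flow": "my-flow", "version": "1"}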
def test_common_billing_account_path():
billing_account = "scallop"
expected = "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
actual = VersionsClient.common_billing_account_path(billing_account)
assert expected == actual
def test_parse_common_billing_account_path():
expected = {
"billing_account": "abalone",
}
path = VersionsClient.common_billing_account_path(**expected)
# Check that the path construction is reversible.
actual = VersionsClient.parse_common_billing_account_path(path)
assert expected == actual
def test_common_folder_path():
folder = "squid"
expected = "folders/{folder}".format(folder=folder,)
actual = VersionsClient.common_folder_path(folder)
assert expected == actual
def test_parse_common_folder_path():
expected = {
"folder": "clam",
}
path = VersionsClient.common_folder_path(**expected)
# Check that the path construction is reversible.
actual = VersionsClient.parse_common_folder_path(path)
assert expected == actual
def test_common_organization_path():
organization = "whelk"
expected = "organizations/{organization}".format(organization=organization,)
actual = VersionsClient.common_organization_path(organization)
assert expected == actual
def test_parse_common_organization_path():
expected = {
"organization": "octopus",
}
path = VersionsClient.common_organization_path(**expected)
# Check that the path construction is reversible.
actual = VersionsClient.parse_common_organization_path(path)
assert expected == actual
def test_common_project_path():
project = "oyster"
expected = "projects/{project}".format(project=project,)
actual = VersionsClient.common_project_path(project)
assert expected == actual
def test_parse_common_project_path():
expected = {
"project": "nudibranch",
}
path = VersionsClient.common_project_path(**expected)
# Check that the path construction is reversible.
actual = VersionsClient.parse_common_project_path(path)
assert expected == actual
def test_common_location_path():
project = "cuttlefish"
location = "mussel"
expected = "projects/{project}/locations/{location}".format(
project=project, location=location,
)
actual = VersionsClient.common_location_path(project, location)
assert expected == actual
def test_parse_common_location_path():
expected = {
"project": "winkle",
"location": "nautilus",
}
path = VersionsClient.common_location_path(**expected)
# Check that the path construction is reversible.
actual = VersionsClient.parse_common_location_path(path)
assert expected == actual
def test_client_with_default_client_info():
client_info = gapic_v1.client_info.ClientInfo()
with mock.patch.object(
transports.VersionsTransport, "_prep_wrapped_messages"
) as prep:
client = VersionsClient(
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
with mock.patch.object(
transports.VersionsTransport, "_prep_wrapped_messages"
) as prep:
transport_class = VersionsClient.get_transport_class()
transport = transport_class(
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
@pytest.mark.asyncio
async def test_transport_close_async():
client = VersionsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio",
)
with mock.patch.object(
type(getattr(client.transport, "grpc_channel")), "close"
) as close:
async with client:
close.assert_not_called()
close.assert_called_once()
def test_transport_close():
transports = {
"grpc": "_grpc_channel",
}
for transport, close_name in transports.items():
client = VersionsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport
)
with mock.patch.object(
type(getattr(client.transport, close_name)), "close"
) as close:
with client:
close.assert_not_called()
close.assert_called_once()
def test_client_ctx():
transports = [
"grpc",
]
for transport in transports:
client = VersionsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport
)
# Test client calls underlying transport.
with mock.patch.object(type(client.transport), "close") as close:
close.assert_not_called()
with client:
pass
close.assert_called()
@pytest.mark.parametrize(
"client_class,transport_class",
[
(VersionsClient, transports.VersionsGrpcTransport),
(VersionsAsyncClient, transports.VersionsGrpcAsyncIOTransport),
],
)
def test_api_key_credentials(client_class, transport_class):
with mock.patch.object(
google.auth._default, "get_api_key_credentials", create=True
) as get_api_key_credentials:
mock_cred = mock.Mock()
get_api_key_credentials.return_value = mock_cred
options = client_options.ClientOptions()
options.api_key = "api_key"
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=mock_cred,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
| googleapis/python-dialogflow-cx | tests/unit/gapic/dialogflowcx_v3beta1/test_versions.py | Python | apache-2.0 | 110,374 | ["Octopus"] | 445a1df933e0ab02653e1d2a0048097a6f3be9b30ce12fc6673f8f86b21dbb31 |
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import os
import zipfile
import pandas as pd
from PIL import Image
import tensorflow as tf
from tensorflow.python.keras import layers
from tensorflow.python.keras import losses
from tensorflow.python.keras import models
from tensorflow.python.keras import backend as K
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
import numpy as np
from bigdl.orca import init_orca_context, stop_orca_context
from bigdl.orca.data import XShards
from bigdl.orca.learn.tf.estimator import Estimator
def load_data_from_zip(file_path, file):
with zipfile.ZipFile(os.path.join(file_path, file), "r") as zip_ref:
unzipped_file = zip_ref.namelist()[0]
zip_ref.extractall(file_path)
def load_data(file_path):
load_data_from_zip(file_path, 'train.zip')
load_data_from_zip(file_path, 'train_masks.zip')
load_data_from_zip(file_path, 'train_masks.csv.zip')
def main(cluster_mode, max_epoch, file_path, batch_size, platform, non_interactive):
import matplotlib
if not non_interactive and platform == "mac":
matplotlib.use('qt5agg')
if cluster_mode == "local":
init_orca_context(cluster_mode="local", cores=4, memory="3g")
elif cluster_mode.startswith("yarn"):
init_orca_context(cluster_mode=cluster_mode, num_nodes=2, cores=2, driver_memory="3g")
elif cluster_mode == "spark-submit":
init_orca_context(cluster_mode="spark-submit")
load_data(file_path)
img_dir = os.path.join(file_path, "train")
label_dir = os.path.join(file_path, "train_masks")
# Here we only take the first 1000 files for simplicity
df_train = pd.read_csv(os.path.join(file_path, 'train_masks.csv'))
ids_train = df_train['img'].map(lambda s: s.split('.')[0])
ids_train = ids_train[:1000]
x_train_filenames = []
y_train_filenames = []
for img_id in ids_train:
x_train_filenames.append(os.path.join(img_dir, "{}.jpg".format(img_id)))
y_train_filenames.append(os.path.join(label_dir, "{}_mask.gif".format(img_id)))
x_train_filenames, x_val_filenames, y_train_filenames, y_val_filenames = \
train_test_split(x_train_filenames, y_train_filenames, test_size=0.2, random_state=42)
def load_and_process_image(path):
array = mpimg.imread(path)
result = np.array(Image.fromarray(array).resize(size=(128, 128)))
result = result.astype(float)
result /= 255.0
return result
def load_and_process_image_label(path):
array = mpimg.imread(path)
result = np.array(Image.fromarray(array).resize(size=(128, 128)))
result = np.expand_dims(result[:, :, 1], axis=-1)
result = result.astype(float)
result /= 255.0
return result
train_images = np.stack([load_and_process_image(filepath) for filepath in x_train_filenames])
train_label_images = np.stack([load_and_process_image_label(filepath)
for filepath in y_train_filenames])
val_images = np.stack([load_and_process_image(filepath) for filepath in x_val_filenames])
val_label_images = np.stack([load_and_process_image_label(filepath)
for filepath in y_val_filenames])
train_shards = XShards.partition({"x": train_images, "y": train_label_images})
val_shards = XShards.partition({"x": val_images, "y": val_label_images})
# Build the U-Net model
def conv_block(input_tensor, num_filters):
encoder = layers.Conv2D(num_filters, (3, 3), padding='same')(input_tensor)
encoder = layers.Activation('relu')(encoder)
encoder = layers.Conv2D(num_filters, (3, 3), padding='same')(encoder)
encoder = layers.Activation('relu')(encoder)
return encoder
def encoder_block(input_tensor, num_filters):
encoder = conv_block(input_tensor, num_filters)
encoder_pool = layers.MaxPooling2D((2, 2), strides=(2, 2))(encoder)
return encoder_pool, encoder
def decoder_block(input_tensor, concat_tensor, num_filters):
decoder = layers.Conv2DTranspose(num_filters, (2, 2), strides=(2, 2), padding='same')(
input_tensor)
decoder = layers.concatenate([concat_tensor, decoder], axis=-1)
decoder = layers.Activation('relu')(decoder)
decoder = layers.Conv2D(num_filters, (3, 3), padding='same')(decoder)
decoder = layers.Activation('relu')(decoder)
decoder = layers.Conv2D(num_filters, (3, 3), padding='same')(decoder)
decoder = layers.Activation('relu')(decoder)
return decoder
inputs = layers.Input(shape=(128, 128, 3)) # 128
encoder0_pool, encoder0 = encoder_block(inputs, 16) # 64
encoder1_pool, encoder1 = encoder_block(encoder0_pool, 32) # 32
encoder2_pool, encoder2 = encoder_block(encoder1_pool, 64) # 16
encoder3_pool, encoder3 = encoder_block(encoder2_pool, 128) # 8
center = conv_block(encoder3_pool, 256) # center
decoder3 = decoder_block(center, encoder3, 128) # 16
decoder2 = decoder_block(decoder3, encoder2, 64) # 32
decoder1 = decoder_block(decoder2, encoder1, 32) # 64
decoder0 = decoder_block(decoder1, encoder0, 16) # 128
outputs = layers.Conv2D(1, (1, 1), activation='sigmoid')(decoder0)
net = models.Model(inputs=[inputs], outputs=[outputs])
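# Shape summary of the U-Net assembled above (derived by reading the layer
# calls; added as a comment aid, not produced by the script): each encoder
# block halves the spatial size, 128 -> 64 -> 32 -> 16 -> 8, while the filter
# count grows 16 -> 32 -> 64 -> 128, the center block uses 256 filters, and
# each decoder block upsamples back up to 128x128, where the final 1x1
# convolution emits a 128x128x1 sigmoid mask matching the preprocessed labels.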
# Define custom metrics
def dice_coeff(y_true, y_pred):
smooth = 1.
# Flatten
y_true_f = tf.reshape(y_true, [-1])
y_pred_f = tf.reshape(y_pred, [-1])
intersection = tf.reduce_sum(y_true_f * y_pred_f)
score = (2. * intersection + smooth) / \
(tf.reduce_sum(y_true_f) + tf.reduce_sum(y_pred_f) + smooth)
return score
# Define custom loss function
def dice_loss(y_true, y_pred):
loss = 1 - dice_coeff(y_true, y_pred)
return loss
def bce_dice_loss(y_true, y_pred):
loss = losses.binary_crossentropy(y_true, y_pred) + dice_loss(y_true, y_pred)
return loss
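# A tiny worked example of the dice coefficient defined above (illustrative
# only; not executed by this script): for a 2x2 mask with
# y_true = [[1, 0], [1, 0]] and y_pred = [[1, 0], [0, 0]],
# intersection = 1, sum(y_true) = 2, sum(y_pred) = 1, so with smooth = 1
# dice_coeff = (2*1 + 1) / (2 + 1 + 1) = 0.75 and dice_loss = 0.25.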
# compile model
net.compile(optimizer=tf.keras.optimizers.Adam(2e-3), loss=bce_dice_loss)
print(net.summary())
# create an estimator from keras model
est = Estimator.from_keras(keras_model=net)
# fit with estimator
est.fit(data=train_shards,
batch_size=batch_size,
epochs=max_epoch)
# evaluate with estimator
result = est.evaluate(val_shards)
print(result)
# predict with estimator
val_shards.cache()
val_image_shards = val_shards.transform_shard(lambda val_dict: {"x": val_dict["x"]})
pred_shards = est.predict(data=val_image_shards, batch_size=batch_size)
pred = pred_shards.collect()[0]["prediction"]
val_image_label = val_shards.collect()[0]
val_image = val_image_label["x"]
val_label = val_image_label["y"]
if not non_interactive:
# visualize 5 predicted results
plt.figure(figsize=(10, 20))
for i in range(5):
img = val_image[i]
label = val_label[i]
predicted_label = pred[i]
plt.subplot(5, 3, 3 * i + 1)
plt.imshow(img)
plt.title("Input image")
plt.subplot(5, 3, 3 * i + 2)
plt.imshow(label[:, :, 0], cmap='gray')
plt.title("Actual Mask")
plt.subplot(5, 3, 3 * i + 3)
plt.imshow(predicted_label, cmap='gray')
plt.title("Predicted Mask")
plt.suptitle("Examples of Input Image, Label, and Prediction")
plt.show()
stop_orca_context()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--cluster_mode', type=str, default="local",
help='The mode for the Spark cluster. local, yarn or spark-submit.')
parser.add_argument('--file_path', type=str, default="/tmp/carvana/",
help="The path to carvana train.zip, train_mask.zip and train_mask.csv.zip")
parser.add_argument('--epochs', type=int, default=8,
help="The number of epochs to train the model")
parser.add_argument('--batch_size', type=int, default=8,
help="Batch size for training and prediction")
parser.add_argument('--platform', type=str, default="linux",
help="The platform you used. Only linux and mac are supported.")
parser.add_argument('--non_interactive', default=False, action="store_true",
help="Flag to not visualize the result.")
args = parser.parse_args()
main(args.cluster_mode, args.epochs, args.file_path, args.batch_size, args.platform,
args.non_interactive)
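# Example invocation (a sketch; the dataset path is an assumption and must
# already contain the three Carvana zip archives referenced above):
#
#     python image_segmentation.py --cluster_mode local --file_path /tmp/carvana/ \
#         --epochs 8 --batch_size 8 --platform linux --non_interactive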
| intel-analytics/BigDL | python/orca/example/learn/tf/image_segmentation/image_segmentation.py | Python | apache-2.0 | 9,307 | ["ORCA"] | 2a3f7571a2d5c841dbdc54fd89b840c469cdf55e6332b1d121e37fe5da3a8c4f |
# -*- coding: utf-8 -*-
from dp_tornado.engine.schema import Table as dpTable
from dp_tornado.engine.schema import Attribute as dpAttribute
class ArticlesSchema(dpTable):
__table_name__ = 'articles'
article_id = dpAttribute.field(dpAttribute.DataType.INT, ai=True, pk=True, nn=True, un=True, comment='Article ID')
title = dpAttribute.field(dpAttribute.DataType.VARCHAR(128), nn=True, comment='Title')
content = dpAttribute.field(dpAttribute.DataType.LONGTEXT, nn=True, comment='Content')
author = dpAttribute.field(dpAttribute.DataType.VARCHAR(64), comment='Author')
signdate = dpAttribute.field(dpAttribute.DataType.INT, comment='Signdate')
idx_articles_title = dpAttribute.index(dpAttribute.IndexType.INDEX, 'title')
idx_articles_author = dpAttribute.index(dpAttribute.IndexType.INDEX, 'author')
idx_articles_signdate = dpAttribute.index(dpAttribute.IndexType.INDEX, 'signdate')
__dummy_data__ = [
{'article_id': 1, 'title': 'Helloworld!', 'content': 'Hello!', 'author': 'James', 'signdate': 1478772800},
{'article_id': 2, 'title': 'Hi!', 'content': 'Hi! Nice to meet you!', 'author': 'Amber', 'signdate': 1478782800},
{'article_id': 3, 'title': 'Nice to meet you!', 'content': 'Hello. Nice to meet you!', 'author': 'Alice', 'signdate': 1478792800},
{'article_id': 4, 'title': 'Good morning!', 'content': 'Hi! Good morning!', 'author': 'Elsa', 'signdate': 1478802800},
{'article_id': 5, 'title': 'Good afternoon!', 'content': 'Hi! Good afternoon!', 'author': 'Kevin', 'signdate': 1478812800},
{'article_id': 6, 'title': 'Good evening!', 'content': 'Hi! Good evening!', 'author': 'Sam', 'signdate': 1478822800},
{'article_id': 7, 'title': 'Good night!', 'content': 'Hi! Good night!', 'author': 'Thomas', 'signdate': 1478832800},
{'article_id': 8, 'title': 'What up!', 'content': 'Hi! What up!', 'author': 'Tim', 'signdate': 1478842800},
{'article_id': 9, 'title': 'Yo!', 'content': 'Hi! Yo!', 'author': 'William', 'signdate': 1478852800},
{'article_id': 10, 'title': 'How are you?', 'content': 'Hi! What up!', 'author': 'Oscar', 'signdate': 1478862800},
{'article_id': 11, 'title': 'Good bye!', 'content': 'Hi! Good bye!', 'author': 'Walter', 'signdate': 1478872800},
{'article_id': 12, 'title': 'See you again!', 'content': 'Hi! See you again!', 'author': 'Jackson', 'signdate': 1478882800},
{'article_id': 13, 'title': 'Hello!', 'content': 'Hi! What up!', 'author': 'Mike', 'signdate': 1478892800}
]
| why2pac/dp-tornado | dp_tornado/engine/template/bbs/schema/bbs/articles.py | Python | mit | 2,558 | ["Amber"] | fe0e7d505a293ce95e3377cfeab97462d04477053907c7f7e6f00dc9e7e63f63 |
# Copyright (C) 2013 by Ben Morris (ben@bendmorris.com)
# Based on Bio.Nexus, copyright 2005-2008 by Frank Kauff & Cymon J. Cox
# and Bio.Phylo.Newick, copyright 2009 by Eric Talevich.
# All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""I/O function wrappers for the RDF/CDAO file format.
This is an RDF format that conforms to the Comparative Data Analysis Ontology (CDAO).
See: http://www.evolutionaryontology.org/cdao
This module requires the librdf Python bindings (http://www.librdf.org)
The CDAOIO.Parser, in addition to parsing text files, can also parse directly
from a triple store that implements the Redland storage interface; similarly,
the CDAOIO.Writer can store triples in a triple store instead of serializing
them to a file.
"""
import sys
# Add path to Bio
sys.path.append('../..')
__docformat__ = "restructuredtext en"
from Bio._py3k import StringIO
from Bio.Phylo import CDAO
from Bio.Phylo._cdao_owl import cdao_elements, cdao_namespaces, resolve_uri
import os
class CDAOError(Exception):
"""Exception raised when CDAO object construction cannot continue."""
pass
try:
import rdflib
rdfver = rdflib.__version__
if rdfver[0] in ["1", "2"] or (rdfver in ["3.0.0", "3.1.0", "3.2.0"]):
raise CDAOError(
'Support for CDAO tree format requires RDFlib v3.2.1 or later.')
except ImportError:
raise CDAOError('Support for CDAO tree format requires RDFlib.')
RDF_NAMESPACES = {
'owl': 'http://www.w3.org/2002/07/owl#',
'rdf': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#',
'rdfs': 'http://www.w3.org/2000/01/rdf-schema#',
}
RDF_NAMESPACES.update(cdao_namespaces)
# pad node ids with zeroes until they're at least this length
ZEROES = 8
def qUri(x):
return resolve_uri(x, namespaces=RDF_NAMESPACES)
def format_label(x):
return x.replace('_', ' ')
# ---------------------------------------------------------
# Public API
def parse(handle, **kwargs):
"""Iterate over the trees in a CDAO file handle.
:returns: generator of Bio.Phylo.CDAO.Tree objects.
"""
return Parser(handle).parse(**kwargs)
def write(trees, handle, plain=False, **kwargs):
"""Write a trees in CDAO format to the given file handle.
:returns: number of trees written.
"""
return Writer(trees).write(handle, plain=plain, **kwargs)
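# A minimal usage sketch for the parse/write wrappers above (kept as comments
# so importing this module stays side-effect free; the file names are
# hypothetical):
#
#     from Bio.Phylo import CDAOIO
#     with open("example.cdao") as handle:
#         trees = list(CDAOIO.parse(handle))
#     with open("roundtrip.cdao", "w") as out:
#         CDAOIO.write(trees, out)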
# ---------------------------------------------------------
# Input
class Parser(object):
"""Parse a CDAO tree given a file handle."""
def __init__(self, handle=None):
self.handle = handle
self.graph = None
self.node_info = None
self.children = {}
self.rooted = False
@classmethod
def from_string(cls, treetext):
handle = StringIO(treetext)
return cls(handle)
def parse(self, **kwargs):
"""Parse the text stream this object was initialized with."""
self.parse_handle_to_graph(**kwargs)
return self.parse_graph()
def parse_handle_to_graph(self, rooted=False,
parse_format='turtle', context=None, **kwargs):
"""Parse self.handle into RDF model self.model."""
if self.graph is None:
self.graph = rdflib.Graph()
graph = self.graph
for k, v in RDF_NAMESPACES.items():
graph.bind(k, v)
self.rooted = rooted
if 'base_uri' in kwargs:
base_uri = kwargs['base_uri']
else:
base_uri = "file://" + os.path.abspath(self.handle.name)
graph.parse(file=self.handle, publicID=base_uri, format=parse_format)
return self.parse_graph(graph, context=context)
def parse_graph(self, graph=None, context=None):
"""Generator that yields CDAO.Tree instances from an RDF model."""
if graph is None:
graph = self.graph
# look up branch lengths/TUs for all nodes
self.get_node_info(graph, context=context)
for root_node in self.tree_roots:
clade = self.parse_children(root_node)
yield CDAO.Tree(root=clade, rooted=self.rooted)
def new_clade(self, node):
"""Returns a CDAO.Clade object for a given named node."""
result = self.node_info[node]
kwargs = {}
if 'branch_length' in result:
kwargs['branch_length'] = result['branch_length']
if 'label' in result:
kwargs['name'] = result['label'].replace('_', ' ')
if 'confidence' in result:
kwargs['confidence'] = result['confidence']
clade = CDAO.Clade(**kwargs)
return clade
def get_node_info(self, graph, context=None):
"""Creates a dictionary containing information about all nodes in the tree."""
self.node_info = {}
self.obj_info = {}
self.children = {}
self.nodes = set()
self.tree_roots = set()
assignments = {
qUri('cdao:has_Parent'): 'parent',
qUri('cdao:belongs_to_Edge_as_Child'): 'edge',
qUri('cdao:has_Annotation'): 'annotation',
qUri('cdao:has_Value'): 'value',
qUri('cdao:represents_TU'): 'tu',
qUri('rdfs:label'): 'label',
qUri('cdao:has_Support_Value'): 'confidence',
}
for s, v, o in graph:
# process each RDF triple in the graph sequentially
s, v, o = str(s), str(v), str(o)
if s not in self.obj_info:
self.obj_info[s] = {}
this = self.obj_info[s]
try:
# if the predicate is one we care about, store information for
# later
this[assignments[v]] = o
except KeyError:
pass
if v == qUri('rdf:type'):
if o in (qUri('cdao:AncestralNode'), qUri('cdao:TerminalNode')):
# this is a tree node; store it in set of all nodes
self.nodes.add(s)
if v == qUri('cdao:has_Root'):
# this is a tree; store its root in set of all tree roots
self.tree_roots.add(o)
for node in self.nodes:
# for each node, look up all information needed to create a
# CDAO.Clade
self.node_info[node] = {}
node_info = self.node_info[node]
obj = self.obj_info[node]
if 'edge' in obj:
# if this object points to an edge, we need a branch length from
# the annotation on that edge
edge = self.obj_info[obj['edge']]
if 'annotation' in edge:
annotation = self.obj_info[edge['annotation']]
if 'value' in annotation:
node_info['branch_length'] = float(annotation['value'])
if 'tu' in obj:
# if this object points to a TU, we need the label of that TU
tu = self.obj_info[obj['tu']]
if 'label' in tu:
node_info['label'] = tu['label']
if 'parent' in obj:
# store this node as a child of its parent, if it has one,
# so that the tree can be traversed from parent to children
parent = obj['parent']
if parent not in self.children:
self.children[parent] = []
self.children[parent].append(node)
def parse_children(self, node):
"""Traverse the tree to create a nested clade structure.
        Returns a CDAO.Clade and calls itself recursively for each child,
traversing the entire tree and creating a nested structure of CDAO.Clade
objects.
"""
clade = self.new_clade(node)
children = self.children[node] if node in self.children else []
clade.clades = [
self.parse_children(child_node) for child_node in children]
return clade
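# Note (added sketch, not part of the original module): a Parser can also be
# built from a string of CDAO/Turtle data via Parser.from_string(treetext).
# Because parse_handle_to_graph() falls back to self.handle.name for the base
# URI, StringIO input should supply an explicit base_uri keyword, e.g.:
#
#     parser = Parser.from_string(cdao_text)
#     trees = list(parser.parse(base_uri="http://example.org/tree/"))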
# ---------------------------------------------------------
# Output
class Writer(object):
"""Based on the writer in Bio.Nexus.Trees (str, to_string)."""
prefixes = RDF_NAMESPACES
def __init__(self, trees):
self.trees = trees
self.node_counter = 0
self.edge_counter = 0
self.tu_counter = 0
self.tree_counter = 0
def write(self, handle, tree_uri='', record_complete_ancestry=False,
rooted=False, **kwargs):
"""Write this instance's trees to a file handle."""
self.rooted = rooted
self.record_complete_ancestry = record_complete_ancestry
if tree_uri and not tree_uri.endswith('/'):
tree_uri += '/'
trees = self.trees
if tree_uri:
handle.write('@base <%s>\n' % tree_uri)
for k, v in self.prefixes.items():
handle.write('@prefix %s: <%s> .\n' % (k, v))
handle.write('<%s> a owl:Ontology .\n' % self.prefixes['cdao'])
for tree in trees:
self.tree_counter += 1
self.tree_uri = 'tree%s'
first_clade = tree.clade
statements = self.process_clade(first_clade, root=tree)
for stmt in statements:
self.add_stmt_to_handle(handle, stmt)
def add_stmt_to_handle(self, handle, stmt):
# apply URI prefixes
stmt_strings = []
for n, part in enumerate(stmt):
if isinstance(part, rdflib.URIRef):
node_uri = str(part)
changed = False
for prefix, uri in self.prefixes.items():
if node_uri.startswith(uri):
node_uri = node_uri.replace(uri, '%s:' % prefix, 1)
if node_uri == 'rdf:type':
node_uri = 'a'
changed = True
if changed or ':' in node_uri:
stmt_strings.append(node_uri)
else:
stmt_strings.append('<%s>' % node_uri)
elif isinstance(part, rdflib.Literal):
stmt_strings.append(part.n3())
else:
stmt_strings.append(str(part))
handle.write('%s .\n' % ' '.join(stmt_strings))
def process_clade(self, clade, parent=None, root=False):
"""recursively generate triples describing a tree of clades"""
self.node_counter += 1
clade.uri = 'node%s' % str(self.node_counter).zfill(ZEROES)
if parent:
clade.ancestors = parent.ancestors + [parent.uri]
else:
clade.ancestors = []
nUri = lambda s: rdflib.URIRef(s)
pUri = lambda s: rdflib.URIRef(qUri(s))
tree_id = nUri('')
statements = []
if root is not False:
# create a cdao:RootedTree with reference to the tree root
tree_type = pUri('cdao:RootedTree') if self.rooted else pUri(
'cdao:UnrootedTree')
statements += [
(tree_id, pUri('rdf:type'), tree_type),
(tree_id, pUri('cdao:has_Root'), nUri(clade.uri)),
]
try:
tree_attributes = root.attributes
except AttributeError:
tree_attributes = []
for predicate, obj in tree_attributes:
statements.append((tree_id, predicate, obj))
if clade.name:
# create TU
self.tu_counter += 1
tu_uri = 'tu%s' % str(self.tu_counter).zfill(ZEROES)
statements += [
(nUri(tu_uri), pUri('rdf:type'), pUri('cdao:TU')),
(nUri(clade.uri), pUri(
'cdao:represents_TU'), nUri(tu_uri)),
(nUri(tu_uri), pUri('rdfs:label'),
rdflib.Literal(format_label(clade.name))),
]
try:
tu_attributes = clade.tu_attributes
except AttributeError:
tu_attributes = []
for predicate, obj in tu_attributes:
yield (nUri(tu_uri), predicate, obj)
# create this node
node_type = 'cdao:TerminalNode' if clade.is_terminal(
) else 'cdao:AncestralNode'
statements += [
(nUri(clade.uri), pUri('rdf:type'), pUri(node_type)),
(nUri(clade.uri), pUri(
'cdao:belongs_to_Tree'), tree_id),
]
if parent is not None:
# create edge from the parent node to this node
self.edge_counter += 1
edge_uri = 'edge%s' % str(self.edge_counter).zfill(ZEROES)
statements += [
(nUri(edge_uri), pUri('rdf:type'), pUri('cdao:DirectedEdge')),
(nUri(edge_uri), pUri(
'cdao:belongs_to_Tree'), tree_id),
(nUri(edge_uri), pUri('cdao:has_Parent_Node'),
nUri(parent.uri)),
(nUri(edge_uri), pUri('cdao:has_Child_Node'),
nUri(clade.uri)),
(nUri(clade.uri), pUri(
'cdao:belongs_to_Edge_as_Child'), nUri(edge_uri)),
(nUri(clade.uri), pUri('cdao:has_Parent'),
nUri(parent.uri)),
(nUri(parent.uri), pUri(
'cdao:belongs_to_Edge_as_Parent'), nUri(edge_uri)),
]
if hasattr(clade, 'confidence') and clade.confidence is not None:
confidence = rdflib.Literal(
clade.confidence, datatype='http://www.w3.org/2001/XMLSchema#decimal')
statements += [(nUri(clade.uri),
pUri('cdao:has_Support_Value'), confidence)]
if self.record_complete_ancestry and len(clade.ancestors) > 0:
statements += [(nUri(clade.uri), pUri('cdao:has_Ancestor'), nUri(ancestor))
for ancestor in clade.ancestors]
if clade.branch_length is not None:
# add branch length
edge_ann_uri = 'edge_annotation%s' % str(
self.edge_counter).zfill(ZEROES)
branch_length = rdflib.Literal(clade.branch_length, datatype=rdflib.URIRef(
'http://www.w3.org/2001/XMLSchema#decimal'))
statements += [
(nUri(edge_ann_uri), pUri('rdf:type'),
pUri('cdao:EdgeLength')),
(nUri(edge_uri), pUri('cdao:has_Annotation'),
nUri(edge_ann_uri)),
(nUri(edge_ann_uri),
pUri('cdao:has_Value'), branch_length),
]
try:
edge_attributes = clade.edge_attributes
except AttributeError:
edge_attributes = []
for predicate, obj in edge_attributes:
yield (nUri(edge_uri), predicate, obj)
for stmt in statements:
yield stmt
try:
clade_attributes = clade.attributes
except AttributeError:
clade_attributes = []
for predicate, obj in clade_attributes:
yield (nUri(clade.uri), predicate, obj)
if not clade.is_terminal():
for new_clade in clade.clades:
for stmt in self.process_clade(new_clade, parent=clade, root=False):
yield stmt
|
Ambuj-UF/ConCat-1.0
|
src/Utils/Bio/Phylo/CDAOIO.py
|
Python
|
gpl-2.0
| 15,435
|
[
"Biopython"
] |
29ce6cf951f032d9773453952e36ba1548b15eb38cf7cc0dd3137c7b1b40456c
|
###########################################################################
#
# This program is part of Zenoss Core, an open source monitoring platform.
# Copyright (C) 2008, Zenoss Inc.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 as published by
# the Free Software Foundation.
#
# For complete information please visit: http://www.zenoss.com/oss/
#
###########################################################################
################################
# These variables are overwritten by Zenoss when the ZenPack is exported
# or saved. Do not modify them directly here.
NAME = 'ZenPacks.community.NetWare'
VERSION = '1.0'
AUTHOR = 'Eric Feldman'
LICENSE = ''
NAMESPACE_PACKAGES = ['ZenPacks', 'ZenPacks.community']
PACKAGES = ['ZenPacks', 'ZenPacks.community', 'ZenPacks.community.NetWare']
INSTALL_REQUIRES = []
COMPAT_ZENOSS_VERS = '>=2.3'
PREV_ZENPACK_NAME = ''
# STOP_REPLACEMENTS
################################
# Zenoss will not overwrite any changes you make below here.
from setuptools import setup, find_packages
setup(
# This ZenPack metadata should usually be edited with the Zenoss
# ZenPack edit page. Whenever the edit page is submitted it will
# overwrite the values below (the ones it knows about) with new values.
name = NAME,
version = VERSION,
author = AUTHOR,
license = LICENSE,
# This is the version spec which indicates what versions of Zenoss
# this ZenPack is compatible with
compatZenossVers = COMPAT_ZENOSS_VERS,
# previousZenPackName is a facility for telling Zenoss that the name
# of this ZenPack has changed. If no ZenPack with the current name is
    # installed, then a ZenPack with the previous name, if installed, will be
    # upgraded to this one.
prevZenPackName = PREV_ZENPACK_NAME,
# Indicate to setuptools which namespace packages the zenpack
# participates in
namespace_packages = NAMESPACE_PACKAGES,
# Tell setuptools what packages this zenpack provides.
packages = find_packages(),
# Tell setuptools to figure out for itself which files to include
# in the binary egg when it is built.
include_package_data = True,
# Tell setuptools what non-python files should also be included
# with the binary egg.
package_data = {
        '': ['*.txt', '../COPYRIGHT.txt', '../LICENSE.txt'],
NAME: ['objects/*','skins/*/*','services/*', 'reports/*/*',
'modeler/*/*', 'daemons/*', 'lib/*', 'libexec/*'],
},
# Indicate dependencies on other python modules or ZenPacks. This line
# is modified by zenoss when the ZenPack edit page is submitted. Zenoss
    # tries to add/delete the names it manages at the beginning of this
    # list, so any manual additions should be added to the end. Things will
    # go poorly if this line is broken into multiple lines or modified too
    # dramatically.
install_requires = INSTALL_REQUIRES,
# Every ZenPack egg must define exactly one zenoss.zenpacks entry point
# of this form.
entry_points = {
'zenoss.zenpacks': '%s = %s' % (NAME, NAME),
},
# All ZenPack eggs must be installed in unzipped form.
zip_safe = False,
)
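# Packaging sketch (illustrative only, not part of the generated file): a
# ZenPack egg built from this setup.py is normally produced and installed
# through Zenoss itself, but the setuptools machinery can also be driven
# directly, e.g.:
#
#     python setup.py bdist_egg
#
# which yields something like dist/ZenPacks.community.NetWare-1.0-py2.X.egg
# carrying the zenoss.zenpacks entry point declared above.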
|
anksp21/Community-Zenpacks
|
ZenPacks.community.NetWare/setup.py
|
Python
|
gpl-2.0
| 3,270
|
[
"VisIt"
] |
1f9703346069782f00002e09b059738c190ed066a68c2577e50e58b2250e37b4
|
# Copyright 2006-2012 Mark Diekhans
# this module is tested by pycbio/src/progs/gbff/gbffGenesToGenePred
from pycbio.sys import PycbioException
from Bio import SeqFeature
class GbffExcept(PycbioException):
pass
def featHaveQual(feat, key):
"does a feature have a qualifier?"
return (key in feat.qualifiers)
def featGetQual1(feat, key):
"""get the single valued qualifier, or None if not found. Returns first
value and error if qualifier has more than one value"""
val = feat.qualifiers.get(key)
if val is None:
return None
if len(val) != 1:
raise GbffExcept("qualifier \"" + key + "\" has multiple values")
return val[0]
def featMustGetQual1(feat, key):
val = featGetQual1(feat, key)
if val is None:
raise GbffExcept("qualifier \""+key+"\" not found in feature: " + str(feat))
return val
def featGetQual1ByKeys(feat, keys):
"get single-valued qualifier based on first matching key"
for key in keys:
val = featGetQual1(feat, key)
if val is not None:
return val
return None
def featMustGetQual1ByKeys(feat, keys):
"get a single valued qualifier based on first matching key, or error"
val = featGetQual1ByKeys(feat, keys)
if val is None:
featRaiseNeedAQual(feat, keys)
return val
def featRaiseNeedAQual(feat, quals):
"raise error about one of the qualifiers not being found"
raise GbffExcept("didn't find any of these qualifiers: "
+ ", ".join(quals) + " in feature: " + str(feat))
def featGetDbXRef(feat, dbXRefPrefix):
"return a dbXRef starting with dbXRefPrefix (include `:' in key), or None if not found"
dbXRefs = feat.qualifiers.get("db_xref")
if dbXRefs is not None:
for dbXRef in dbXRefs:
if dbXRef.startswith(dbXRefPrefix):
return dbXRef[len(dbXRefPrefix):]
return None
def featGetGeneId(feat):
"get a db_ref qualifier for GeneID, or None"
return featGetDbXRef(feat, "GeneID:")
def featMustGetGeneId(feat):
"""get a db_ref qualifier for GeneID or error if not found"""
# FIXME: at one point returned locus id if gene id not found still needed?
val = featGetGeneId(feat)
if val is None:
raise GbffExcept("db_xref GeneID not found in feature: " + str(feat))
return val
def featGetLocusId(feat):
"get a db_ref qualifier for LocusId, or None"
return featGetDbXRef(feat, "LocusID:")
def featGetGID(feat):
"get a db_ref qualifier for GI, or None"
return featGetDbXRef(feat, "GI:")
def featGetCdsId(feat):
"get a CDS identifier from qualifier, or None"
return featGetQual1ByKeys(feat, ("protein_id", "standard_name"))
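# Illustrative sketch of the qualifier helpers above (not part of the original
# module; assumes `record` is a Bio.SeqRecord parsed from a GenBank flat file,
# e.g. via Bio.SeqIO.parse(handle, "genbank")):
#
#     for feat in record.features:
#         if feat.type == "CDS":
#             geneId = featGetGeneId(feat)   # None if no GeneID db_xref
#             cdsId = featGetCdsId(feat)     # protein_id or standard_name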
class Coord(object):
"[0..n) coord"
__slots__ = ("start", "end", "strand")
def __init__(self, start, end, strand):
"stand can be +/- or -1/+1, converted to +/-"
self.start = start
self.end = end
if isinstance(strand, int):
self.strand = "+" if (strand > 0) else "-"
else:
self.strand = strand
def __str__(self):
return str(self.start) + ".." + str(self.end) + "/"+str(self.strand)
def size(self):
return self.end-self.start
def __cmp__(self, other):
if not isinstance(other, Coord):
return -1
else:
d = cmp(self.strand, other.strand)
if d == 0:
d = cmp(self.start, other.start)
if d == 0:
d = cmp(self.end, other.end)
return d
def overlaps(self, other):
return (self.start < other.end) and (self.end > other.start) and (self.strand == other.strand)
def contains(self, other):
return (other.start >= self.start) and (other.end <= self.end) and (self.strand == other.strand)
@staticmethod
def fromFeatureLocation(loc, strand):
"convert to a FeatureLocation object to a Coord"
return Coord(loc.start.position, loc.end.position, strand)
class Coords(list):
"List of Coord objects"
def __init__(self, init=None):
if init is not None:
list.__init__(self, init)
assert((len(self)==0) or isinstance(self[0], Coord))
else:
list.__init__(self)
def __str__(self):
strs = []
for c in self:
strs.append(str(c))
return ",".join(strs)
def size(self):
s = 0
for c in self:
s += c.size()
return s
def getRange(self):
"""get Coord covered by this object, which must be sorted"""
if len(self) == 0:
return None
else:
return Coord(self[0].start, self[-1].end, self[0].strand)
def findContained(self, coord):
"find index of first range containing coord, or None"
for i in xrange(len(self)):
if self[i].contains(coord):
return i
return None
def isContained(self, other):
"""Are all blocks in other contained within blocks of self. This
doesn't check for all bases of the containing blocks being covered.
        This handles frame shift CDS, where a base in the mRNA block may not be
covered."""
oi = 0
si = 0
while oi < len(other):
# find next self block containing other[oi]
while (si < len(self)) and (self[si].end < other[oi].start):
si += 1
if (si >= len(self)) or (self[si].start >= other[oi].end) or not self[si].contains(other[oi]):
return False
oi += 1
return True
def __cnvSeqFeature(self, location, strand):
self.append(Coord.fromFeatureLocation(location, strand))
@staticmethod
def fromSeqFeature(feat):
"""Convert Biopython SeqFeature object to Coords. This will handle sub_features"""
isinstance(feat, SeqFeature.SeqFeature)
coords = Coords()
if isinstance(feat.location, SeqFeature.CompoundLocation):
for location in feat.location.parts:
coords.__cnvSeqFeature(location, feat.strand)
else:
coords.__cnvSeqFeature(feat.location, feat.strand)
coords.sort()
return coords
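# Coordinate-helper sketch (illustrative only, not part of the original module;
# `cdsFeat` and `mrnaFeat` are assumed SeqFeature objects for a CDS and its
# parent mRNA):
#
#     cdsCoords = Coords.fromSeqFeature(cdsFeat)
#     mrnaCoords = Coords.fromSeqFeature(mrnaFeat)
#     if not mrnaCoords.isContained(cdsCoords):
#         raise GbffExcept("CDS blocks not contained in mRNA exons")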
|
ifiddes/pycbio
|
pycbio/ncbi/gbff.py
|
Python
|
bsd-3-clause
| 6,284
|
[
"Biopython"
] |
f9a081b8fecb47b270c4df65ca46323adbf186f9988db5864d36c5d239d947c7
|
"""Finds the FOF groups changed after Balrog-injection and unchanged after Balrog-injection.
Author: Brian Yanny"""
#!/usr/bin/env python
import sys
import csv
if(len(sys.argv) != 3):
sys.exit("Usage: python par.py matchlist outprefixname")
fn=sys.argv[1]
outprefix=sys.argv[2]
gidlist={}
gidsizelist={}
fofidlist={}
dumplist={}
othergidlist={}
otherlist={}
othersizelist={}
outok=open(outprefix+'.ok','w')
outrerunmof=open(outprefix+'.rerun','w')
with open(fn) as csvfile:
reader=csv.DictReader(csvfile)
for row in reader:
num=row['number_1_1']
gid=row['GroupID_1']
gsize=row['GroupSize_1']
if (gid==""):
gid=0
gsize=0
othergid=row['GroupID_2']
othersize=row['GroupSize_2']
if (othergid==""):
othergid=0
othersize=0
fofid=row['fofid_1']
#print "num:",num," gid:",gid,gsize,othergid,othersize,fofid
if (gid!=0):
if gid not in gidlist:
if(gsize != othersize):
outrerunmof.write(str(num)+' '+str(fofid)+'\n')
dumplist[int(num)] = 1
#print "group size mismatch"
else:
gidlist[gid]=[int(num)]
gidsizelist[gid]=int(gsize)
fofidlist[gid]=int(fofid)
othergidlist[gid]=othergid
otherlist[othergid]=[int(num)]
othersizelist[othergid]=int(othersize)
else:
if (othergid != othergidlist[gid]):
b=gidlist[gid]
for a in range(0,len(b)):
tnum=int(b[a])
if tnum not in dumplist:
outrerunmof.write(str(b[a])+' '+str(fofid)+'\n')
dumplist[int(b[a])] = 1
if num not in dumplist:
outrerunmof.write(str(num)+' '+str(fofid)+'\n')
dumplist[int(num)] = 1
#print "otherlist id mismatch"
else:
gidlist[gid].append(int(num))
otherlist[othergid].append(int(num))
if len(gidlist[gid]) == gidsizelist[gid]:
b=gidlist[gid]
for a in range(0,len(b)):
tnum=int(b[a])
if tnum not in dumplist:
outok.write(str(b[a])+' '+str(fofid)+'\n')
dumplist[int(b[a])] = 1
else:
if (othergid==0):
outok.write(str(num)+' '+str(fofid)+'\n')
dumplist[int(num)]=1
else:
if num not in dumplist:
outrerunmof.write(str(num)+' '+str(fofid)+'\n')
dumplist[int(num)]=1
#print "othergid not 0"
for q in gidlist.keys():
if(len(gidlist[q]) != gidsizelist[q]):
b=gidlist[q]
#print q,b
for a in range(0,len(b)):
num=int(b[a])
if num not in dumplist:
outrerunmof.write(str(num)+' '+str(fofidlist[q])+'\n')
dumplist[int(num)]=1
outrerunmof.close()
outok.close()
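# Example invocation (illustrative; "matched.csv" must provide the columns read
# above: number_1_1, GroupID_1, GroupSize_1, GroupID_2, GroupSize_2, fofid_1):
#
#     python ms_par.py matched.csv balrog_fof
#
# This writes balrog_fof.ok (objects whose FOF groups are unchanged) and
# balrog_fof.rerun (objects whose FOF groups changed and need MOF re-runs).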
|
sweverett/Balrog-GalSim
|
plots/ms_par.py
|
Python
|
mit
| 2,777
|
[
"Brian"
] |
279a505a7afbf4398d35f58726a9f19d4850bad8f922d1c231c907c10f05af0f
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''Views tests for the OSF.'''
from __future__ import absolute_import
import unittest
import json
import datetime as dt
import mock
import httplib as http
import math
import time
from nose.tools import * # noqa PEP8 asserts
from tests.test_features import requires_search
from modularodm import Q, fields
from modularodm.exceptions import ValidationError
from dateutil.parser import parse as parse_date
from framework import auth
from framework.exceptions import HTTPError
from framework.auth import User, Auth
from framework.auth.utils import impute_names_model
from framework.auth.exceptions import InvalidTokenError
from framework.tasks import handlers
from website import mailchimp_utils
from website.views import _rescale_ratio
from website.util import permissions
from website.models import Node, Pointer, NodeLog
from website.project.model import ensure_schemas, has_anonymous_link
from website.project.views.contributor import (
send_claim_email,
deserialize_contributors,
send_claim_registered_email,
notify_added_contributor
)
from website.profile.utils import add_contributor_json, serialize_unregistered
from website.profile.views import fmt_date_or_none
from website.util import api_url_for, web_url_for
from website import mails, settings
from website.util import rubeus
from website.project.views.node import _view_project, abbrev_authors, _should_show_wiki_widget
from website.project.views.comment import serialize_comment
from website.project.decorators import check_can_access
from website.project.signals import contributor_added
from website.addons.github.model import AddonGitHubOauthSettings
from website.archiver import utils as archiver_utils
from tests.base import (
OsfTestCase,
fake,
capture_signals,
assert_is_redirect,
assert_datetime_equal,
)
from tests.factories import (
UserFactory, ApiOAuth2ApplicationFactory, ProjectFactory, WatchConfigFactory,
NodeFactory, NodeLogFactory, AuthUserFactory, UnregUserFactory,
RegistrationFactory, CommentFactory, PrivateLinkFactory, UnconfirmedUserFactory, DashboardFactory, FolderFactory,
ProjectWithAddonFactory, MockAddonNodeSettings,
)
from website.settings import ALL_MY_REGISTRATIONS_ID, ALL_MY_PROJECTS_ID
class Addon(MockAddonNodeSettings):
@property
def complete(self):
return True
def archive_errors(self):
return 'Error'
class Addon2(MockAddonNodeSettings):
@property
def complete(self):
return True
def archive_errors(self):
return 'Error'
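# Note: Addon and Addon2 are two distinct mock addon-settings classes, each
# reporting an archive error; they are wired up in TestProjectViews.ADDONS_UNDER_TEST
# below, and test_register_project_with_multiple_errors relies on both being present.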
class TestViewingProjectWithPrivateLink(OsfTestCase):
def setUp(self):
super(TestViewingProjectWithPrivateLink, self).setUp()
self.user = AuthUserFactory() # Is NOT a contributor
self.project = ProjectFactory(is_public=False)
self.link = PrivateLinkFactory()
self.link.nodes.append(self.project)
self.link.save()
self.project_url = self.project.web_url_for('view_project')
def test_not_anonymous_for_public_project(self):
anonymous_link = PrivateLinkFactory(anonymous=True)
anonymous_link.nodes.append(self.project)
anonymous_link.save()
self.project.set_privacy('public')
self.project.save()
self.project.reload()
auth = Auth(user=self.user, private_key=anonymous_link.key)
assert_false(has_anonymous_link(self.project, auth))
def test_has_private_link_key(self):
res = self.app.get(self.project_url, {'view_only': self.link.key})
assert_equal(res.status_code, 200)
def test_not_logged_in_no_key(self):
res = self.app.get(self.project_url, {'view_only': None})
assert_is_redirect(res)
res = res.follow(expect_errors=True)
assert_equal(res.status_code, 301)
assert_equal(
res.request.path,
'/login'
)
def test_logged_in_no_private_key(self):
res = self.app.get(self.project_url, {'view_only': None}, auth=self.user.auth,
expect_errors=True)
assert_equal(res.status_code, http.FORBIDDEN)
def test_logged_in_has_key(self):
res = self.app.get(
self.project_url, {'view_only': self.link.key}, auth=self.user.auth)
assert_equal(res.status_code, 200)
@unittest.skip('Skipping for now until we find a way to mock/set the referrer')
def test_prepare_private_key(self):
res = self.app.get(self.project_url, {'key': self.link.key})
res = res.click('Registrations')
assert_is_redirect(res)
res = res.follow()
assert_equal(res.status_code, 200)
assert_equal(res.request.GET['key'], self.link.key)
def test_check_can_access_valid(self):
contributor = AuthUserFactory()
self.project.add_contributor(contributor, auth=Auth(self.project.creator))
self.project.save()
assert_true(check_can_access(self.project, contributor))
def test_check_user_access_invalid(self):
noncontrib = AuthUserFactory()
with assert_raises(HTTPError):
check_can_access(self.project, noncontrib)
def test_check_user_access_if_user_is_None(self):
assert_false(check_can_access(self.project, None))
class TestProjectViews(OsfTestCase):
ADDONS_UNDER_TEST = {
'addon1': {
'node_settings': Addon,
},
'addon2': {
'node_settings': Addon2,
},
}
def setUp(self):
super(TestProjectViews, self).setUp()
ensure_schemas()
self.user1 = AuthUserFactory()
self.user1.save()
self.consolidate_auth1 = Auth(user=self.user1)
self.auth = self.user1.auth
self.user2 = UserFactory()
# A project has 2 contributors
self.project = ProjectFactory(
title="Ham",
description='Honey-baked',
creator=self.user1
)
self.project.add_contributor(self.user2, auth=Auth(self.user1))
self.project.save()
def test_cannot_remove_only_visible_contributor_before_remove_contributor(self):
self.project.visible_contributor_ids.remove(self.user1._id)
self.project.save()
url = self.project.api_url_for('project_before_remove_contributor')
res = self.app.post_json(
url, {'id': self.user2._id}, auth=self.auth, expect_errors=True
)
assert_equal(res.status_code, http.FORBIDDEN)
assert_equal(res.json['message_long'], 'Must have at least one bibliographic contributor')
def test_cannot_remove_only_visible_contributor_remove_contributor(self):
self.project.visible_contributor_ids.remove(self.user1._id)
self.project.save()
url = self.project.api_url_for('project_removecontributor')
res = self.app.post_json(
url, {'id': self.user2._id}, auth=self.auth, expect_errors=True
)
assert_equal(res.status_code, http.FORBIDDEN)
assert_equal(res.json['message_long'], 'Must have at least one bibliographic contributor')
assert_true(self.project.is_contributor(self.user2))
def test_remove_only_visible_contributor_return_false(self):
self.project.visible_contributor_ids.remove(self.user1._id)
self.project.save()
ret = self.project.remove_contributor(contributor=self.user2, auth=self.consolidate_auth1)
assert_false(ret)
self.project.reload()
assert_true(self.project.is_contributor(self.user2))
def test_can_view_nested_project_as_admin(self):
self.parent_project = NodeFactory(
title='parent project',
category='project',
parent=self.project,
is_public=False
)
self.parent_project.save()
self.child_project = NodeFactory(
title='child project',
category='project',
parent=self.parent_project,
is_public=False
)
self.child_project.save()
url = self.child_project.web_url_for('view_project')
res = self.app.get(url, auth=self.auth)
assert_not_in('Private Project', res.body)
assert_in('parent project', res.body)
def test_edit_description(self):
url = "/api/v1/project/{0}/edit/".format(self.project._id)
self.app.post_json(url,
{"name": "description", "value": "Deep-fried"},
auth=self.auth)
self.project.reload()
assert_equal(self.project.description, "Deep-fried")
def test_project_api_url(self):
url = self.project.api_url
res = self.app.get(url, auth=self.auth)
data = res.json
assert_equal(data['node']['category'], 'Project')
assert_equal(data['node']['node_type'], 'project')
assert_equal(data['node']['title'], self.project.title)
assert_equal(data['node']['is_public'], self.project.is_public)
assert_equal(data['node']['is_registration'], False)
assert_equal(data['node']['id'], self.project._primary_key)
assert_equal(data['node']['watched_count'], 0)
assert_true(data['user']['is_contributor'])
assert_equal(data['node']['description'], self.project.description)
assert_equal(data['node']['url'], self.project.url)
assert_equal(data['node']['tags'], [t._primary_key for t in self.project.tags])
assert_in('forked_date', data['node'])
assert_in('watched_count', data['node'])
assert_in('registered_from_url', data['node'])
# TODO: Test "parent" and "user" output
def test_api_get_folder_pointers(self):
dashboard = DashboardFactory(creator=self.user1)
project_one = ProjectFactory(creator=self.user1)
project_two = ProjectFactory(creator=self.user1)
url = dashboard.api_url_for("get_folder_pointers")
dashboard.add_pointer(project_one, auth=self.consolidate_auth1)
dashboard.add_pointer(project_two, auth=self.consolidate_auth1)
res = self.app.get(url, auth=self.auth)
pointers = res.json
assert_in(project_one._id, pointers)
assert_in(project_two._id, pointers)
assert_equal(len(pointers), 2)
def test_api_get_folder_pointers_from_non_folder(self):
project_one = ProjectFactory(creator=self.user1)
project_two = ProjectFactory(creator=self.user1)
url = project_one.api_url_for("get_folder_pointers")
project_one.add_pointer(project_two, auth=self.consolidate_auth1)
res = self.app.get(url, auth=self.auth)
pointers = res.json
assert_equal(len(pointers), 0)
def test_new_user_gets_dashboard_on_dashboard_path(self):
my_user = AuthUserFactory()
dashboard = my_user.node__contributed.find(Q('is_dashboard', 'eq', True))
assert_equal(dashboard.count(), 0)
url = api_url_for('get_dashboard')
self.app.get(url, auth=my_user.auth)
my_user.reload()
dashboard = my_user.node__contributed.find(Q('is_dashboard', 'eq', True))
assert_equal(dashboard.count(), 1)
def test_add_contributor_post(self):
# Two users are added as a contributor via a POST request
project = ProjectFactory(creator=self.user1, is_public=True)
user2 = UserFactory()
user3 = UserFactory()
url = "/api/v1/project/{0}/contributors/".format(project._id)
dict2 = add_contributor_json(user2)
dict3 = add_contributor_json(user3)
dict2.update({
'permission': 'admin',
'visible': True,
})
dict3.update({
'permission': 'write',
'visible': False,
})
self.app.post_json(
url,
{
'users': [dict2, dict3],
'node_ids': [project._id],
},
content_type="application/json",
auth=self.auth,
).maybe_follow()
project.reload()
assert_in(user2._id, project.contributors)
# A log event was added
assert_equal(project.logs[-1].action, "contributor_added")
assert_equal(len(project.contributors), 3)
assert_in(user2._id, project.permissions)
assert_in(user3._id, project.permissions)
assert_equal(project.permissions[user2._id], ['read', 'write', 'admin'])
assert_equal(project.permissions[user3._id], ['read', 'write'])
def test_manage_permissions(self):
url = self.project.api_url + 'contributors/manage/'
self.app.post_json(
url,
{
'contributors': [
{'id': self.project.creator._id, 'permission': 'admin',
'registered': True, 'visible': True},
{'id': self.user1._id, 'permission': 'read',
'registered': True, 'visible': True},
{'id': self.user2._id, 'permission': 'admin',
'registered': True, 'visible': True},
]
},
auth=self.auth,
)
self.project.reload()
assert_equal(self.project.get_permissions(self.user1), ['read'])
assert_equal(self.project.get_permissions(self.user2), ['read', 'write', 'admin'])
def test_manage_permissions_again(self):
url = self.project.api_url + 'contributors/manage/'
self.app.post_json(
url,
{
'contributors': [
{'id': self.user1._id, 'permission': 'admin',
'registered': True, 'visible': True},
{'id': self.user2._id, 'permission': 'admin',
'registered': True, 'visible': True},
]
},
auth=self.auth,
)
self.project.reload()
self.app.post_json(
url,
{
'contributors': [
{'id': self.user1._id, 'permission': 'admin',
'registered': True, 'visible': True},
{'id': self.user2._id, 'permission': 'read',
'registered': True, 'visible': True},
]
},
auth=self.auth,
)
self.project.reload()
assert_equal(self.project.get_permissions(self.user2), ['read'])
assert_equal(self.project.get_permissions(self.user1), ['read', 'write', 'admin'])
def test_contributor_manage_reorder(self):
# Two users are added as a contributor via a POST request
project = ProjectFactory(creator=self.user1, is_public=True)
reg_user1, reg_user2 = UserFactory(), UserFactory()
project.add_contributors(
[
{'user': reg_user1, 'permissions': [
'read', 'write', 'admin'], 'visible': True},
{'user': reg_user2, 'permissions': [
'read', 'write', 'admin'], 'visible': False},
]
)
# Add a non-registered user
unregistered_user = project.add_unregistered_contributor(
fullname=fake.name(), email=fake.email(),
auth=self.consolidate_auth1,
save=True,
)
url = project.api_url + 'contributors/manage/'
self.app.post_json(
url,
{
'contributors': [
{'id': reg_user2._id, 'permission': 'admin',
'registered': True, 'visible': False},
{'id': project.creator._id, 'permission': 'admin',
'registered': True, 'visible': True},
{'id': unregistered_user._id, 'permission': 'admin',
'registered': False, 'visible': True},
{'id': reg_user1._id, 'permission': 'admin',
'registered': True, 'visible': True},
]
},
auth=self.auth,
)
project.reload()
assert_equal(
# Note: Cast ForeignList to list for comparison
list(project.contributors),
[reg_user2, project.creator, unregistered_user, reg_user1]
)
assert_equal(
project.visible_contributors,
[project.creator, unregistered_user, reg_user1]
)
def test_project_remove_contributor(self):
url = "/api/v1/project/{0}/removecontributors/".format(self.project._id)
# User 1 removes user2
self.app.post(url, json.dumps({"id": self.user2._id}),
content_type="application/json",
auth=self.auth).maybe_follow()
self.project.reload()
assert_not_in(self.user2._id, self.project.contributors)
# A log event was added
assert_equal(self.project.logs[-1].action, "contributor_removed")
def test_get_contributors_abbrev(self):
# create a project with 3 registered contributors
project = ProjectFactory(creator=self.user1, is_public=True)
reg_user1, reg_user2 = UserFactory(), UserFactory()
project.add_contributors(
[
{'user': reg_user1, 'permissions': [
'read', 'write', 'admin'], 'visible': True},
{'user': reg_user2, 'permissions': [
'read', 'write', 'admin'], 'visible': True},
]
)
# add an unregistered contributor
project.add_unregistered_contributor(
fullname=fake.name(), email=fake.email(),
auth=self.consolidate_auth1,
save=True,
)
url = project.api_url_for('get_node_contributors_abbrev')
res = self.app.get(url, auth=self.auth)
assert_equal(len(project.contributors), 4)
assert_equal(len(res.json['contributors']), 3)
assert_equal(len(res.json['others_count']), 1)
assert_equal(res.json['contributors'][0]['separator'], ',')
assert_equal(res.json['contributors'][1]['separator'], ',')
assert_equal(res.json['contributors'][2]['separator'], ' &')
def test_edit_node_title(self):
url = "/api/v1/project/{0}/edit/".format(self.project._id)
# The title is changed though posting form data
self.app.post_json(url, {"name": "title", "value": "Bacon"},
auth=self.auth).maybe_follow()
self.project.reload()
# The title was changed
assert_equal(self.project.title, "Bacon")
# A log event was saved
assert_equal(self.project.logs[-1].action, "edit_title")
def test_make_public(self):
self.project.is_public = False
self.project.save()
url = "/api/v1/project/{0}/permissions/public/".format(self.project._id)
res = self.app.post_json(url, {}, auth=self.auth)
self.project.reload()
assert_true(self.project.is_public)
assert_equal(res.json['status'], 'success')
def test_make_private(self):
self.project.is_public = True
self.project.save()
url = "/api/v1/project/{0}/permissions/private/".format(self.project._id)
res = self.app.post_json(url, {}, auth=self.auth)
self.project.reload()
assert_false(self.project.is_public)
assert_equal(res.json['status'], 'success')
def test_cant_make_public_if_not_admin(self):
non_admin = AuthUserFactory()
self.project.add_contributor(non_admin, permissions=['read', 'write'])
self.project.is_public = False
self.project.save()
url = "/api/v1/project/{0}/permissions/public/".format(self.project._id)
res = self.app.post_json(
url, {}, auth=non_admin.auth,
expect_errors=True,
)
assert_equal(res.status_code, http.FORBIDDEN)
assert_false(self.project.is_public)
def test_cant_make_private_if_not_admin(self):
non_admin = AuthUserFactory()
self.project.add_contributor(non_admin, permissions=['read', 'write'])
self.project.is_public = True
self.project.save()
url = "/api/v1/project/{0}/permissions/private/".format(self.project._id)
res = self.app.post_json(
url, {}, auth=non_admin.auth,
expect_errors=True,
)
assert_equal(res.status_code, http.FORBIDDEN)
assert_true(self.project.is_public)
def test_add_tag(self):
url = self.project.api_url_for('project_add_tag')
self.app.post_json(url, {'tag': "foo'ta#@%#%^&g?"}, auth=self.auth)
self.project.reload()
assert_in("foo'ta#@%#%^&g?", self.project.tags)
assert_equal("foo'ta#@%#%^&g?", self.project.logs[-1].params['tag'])
def test_remove_tag(self):
self.project.add_tag("foo'ta#@%#%^&g?", auth=self.consolidate_auth1, save=True)
assert_in("foo'ta#@%#%^&g?", self.project.tags)
url = self.project.api_url_for("project_remove_tag")
self.app.delete_json(url, {"tag": "foo'ta#@%#%^&g?"}, auth=self.auth)
self.project.reload()
assert_not_in("foo'ta#@%#%^&g?", self.project.tags)
assert_equal("tag_removed", self.project.logs[-1].action)
assert_equal("foo'ta#@%#%^&g?", self.project.logs[-1].params['tag'])
@mock.patch('website.archiver.tasks.archive')
def test_register_template_page(self, mock_archive):
url = "/api/v1/project/{0}/register/Replication_Recipe_(Brandt_et_al.,_2013):_Post-Completion/".format(
self.project._primary_key)
self.app.post_json(url, {'registrationChoice': 'Make registration public immediately'}, auth=self.auth)
self.project.reload()
# A registration was added to the project's registration list
assert_equal(len(self.project.node__registrations), 1)
# A log event was saved
assert_equal(self.project.logs[-1].action, "registration_initiated")
# Most recent node is a registration
reg = Node.load(self.project.node__registrations[-1])
assert_true(reg.is_registration)
@mock.patch('website.archiver.tasks.archive')
def test_register_template_with_embargo_creates_embargo(self, mock_archive):
url = "/api/v1/project/{0}/register/Replication_Recipe_(Brandt_et_al.,_2013):_Post-Completion/".format(
self.project._primary_key)
self.app.post_json(
url,
{
'registrationChoice': 'embargo',
'embargoEndDate': "Fri, 01 Jan {year} 05:00:00 GMT".format(year=str(dt.date.today().year + 1))
},
auth=self.auth)
self.project.reload()
# Most recent node is a registration
reg = Node.load(self.project.node__registrations[-1])
assert_true(reg.is_registration)
# The registration created is not public
assert_false(reg.is_public)
# The registration is pending an embargo that has not been approved
assert_true(reg.is_pending_embargo)
def test_register_template_page_with_invalid_template_name(self):
url = self.project.web_url_for('node_register_template_page', template='invalid')
res = self.app.get(url, expect_errors=True, auth=self.auth)
assert_equal(res.status_code, 404)
assert_in('Template not found', res)
def test_register_project_with_multiple_errors(self):
self.project.add_addon('addon1', auth=Auth(self.user1))
component = NodeFactory(parent=self.project, creator=self.user1)
component.add_addon('addon1', auth=Auth(self.user1))
component.add_addon('addon2', auth=Auth(self.user1))
self.project.save()
component.save()
url = self.project.api_url_for('project_before_register')
res = self.app.get(url, auth=self.auth)
data = res.json
assert_equal(res.status_code, 200)
assert_equal(len(data['errors']), 2)
# Regression test for https://github.com/CenterForOpenScience/osf.io/issues/1478
@mock.patch('website.archiver.tasks.archive')
def test_registered_projects_contributions(self, mock_archive):
# register a project
self.project.register_node(None, Auth(user=self.project.creator), '', None)
# get the first registered project of a project
url = self.project.api_url_for('get_registrations')
res = self.app.get(url, auth=self.auth)
data = res.json
pid = data['nodes'][0]['id']
url2 = api_url_for('get_summary', pid=pid)
# count contributions
res2 = self.app.get(url2, {'rescale_ratio': data['rescale_ratio']}, auth=self.auth)
data = res2.json
assert_is_not_none(data['summary']['nlogs'])
def test_forks_contributions(self):
# fork a project
self.project.fork_node(Auth(user=self.project.creator))
# get the first forked project of a project
url = self.project.api_url_for('get_forks')
res = self.app.get(url, auth=self.auth)
data = res.json
pid = data['nodes'][0]['id']
url2 = api_url_for('get_summary', pid=pid)
# count contributions
res2 = self.app.get(url2, {'rescale_ratio': data['rescale_ratio']}, auth=self.auth)
data = res2.json
assert_is_not_none(data['summary']['nlogs'])
@mock.patch('framework.transactions.commands.begin')
@mock.patch('framework.transactions.commands.rollback')
@mock.patch('framework.transactions.commands.commit')
def test_get_logs(self, *mock_commands):
# Add some logs
for _ in range(5):
self.project.logs.append(
NodeLogFactory(
user=self.user1,
action='file_added',
params={'node': self.project._id}
)
)
self.project.save()
url = self.project.api_url_for('get_logs')
res = self.app.get(url, auth=self.auth)
for mock_command in mock_commands:
assert_false(mock_command.called)
self.project.reload()
data = res.json
assert_equal(len(data['logs']), len(self.project.logs))
assert_equal(data['total'], len(self.project.logs))
assert_equal(data['page'], 0)
assert_equal(data['pages'], 1)
most_recent = data['logs'][0]
assert_equal(most_recent['action'], 'file_added')
def test_get_logs_invalid_page_input(self):
url = self.project.api_url_for('get_logs')
invalid_input = 'invalid page'
res = self.app.get(
url, {'page': invalid_input}, auth=self.auth, expect_errors=True
)
assert_equal(res.status_code, 400)
assert_equal(
res.json['message_long'],
'Invalid value for "page".'
)
def test_get_logs_negative_page_num(self):
url = self.project.api_url_for('get_logs')
invalid_input = -1
res = self.app.get(
url, {'page': invalid_input}, auth=self.auth, expect_errors=True
)
assert_equal(res.status_code, 400)
assert_equal(
res.json['message_long'],
'Invalid value for "page".'
)
def test_get_logs_page_num_beyond_limit(self):
url = self.project.api_url_for('get_logs')
size = 10
page_num = math.ceil(len(self.project.logs)/ float(size))
res = self.app.get(
url, {'page': page_num}, auth=self.auth, expect_errors=True
)
assert_equal(res.status_code, 400)
assert_equal(
res.json['message_long'],
'Invalid value for "page".'
)
def test_get_logs_with_count_param(self):
# Add some logs
for _ in range(5):
self.project.logs.append(
NodeLogFactory(
user=self.user1,
action='file_added',
params={'node': self.project._id}
)
)
self.project.save()
url = self.project.api_url_for('get_logs')
res = self.app.get(url, {'count': 3}, auth=self.auth)
assert_equal(len(res.json['logs']), 3)
# 1 project create log, 1 add contributor log, then 5 generated logs
assert_equal(res.json['total'], 5 + 2)
assert_equal(res.json['page'], 0)
assert_equal(res.json['pages'], 3)
def test_get_logs_defaults_to_ten(self):
# Add some logs
for _ in range(12):
self.project.logs.append(
NodeLogFactory(
user=self.user1,
action='file_added',
params={'node': self.project._id}
)
)
self.project.save()
url = self.project.api_url_for('get_logs')
res = self.app.get(url, auth=self.auth)
assert_equal(len(res.json['logs']), 10)
        # 1 project create log, 1 add contributor log, then 12 generated logs
assert_equal(res.json['total'], 12 + 2)
assert_equal(res.json['page'], 0)
assert_equal(res.json['pages'], 2)
def test_get_more_logs(self):
# Add some logs
for _ in range(12):
self.project.logs.append(
NodeLogFactory(
user=self.user1,
action="file_added",
params={"node": self.project._id}
)
)
self.project.save()
url = self.project.api_url_for('get_logs')
res = self.app.get(url, {"page": 1}, auth=self.auth)
assert_equal(len(res.json['logs']), 4)
#1 project create log, 1 add contributor log, then 12 generated logs
assert_equal(res.json['total'], 12 + 2)
assert_equal(res.json['page'], 1)
assert_equal(res.json['pages'], 2)
def test_logs_private(self):
"""Add logs to a public project, then to its private component. Get
the ten most recent logs; assert that ten logs are returned and that
all belong to the project and not its component.
"""
# Add some logs
for _ in range(15):
self.project.add_log(
auth=self.consolidate_auth1,
action='file_added',
params={'node': self.project._id}
)
self.project.is_public = True
self.project.save()
child = NodeFactory(parent=self.project)
for _ in range(5):
child.add_log(
auth=self.consolidate_auth1,
action='file_added',
params={'node': child._id}
)
url = self.project.api_url_for('get_logs')
res = self.app.get(url).maybe_follow()
assert_equal(len(res.json['logs']), 10)
# 1 project create log, 1 add contributor log, then 15 generated logs
assert_equal(res.json['total'], 15 + 2)
assert_equal(res.json['page'], 0)
assert_equal(res.json['pages'], 2)
assert_equal(
[self.project._id] * 10,
[
log['params']['node']
for log in res.json['logs']
]
)
def test_can_view_public_log_from_private_project(self):
project = ProjectFactory(is_public=True)
fork = project.fork_node(auth=self.consolidate_auth1)
url = fork.api_url_for('get_logs')
res = self.app.get(url, auth=self.auth)
assert_equal(
[each['action'] for each in res.json['logs']],
['node_forked', 'project_created'],
)
project.is_public = False
project.save()
res = self.app.get(url, auth=self.auth)
assert_equal(
[each['action'] for each in res.json['logs']],
['node_forked', 'project_created'],
)
def test_for_private_component_log(self):
for _ in range(5):
self.project.add_log(
auth=self.consolidate_auth1,
action='file_added',
params={'node': self.project._id}
)
self.project.is_public = True
self.project.save()
child = NodeFactory(parent=self.project)
child.is_public = False
child.set_title("foo", auth=self.consolidate_auth1)
child.set_title("bar", auth=self.consolidate_auth1)
child.save()
url = self.project.api_url_for('get_logs')
res = self.app.get(url).maybe_follow()
assert_equal(len(res.json['logs']), 7)
assert_not_in(
child._id,
[
log['params']['node']
for log in res.json['logs']
]
)
def test_remove_project(self):
url = self.project.api_url
res = self.app.delete_json(url, {}, auth=self.auth).maybe_follow()
self.project.reload()
assert_equal(self.project.is_deleted, True)
assert_in('url', res.json)
assert_equal(res.json['url'], '/dashboard/')
def test_private_link_edit_name(self):
link = PrivateLinkFactory()
link.nodes.append(self.project)
link.save()
assert_equal(link.name, "link")
url = self.project.api_url + 'private_link/edit/'
self.app.put_json(
url,
{'pk': link._id, "value": "new name"},
auth=self.auth,
).maybe_follow()
self.project.reload()
link.reload()
assert_equal(link.name, "new name")
def test_remove_private_link(self):
link = PrivateLinkFactory()
link.nodes.append(self.project)
link.save()
url = self.project.api_url_for('remove_private_link')
self.app.delete_json(
url,
{'private_link_id': link._id},
auth=self.auth,
).maybe_follow()
self.project.reload()
link.reload()
assert_true(link.is_deleted)
def test_remove_component(self):
node = NodeFactory(parent=self.project, creator=self.user1)
url = node.api_url
res = self.app.delete_json(url, {}, auth=self.auth).maybe_follow()
node.reload()
assert_equal(node.is_deleted, True)
assert_in('url', res.json)
assert_equal(res.json['url'], self.project.url)
def test_cant_remove_component_if_not_admin(self):
node = NodeFactory(parent=self.project, creator=self.user1)
non_admin = AuthUserFactory()
node.add_contributor(
non_admin,
permissions=['read', 'write'],
save=True,
)
url = node.api_url
res = self.app.delete_json(
url, {}, auth=non_admin.auth,
expect_errors=True,
).maybe_follow()
assert_equal(res.status_code, http.FORBIDDEN)
assert_false(node.is_deleted)
def test_watch_and_unwatch(self):
url = self.project.api_url_for('togglewatch_post')
self.app.post_json(url, {}, auth=self.auth)
res = self.app.get(self.project.api_url, auth=self.auth)
assert_equal(res.json['node']['watched_count'], 1)
self.app.post_json(url, {}, auth=self.auth)
res = self.app.get(self.project.api_url, auth=self.auth)
assert_equal(res.json['node']['watched_count'], 0)
def test_view_project_returns_whether_to_show_wiki_widget(self):
user = AuthUserFactory()
project = ProjectFactory.build(creator=user, is_public=True)
project.add_contributor(user)
project.save()
url = project.api_url_for('view_project')
res = self.app.get(url, auth=user.auth)
assert_equal(res.status_code, http.OK)
assert_in('show_wiki_widget', res.json['user'])
def test_fork_count_does_not_include_deleted_forks(self):
user = AuthUserFactory()
project = ProjectFactory(creator=user)
auth = Auth(project.creator)
fork = project.fork_node(auth)
project.save()
fork.remove_node(auth)
fork.save()
url = project.api_url_for('view_project')
res = self.app.get(url, auth=user.auth)
assert_in('fork_count', res.json['node'])
assert_equal(0, res.json['node']['fork_count'])
def test_statistic_page_redirect(self):
url = self.project.web_url_for('project_statistics_redirect')
res = self.app.get(url, auth=self.auth)
assert_equal(res.status_code, 302)
assert_in(self.project.web_url_for('project_statistics', _guid=True), res.location)
class TestEditableChildrenViews(OsfTestCase):
def setUp(self):
OsfTestCase.setUp(self)
self.user = AuthUserFactory()
self.project = ProjectFactory(creator=self.user, is_public=False)
self.child = ProjectFactory(parent=self.project, creator=self.user, is_public=True)
self.grandchild = ProjectFactory(parent=self.child, creator=self.user, is_public=False)
self.great_grandchild = ProjectFactory(parent=self.grandchild, creator=self.user, is_public=True)
self.great_great_grandchild = ProjectFactory(parent=self.great_grandchild, creator=self.user, is_public=False)
url = self.project.api_url_for('get_editable_children')
self.project_results = self.app.get(url, auth=self.user.auth).json
def test_get_editable_children(self):
assert_equal(len(self.project_results['children']), 4)
assert_equal(self.project_results['node']['id'], self.project._id)
def test_editable_children_order(self):
assert_equal(self.project_results['children'][0]['id'], self.child._id)
assert_equal(self.project_results['children'][1]['id'], self.grandchild._id)
assert_equal(self.project_results['children'][2]['id'], self.great_grandchild._id)
assert_equal(self.project_results['children'][3]['id'], self.great_great_grandchild._id)
def test_editable_children_indents(self):
assert_equal(self.project_results['children'][0]['indent'], 0)
assert_equal(self.project_results['children'][1]['indent'], 1)
assert_equal(self.project_results['children'][2]['indent'], 2)
assert_equal(self.project_results['children'][3]['indent'], 3)
def test_editable_children_parents(self):
assert_equal(self.project_results['children'][0]['parent_id'], self.project._id)
assert_equal(self.project_results['children'][1]['parent_id'], self.child._id)
assert_equal(self.project_results['children'][2]['parent_id'], self.grandchild._id)
assert_equal(self.project_results['children'][3]['parent_id'], self.great_grandchild._id)
def test_editable_children_privacy(self):
assert_false(self.project_results['node']['is_public'])
assert_true(self.project_results['children'][0]['is_public'])
assert_false(self.project_results['children'][1]['is_public'])
assert_true(self.project_results['children'][2]['is_public'])
assert_false(self.project_results['children'][3]['is_public'])
def test_editable_children_titles(self):
assert_equal(self.project_results['node']['title'], self.project.title)
assert_equal(self.project_results['children'][0]['title'], self.child.title)
assert_equal(self.project_results['children'][1]['title'], self.grandchild.title)
assert_equal(self.project_results['children'][2]['title'], self.great_grandchild.title)
assert_equal(self.project_results['children'][3]['title'], self.great_great_grandchild.title)
class TestChildrenViews(OsfTestCase):
def setUp(self):
OsfTestCase.setUp(self)
self.user = AuthUserFactory()
def test_get_children(self):
project = ProjectFactory(creator=self.user)
child = NodeFactory(parent=project, creator=self.user)
url = project.api_url_for('get_children')
res = self.app.get(url, auth=self.user.auth)
nodes = res.json['nodes']
assert_equal(len(nodes), 1)
assert_equal(nodes[0]['id'], child._primary_key)
def test_get_children_includes_pointers(self):
project = ProjectFactory(creator=self.user)
pointed = ProjectFactory()
project.add_pointer(pointed, Auth(self.user))
project.save()
url = project.api_url_for('get_children')
res = self.app.get(url, auth=self.user.auth)
nodes = res.json['nodes']
assert_equal(len(nodes), 1)
assert_equal(nodes[0]['title'], pointed.title)
pointer = Pointer.find_one(Q('node', 'eq', pointed))
assert_equal(nodes[0]['id'], pointer._primary_key)
def test_get_children_filter_for_permissions(self):
# self.user has admin access to this project
project = ProjectFactory(creator=self.user)
# self.user only has read access to this project, which project points
# to
read_only_pointed = ProjectFactory()
read_only_creator = read_only_pointed.creator
read_only_pointed.add_contributor(self.user, auth=Auth(read_only_creator), permissions=['read'])
read_only_pointed.save()
# self.user only has read access to this project, which is a subproject
# of project
read_only = ProjectFactory()
read_only_pointed.add_contributor(self.user, auth=Auth(read_only_creator), permissions=['read'])
project.nodes.append(read_only)
# self.user adds a pointer to read_only
project.add_pointer(read_only_pointed, Auth(self.user))
project.save()
url = project.api_url_for('get_children')
res = self.app.get(url, auth=self.user.auth)
assert_equal(len(res.json['nodes']), 2)
url = project.api_url_for('get_children', permissions='write')
res = self.app.get(url, auth=self.user.auth)
assert_equal(len(res.json['nodes']), 0)
def test_get_children_rescale_ratio(self):
project = ProjectFactory(creator=self.user)
child = NodeFactory(parent=project, creator=self.user)
url = project.api_url_for('get_children')
res = self.app.get(url, auth=self.user.auth)
rescale_ratio = res.json['rescale_ratio']
assert_is_instance(rescale_ratio, float)
assert_equal(rescale_ratio, _rescale_ratio(Auth(self.user), [child]))
def test_get_children_render_nodes_receives_auth(self):
project = ProjectFactory(creator=self.user)
NodeFactory(parent=project, creator=self.user)
url = project.api_url_for('get_children')
res = self.app.get(url, auth=self.user.auth)
perm = res.json['nodes'][0]['permissions']
assert_equal(perm, 'admin')
class TestUserProfile(OsfTestCase):
def setUp(self):
super(TestUserProfile, self).setUp()
self.user = AuthUserFactory()
def test_sanitization_of_edit_profile(self):
url = api_url_for('edit_profile', uid=self.user._id)
post_data = {'name': 'fullname', 'value': 'new<b> name</b> '}
request = self.app.post(url, post_data, auth=self.user.auth)
assert_equal('new name', request.json['name'])
def test_fmt_date_or_none(self):
with assert_raises(HTTPError) as cm:
#enter a date before 1900
fmt_date_or_none(dt.datetime(1890, 10, 31, 18, 23, 29, 227))
# error should be raised because date is before 1900
assert_equal(cm.exception.code, http.BAD_REQUEST)
def test_unserialize_social(self):
url = api_url_for('unserialize_social')
payload = {
'personal': 'http://frozen.pizza.com/reviews',
'twitter': 'howtopizza',
'github': 'frozenpizzacode',
}
self.app.put_json(
url,
payload,
auth=self.user.auth,
)
self.user.reload()
for key, value in payload.iteritems():
assert_equal(self.user.social[key], value)
assert_true(self.user.social['researcherId'] is None)
def test_unserialize_social_validation_failure(self):
url = api_url_for('unserialize_social')
# personal URL is invalid
payload = {
'personal': 'http://invalidurl',
'twitter': 'howtopizza',
'github': 'frozenpizzacode',
}
res = self.app.put_json(
url,
payload,
auth=self.user.auth,
expect_errors=True
)
assert_equal(res.status_code, 400)
assert_equal(res.json['message_long'], 'Invalid personal URL.')
def test_serialize_social_editable(self):
self.user.social['twitter'] = 'howtopizza'
self.user.save()
url = api_url_for('serialize_social')
res = self.app.get(
url,
auth=self.user.auth,
)
assert_equal(res.json.get('twitter'), 'howtopizza')
assert_true(res.json.get('github') is None)
assert_true(res.json['editable'])
def test_serialize_social_not_editable(self):
user2 = AuthUserFactory()
self.user.social['twitter'] = 'howtopizza'
self.user.save()
url = api_url_for('serialize_social', uid=self.user._id)
res = self.app.get(
url,
auth=user2.auth,
)
assert_equal(res.json.get('twitter'), 'howtopizza')
assert_true(res.json.get('github') is None)
assert_false(res.json['editable'])
def test_serialize_social_addons_editable(self):
self.user.add_addon('github')
user_github = self.user.get_addon('github')
oauth_settings = AddonGitHubOauthSettings()
oauth_settings.github_user_id = 'testuser'
oauth_settings.save()
user_github.oauth_settings = oauth_settings
user_github.save()
user_github.github_user_name = 'howtogithub'
oauth_settings.save()
url = api_url_for('serialize_social')
res = self.app.get(
url,
auth=self.user.auth,
)
assert_equal(
res.json['addons']['github'],
'howtogithub'
)
def test_serialize_social_addons_not_editable(self):
user2 = AuthUserFactory()
self.user.add_addon('github')
user_github = self.user.get_addon('github')
oauth_settings = AddonGitHubOauthSettings()
oauth_settings.github_user_id = 'testuser'
oauth_settings.save()
user_github.oauth_settings = oauth_settings
user_github.save()
user_github.github_user_name = 'howtogithub'
oauth_settings.save()
url = api_url_for('serialize_social', uid=self.user._id)
res = self.app.get(
url,
auth=user2.auth,
)
assert_not_in('addons', res.json)
def test_unserialize_and_serialize_jobs(self):
jobs = [{
'institution': 'an institution',
'department': 'a department',
'title': 'a title',
'startMonth': 'January',
'startYear': '2001',
'endMonth': 'March',
'endYear': '2001',
'ongoing': False,
}, {
'institution': 'another institution',
'department': None,
'title': None,
'startMonth': 'May',
'startYear': '2001',
'endMonth': None,
'endYear': None,
'ongoing': True,
}]
payload = {'contents': jobs}
url = api_url_for('unserialize_jobs')
self.app.put_json(url, payload, auth=self.user.auth)
self.user.reload()
assert_equal(len(self.user.jobs), 2)
url = api_url_for('serialize_jobs')
res = self.app.get(
url,
auth=self.user.auth,
)
for i, job in enumerate(jobs):
assert_equal(job, res.json['contents'][i])
def test_unserialize_and_serialize_schools(self):
schools = [{
'institution': 'an institution',
'department': 'a department',
'degree': 'a degree',
'startMonth': 1,
'startYear': '2001',
'endMonth': 5,
'endYear': '2001',
'ongoing': False,
}, {
'institution': 'another institution',
'department': None,
'degree': None,
'startMonth': 5,
'startYear': '2001',
'endMonth': None,
'endYear': None,
'ongoing': True,
}]
payload = {'contents': schools}
url = api_url_for('unserialize_schools')
self.app.put_json(url, payload, auth=self.user.auth)
self.user.reload()
assert_equal(len(self.user.schools), 2)
url = api_url_for('serialize_schools')
res = self.app.get(
url,
auth=self.user.auth,
)
for i, job in enumerate(schools):
assert_equal(job, res.json['contents'][i])
def test_unserialize_jobs(self):
jobs = [
{
'institution': fake.company(),
'department': fake.catch_phrase(),
'title': fake.bs(),
'startMonth': 5,
'startYear': '2013',
'endMonth': 3,
'endYear': '2014',
'ongoing': False,
}
]
payload = {'contents': jobs}
url = api_url_for('unserialize_jobs')
res = self.app.put_json(url, payload, auth=self.user.auth)
assert_equal(res.status_code, 200)
self.user.reload()
# jobs field is updated
assert_equal(self.user.jobs, jobs)
def test_unserialize_names(self):
fake_fullname_w_spaces = ' {} '.format(fake.name())
names = {
'full': fake_fullname_w_spaces,
'given': 'Tea',
'middle': 'Gray',
'family': 'Pot',
'suffix': 'Ms.',
}
url = api_url_for('unserialize_names')
res = self.app.put_json(url, names, auth=self.user.auth)
assert_equal(res.status_code, 200)
self.user.reload()
# user is updated
assert_equal(self.user.fullname, fake_fullname_w_spaces.strip())
assert_equal(self.user.given_name, names['given'])
assert_equal(self.user.middle_names, names['middle'])
assert_equal(self.user.family_name, names['family'])
assert_equal(self.user.suffix, names['suffix'])
def test_unserialize_schools(self):
schools = [
{
'institution': fake.company(),
'department': fake.catch_phrase(),
'degree': fake.bs(),
'startMonth': 5,
'startYear': '2013',
'endMonth': 3,
'endYear': '2014',
'ongoing': False,
}
]
payload = {'contents': schools}
url = api_url_for('unserialize_schools')
res = self.app.put_json(url, payload, auth=self.user.auth)
assert_equal(res.status_code, 200)
self.user.reload()
# schools field is updated
assert_equal(self.user.schools, schools)
def test_unserialize_jobs_valid(self):
jobs_cached = self.user.jobs
jobs = [
{
'institution': fake.company(),
'department': fake.catch_phrase(),
'title': fake.bs(),
'startMonth': 5,
'startYear': '2013',
'endMonth': 3,
'endYear': '2014',
'ongoing': False,
}
]
payload = {'contents': jobs}
url = api_url_for('unserialize_jobs')
res = self.app.put_json(url, payload, auth=self.user.auth)
assert_equal(res.status_code, 200)
def test_get_current_user_gravatar_default_size(self):
url = api_url_for('current_user_gravatar')
res = self.app.get(url, auth=self.user.auth)
current_user_gravatar = res.json['gravatar_url']
assert_true(current_user_gravatar is not None)
url = api_url_for('get_gravatar', uid=self.user._id)
res = self.app.get(url, auth=self.user.auth)
my_user_gravatar = res.json['gravatar_url']
assert_equal(current_user_gravatar, my_user_gravatar)
def test_get_other_user_gravatar_default_size(self):
user2 = AuthUserFactory()
url = api_url_for('current_user_gravatar')
res = self.app.get(url, auth=self.user.auth)
current_user_gravatar = res.json['gravatar_url']
url = api_url_for('get_gravatar', uid=user2._id)
res = self.app.get(url, auth=self.user.auth)
user2_gravatar = res.json['gravatar_url']
assert_true(user2_gravatar is not None)
assert_not_equal(current_user_gravatar, user2_gravatar)
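# Passing a size parameter should produce a gravatar URL that differs from the default-size URL.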
def test_get_current_user_gravatar_specific_size(self):
url = api_url_for('current_user_gravatar')
res = self.app.get(url, auth=self.user.auth)
current_user_default_gravatar = res.json['gravatar_url']
url = api_url_for('current_user_gravatar', size=11)
res = self.app.get(url, auth=self.user.auth)
current_user_small_gravatar = res.json['gravatar_url']
assert_true(current_user_small_gravatar is not None)
assert_not_equal(current_user_default_gravatar, current_user_small_gravatar)
def test_get_other_user_gravatar_specific_size(self):
user2 = AuthUserFactory()
url = api_url_for('get_gravatar', uid=user2._id)
res = self.app.get(url, auth=self.user.auth)
gravatar_default_size = res.json['gravatar_url']
url = api_url_for('get_gravatar', uid=user2._id, size=11)
res = self.app.get(url, auth=self.user.auth)
gravatar_small = res.json['gravatar_url']
assert_true(gravatar_small is not None)
assert_not_equal(gravatar_default_size, gravatar_small)
def test_update_user_timezone(self):
assert_equal(self.user.timezone, 'Etc/UTC')
payload = {'timezone': 'America/New_York', 'id': self.user._id}
url = api_url_for('update_user', uid=self.user._id)
self.app.put_json(url, payload, auth=self.user.auth)
self.user.reload()
assert_equal(self.user.timezone, 'America/New_York')
def test_update_user_locale(self):
assert_equal(self.user.locale, 'en_US')
payload = {'locale': 'de_DE', 'id': self.user._id}
url = api_url_for('update_user', uid=self.user._id)
self.app.put_json(url, payload, auth=self.user.auth)
self.user.reload()
assert_equal(self.user.locale, 'de_DE')
def test_update_user_locale_none(self):
assert_equal(self.user.locale, 'en_US')
payload = {'locale': None, 'id': self.user._id}
url = api_url_for('update_user', uid=self.user._id)
self.app.put_json(url, payload, auth=self.user.auth)
self.user.reload()
assert_equal(self.user.locale, 'en_US')
def test_update_user_locale_empty_string(self):
assert_equal(self.user.locale, 'en_US')
payload = {'locale': '', 'id': self.user._id}
url = api_url_for('update_user', uid=self.user._id)
self.app.put_json(url, payload, auth=self.user.auth)
self.user.reload()
assert_equal(self.user.locale, 'en_US')
def test_cannot_update_user_without_user_id(self):
user1 = AuthUserFactory()
url = api_url_for('update_user')
header = {'emails': [{'address': user1.username}]}
res = self.app.put_json(url, header, auth=user1.auth, expect_errors=True)
assert_equal(res.status_code, 400)
assert_equal(res.json['message_long'], '"id" is required')
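# Switching the primary email address should move the MailChimp subscription: the old
# primary is unsubscribed and the new one is subscribed with the user's names as merge vars.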
@mock.patch('framework.auth.views.mails.send_mail')
@mock.patch('website.mailchimp_utils.get_mailchimp_api')
def test_update_user_mailing_lists(self, mock_get_mailchimp_api, send_mail):
email = fake.email()
self.user.emails.append(email)
list_name = 'foo'
self.user.mailing_lists[list_name] = True
self.user.save()
mock_client = mock.MagicMock()
mock_get_mailchimp_api.return_value = mock_client
mock_client.lists.list.return_value = {'data': [{'id': 1, 'list_name': list_name}]}
list_id = mailchimp_utils.get_list_id_from_name(list_name)
url = api_url_for('update_user', uid=self.user._id)
emails = [
{'address': self.user.username, 'primary': False, 'confirmed': True},
{'address': email, 'primary': True, 'confirmed': True}]
payload = {'locale': '', 'id': self.user._id, 'emails': emails}
self.app.put_json(url, payload, auth=self.user.auth)
mock_client.lists.unsubscribe.assert_called_with(
id=list_id,
email={'email': self.user.username}
)
mock_client.lists.subscribe.assert_called_with(
id=list_id,
email={'email': email},
merge_vars={
'fname': self.user.given_name,
'lname': self.user.family_name,
},
double_optin=False,
update_existing=True
)
handlers.celery_teardown_request()
@mock.patch('framework.auth.views.mails.send_mail')
@mock.patch('website.mailchimp_utils.get_mailchimp_api')
def test_unsubscribe_mailchimp_not_called_if_user_not_subscribed(self, mock_get_mailchimp_api, send_mail):
email = fake.email()
self.user.emails.append(email)
list_name = 'foo'
self.user.mailing_lists[list_name] = False
self.user.save()
mock_client = mock.MagicMock()
mock_get_mailchimp_api.return_value = mock_client
mock_client.lists.list.return_value = {'data': [{'id': 1, 'list_name': list_name}]}
url = api_url_for('update_user', uid=self.user._id)
emails = [
{'address': self.user.username, 'primary': False, 'confirmed': True},
{'address': email, 'primary': True, 'confirmed': True}]
payload = {'locale': '', 'id': self.user._id, 'emails': emails}
self.app.put_json(url, payload, auth=self.user.auth)
assert_equal(mock_client.lists.unsubscribe.call_count, 0)
assert_equal(mock_client.lists.subscribe.call_count, 0)
handlers.celery_teardown_request()
# TODO: Uncomment once outstanding issues with this feature are addressed
# def test_twitter_redirect_success(self):
# self.user.social['twitter'] = fake.last_name()
# self.user.save()
# res = self.app.get(web_url_for('redirect_to_twitter', twitter_handle=self.user.social['twitter']))
# assert_equals(res.status_code, http.FOUND)
# assert_in(self.user.url, res.location)
# def test_twitter_redirect_is_case_insensitive(self):
# self.user.social['twitter'] = fake.last_name()
# self.user.save()
# res1 = self.app.get(web_url_for('redirect_to_twitter', twitter_handle=self.user.social['twitter']))
# res2 = self.app.get(web_url_for('redirect_to_twitter', twitter_handle=self.user.social['twitter'].lower()))
# assert_equal(res1.location, res2.location)
# def test_twitter_redirect_unassociated_twitter_handle_returns_404(self):
# unassociated_handle = fake.last_name()
# expected_error = 'There is no active user associated with the Twitter handle: {0}.'.format(unassociated_handle)
# res = self.app.get(
# web_url_for('redirect_to_twitter', twitter_handle=unassociated_handle),
# expect_errors=True
# )
# assert_equal(res.status_code, http.NOT_FOUND)
# assert_true(expected_error in res.body)
# def test_twitter_redirect_handle_with_multiple_associated_accounts_redirects_to_selection_page(self):
# self.user.social['twitter'] = fake.last_name()
# self.user.save()
# user2 = AuthUserFactory()
# user2.social['twitter'] = self.user.social['twitter']
# user2.save()
# expected_error = 'There are multiple OSF accounts associated with the Twitter handle: <strong>{0}</strong>.'.format(self.user.social['twitter'])
# res = self.app.get(
# web_url_for(
# 'redirect_to_twitter',
# twitter_handle=self.user.social['twitter'],
# expect_error=True
# )
# )
# assert_equal(res.status_code, http.MULTIPLE_CHOICES)
# assert_true(expected_error in res.body)
# assert_true(self.user.url in res.body)
# assert_true(user2.url in res.body)
class TestUserProfileApplicationsPage(OsfTestCase):
def setUp(self):
super(TestUserProfileApplicationsPage, self).setUp()
self.user = AuthUserFactory()
self.user2 = AuthUserFactory()
self.platform_app = ApiOAuth2ApplicationFactory(owner=self.user)
self.detail_url = web_url_for('oauth_application_detail', client_id=self.platform_app.client_id)
def test_non_owner_cant_access_detail_page(self):
res = self.app.get(self.detail_url, auth=self.user2.auth, expect_errors=True)
assert_equal(res.status_code, http.FORBIDDEN)
def test_owner_cant_access_deleted_application(self):
self.platform_app.is_active = False
self.platform_app.save()
res = self.app.get(self.detail_url, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, http.GONE)
def test_owner_cant_access_nonexistent_application(self):
url = web_url_for('oauth_application_detail', client_id='nonexistent')
res = self.app.get(url, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, http.NOT_FOUND)
class TestUserAccount(OsfTestCase):
def setUp(self):
super(TestUserAccount, self).setUp()
self.user = AuthUserFactory()
self.user.set_password('password')
self.user.save()
@mock.patch('website.profile.views.push_status_message')
def test_password_change_valid(self, mock_push_status_message):
old_password = 'password'
new_password = 'Pa$$w0rd'
confirm_password = new_password
url = web_url_for('user_account_password')
post_data = {
'old_password': old_password,
'new_password': new_password,
'confirm_password': confirm_password,
}
res = self.app.post(url, post_data, auth=(self.user.username, old_password))
assert_equal(302, res.status_code)
res = res.follow(auth=(self.user.username, new_password))
assert_equal(200, res.status_code)
self.user.reload()
assert_true(self.user.check_password(new_password))
assert_true(mock_push_status_message.called)
assert_in('Password updated successfully', mock_push_status_message.mock_calls[0][1][0])
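# test_password_change_invalid doubles as a helper: the invalid-password tests below
# call it with keyword arguments describing each scenario.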
@mock.patch('website.profile.views.push_status_message')
def test_password_change_invalid(self, mock_push_status_message, old_password='', new_password='',
confirm_password='', error_message='Old password is invalid'):
url = web_url_for('user_account_password')
post_data = {
'old_password': old_password,
'new_password': new_password,
'confirm_password': confirm_password,
}
res = self.app.post(url, post_data, auth=self.user.auth)
assert_equal(302, res.status_code)
res = res.follow(auth=self.user.auth)
assert_equal(200, res.status_code)
self.user.reload()
assert_false(self.user.check_password(new_password))
assert_true(mock_push_status_message.called)
assert_in(error_message, mock_push_status_message.mock_calls[0][1][0])
def test_password_change_invalid_old_password(self):
self.test_password_change_invalid(
old_password='invalid old password',
new_password='new password',
confirm_password='new password',
error_message='Old password is invalid',
)
def test_password_change_invalid_confirm_password(self):
self.test_password_change_invalid(
old_password='password',
new_password='new password',
confirm_password='invalid confirm password',
error_message='Password does not match the confirmation',
)
def test_password_change_invalid_new_password_length(self):
self.test_password_change_invalid(
old_password='password',
new_password='12345',
confirm_password='12345',
error_message='Password should be at least six characters',
)
def test_password_change_invalid_blank_password(self, old_password='', new_password='', confirm_password=''):
self.test_password_change_invalid(
old_password=old_password,
new_password=new_password,
confirm_password=confirm_password,
error_message='Passwords cannot be blank',
)
def test_password_change_invalid_blank_new_password(self):
for password in ('', ' '):
self.test_password_change_invalid_blank_password('password', password, 'new password')
def test_password_change_invalid_blank_confirm_password(self):
for password in ('', ' '):
self.test_password_change_invalid_blank_password('password', 'new password', password)
class TestAddingContributorViews(OsfTestCase):
def setUp(self):
super(TestAddingContributorViews, self).setUp()
ensure_schemas()
self.creator = AuthUserFactory()
self.project = ProjectFactory(creator=self.creator)
# Authenticate all requests
self.app.authenticate(*self.creator.auth)
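# Connect the contributor-added email handler for these tests; it is disconnected again in tearDown.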
contributor_added.connect(notify_added_contributor)
def test_serialize_unregistered_without_record(self):
name, email = fake.name(), fake.email()
res = serialize_unregistered(fullname=name, email=email)
assert_equal(res['fullname'], name)
assert_equal(res['email'], email)
assert_equal(res['id'], None)
assert_false(res['registered'])
assert_true(res['gravatar'])
assert_false(res['active'])
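# deserialize_contributors should resolve a mix of one registered and two unregistered
# contributors into user records, creating records for unregistered users as needed.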
def test_deserialize_contributors(self):
contrib = UserFactory()
unreg = UnregUserFactory()
name, email = fake.name(), fake.email()
unreg_no_record = serialize_unregistered(name, email)
contrib_data = [
add_contributor_json(contrib),
serialize_unregistered(fake.name(), unreg.username),
unreg_no_record
]
contrib_data[0]['permission'] = 'admin'
contrib_data[1]['permission'] = 'write'
contrib_data[2]['permission'] = 'read'
contrib_data[0]['visible'] = True
contrib_data[1]['visible'] = True
contrib_data[2]['visible'] = True
res = deserialize_contributors(
self.project,
contrib_data,
auth=Auth(self.creator))
assert_equal(len(res), len(contrib_data))
assert_true(res[0]['user'].is_registered)
assert_false(res[1]['user'].is_registered)
assert_true(res[1]['user']._id)
assert_false(res[2]['user'].is_registered)
assert_true(res[2]['user']._id)
def test_deserialize_contributors_validates_fullname(self):
name = "<img src=1 onerror=console.log(1)>"
email = fake.email()
unreg_no_record = serialize_unregistered(name, email)
contrib_data = [unreg_no_record]
contrib_data[0]['permission'] = 'admin'
contrib_data[0]['visible'] = True
with assert_raises(ValidationError):
deserialize_contributors(
self.project,
contrib_data,
auth=Auth(self.creator),
validate=True)
def test_deserialize_contributors_validates_email(self):
name = fake.name()
email = "!@#$%%^&*"
unreg_no_record = serialize_unregistered(name, email)
contrib_data = [unreg_no_record]
contrib_data[0]['permission'] = 'admin'
contrib_data[0]['visible'] = True
with assert_raises(ValidationError):
deserialize_contributors(
self.project,
contrib_data,
auth=Auth(self.creator),
validate=True)
@mock.patch('website.project.views.contributor.mails.send_mail')
def test_deserialize_contributors_sends_unreg_contributor_added_signal(self, _):
unreg = UnregUserFactory()
from website.project.signals import unreg_contributor_added
serialized = [serialize_unregistered(fake.name(), unreg.username)]
serialized[0]['visible'] = True
with capture_signals() as mock_signals:
deserialize_contributors(self.project, serialized,
auth=Auth(self.creator))
assert_equal(mock_signals.signals_sent(), set([unreg_contributor_added]))
def test_serialize_unregistered_with_record(self):
name, email = fake.name(), fake.email()
user = self.project.add_unregistered_contributor(fullname=name,
email=email, auth=Auth(self.project.creator))
self.project.save()
res = serialize_unregistered(
fullname=name,
email=email
)
assert_false(res['active'])
assert_false(res['registered'])
assert_equal(res['id'], user._primary_key)
assert_true(res['gravatar_url'])
assert_equal(res['fullname'], name)
assert_equal(res['email'], email)
def test_add_contributor_with_unreg_contribs_and_reg_contribs(self):
n_contributors_pre = len(self.project.contributors)
reg_user = UserFactory()
name, email = fake.name(), fake.email()
pseudouser = {
'id': None,
'registered': False,
'fullname': name,
'email': email,
'permission': 'admin',
'visible': True,
}
reg_dict = add_contributor_json(reg_user)
reg_dict['permission'] = 'admin'
reg_dict['visible'] = True
payload = {
'users': [reg_dict, pseudouser],
'node_ids': []
}
url = self.project.api_url_for('project_contributors_post')
self.app.post_json(url, payload).maybe_follow()
self.project.reload()
assert_equal(len(self.project.contributors),
n_contributors_pre + len(payload['users']))
new_unreg = auth.get_user(email=email)
assert_false(new_unreg.is_registered)
# unclaimed record was added
new_unreg.reload()
assert_in(self.project._primary_key, new_unreg.unclaimed_records)
rec = new_unreg.get_unclaimed_record(self.project._primary_key)
assert_equal(rec['name'], name)
assert_equal(rec['email'], email)
@mock.patch('website.project.views.contributor.send_claim_email')
def test_add_contributors_post_only_sends_one_email_to_unreg_user(
self, mock_send_claim_email):
# Project has components
comp1, comp2 = NodeFactory(
creator=self.creator), NodeFactory(creator=self.creator)
self.project.nodes.append(comp1)
self.project.nodes.append(comp2)
self.project.save()
# An unreg user is added to the project AND its components
unreg_user = { # dict because user has no previous unreg record
'id': None,
'registered': False,
'fullname': fake.name(),
'email': fake.email(),
'permission': 'admin',
'visible': True,
}
payload = {
'users': [unreg_user],
'node_ids': [comp1._primary_key, comp2._primary_key]
}
# send request
url = self.project.api_url_for('project_contributors_post')
assert_true(self.project.can_edit(user=self.creator))
self.app.post_json(url, payload, auth=self.creator.auth)
# send_claim_email should only have been called once
assert_equal(mock_send_claim_email.call_count, 1)
@mock.patch('website.mails.send_mail')
def test_add_contributors_post_only_sends_one_email_to_registered_user(self, mock_send_mail):
# Project has components
comp1 = NodeFactory(creator=self.creator, parent=self.project)
comp2 = NodeFactory(creator=self.creator, parent=self.project)
# A registered user is added to the project AND its components
user = UserFactory()
user_dict = {
'id': user._id,
'fullname': user.fullname,
'email': user.username,
'permission': 'write',
'visible': True}
payload = {
'users': [user_dict],
'node_ids': [comp1._primary_key, comp2._primary_key]
}
# send request
url = self.project.api_url_for('project_contributors_post')
assert self.project.can_edit(user=self.creator)
self.app.post_json(url, payload, auth=self.creator.auth)
# send_mail should only have been called once
assert_equal(mock_send_mail.call_count, 1)
@mock.patch('website.mails.send_mail')
def test_add_contributors_post_sends_email_if_user_not_contributor_on_parent_node(self, mock_send_mail):
# Project has a component with a sub-component
component = NodeFactory(creator=self.creator, parent=self.project)
sub_component = NodeFactory(creator=self.creator, parent=component)
# A registered user is added to the project and the sub-component, but NOT the component
user = UserFactory()
user_dict = {
'id': user._id,
'fullname': user.fullname,
'email': user.username,
'permission': 'write',
'visible': True}
payload = {
'users': [user_dict],
'node_ids': [sub_component._primary_key]
}
# send request
url = self.project.api_url_for('project_contributors_post')
assert self.project.can_edit(user=self.creator)
self.app.post_json(url, payload, auth=self.creator.auth)
# send_mail is called for both the project and the sub-component
assert_equal(mock_send_mail.call_count, 2)
@mock.patch('website.project.views.contributor.send_claim_email')
def test_email_sent_when_unreg_user_is_added(self, send_mail):
name, email = fake.name(), fake.email()
pseudouser = {
'id': None,
'registered': False,
'fullname': name,
'email': email,
'permission': 'admin',
'visible': True,
}
payload = {
'users': [pseudouser],
'node_ids': []
}
url = self.project.api_url_for('project_contributors_post')
self.app.post_json(url, payload).maybe_follow()
assert_true(send_mail.called)
assert_true(send_mail.called_with(email=email))
@mock.patch('website.mails.send_mail')
def test_email_sent_when_reg_user_is_added(self, send_mail):
contributor = UserFactory()
contributors = [{
'user': contributor,
'visible': True,
'permissions': ['read', 'write']
}]
project = ProjectFactory()
project.add_contributors(contributors, auth=Auth(self.project.creator))
project.save()
assert_true(send_mail.called)
send_mail.assert_called_with(
contributor.username,
mails.CONTRIBUTOR_ADDED,
user=contributor,
node=project)
assert_equal(contributor.contributor_added_email_records[project._id]['last_sent'], int(time.time()))
@mock.patch('website.mails.send_mail')
def test_contributor_added_email_not_sent_to_unreg_user(self, send_mail):
unreg_user = UnregUserFactory()
contributors = [{
'user': unreg_user,
'visible': True,
'permissions': ['read', 'write']
}]
project = ProjectFactory()
project.add_contributors(contributors, auth=Auth(self.project.creator))
project.save()
assert_false(send_mail.called)
@mock.patch('website.mails.send_mail')
def test_forking_project_does_not_send_contributor_added_email(self, send_mail):
project = ProjectFactory()
project.fork_node(auth=Auth(project.creator))
assert_false(send_mail.called)
@mock.patch('website.mails.send_mail')
def test_templating_project_does_not_send_contributor_added_email(self, send_mail):
project = ProjectFactory()
project.use_as_template(auth=Auth(project.creator))
assert_false(send_mail.called)
@mock.patch('website.archiver.tasks.archive')
@mock.patch('website.mails.send_mail')
def test_registering_project_does_not_send_contributor_added_email(self, send_mail, mock_archive):
project = ProjectFactory()
project.register_node(None, Auth(user=project.creator), '', None)
assert_false(send_mail.called)
@mock.patch('website.mails.send_mail')
def test_notify_contributor_email_does_not_send_before_throttle_expires(self, send_mail):
contributor = UserFactory()
project = ProjectFactory()
notify_added_contributor(project, contributor)
assert_true(send_mail.called)
# 2nd call does not send email because throttle period has not expired
notify_added_contributor(project, contributor)
assert_equal(send_mail.call_count, 1)
@mock.patch('website.mails.send_mail')
def test_notify_contributor_email_sends_after_throttle_expires(self, send_mail):
throttle = 0.5
contributor = UserFactory()
project = ProjectFactory()
notify_added_contributor(project, contributor, throttle=throttle)
assert_true(send_mail.called)
time.sleep(1) # throttle period expires
notify_added_contributor(project, contributor, throttle=throttle)
assert_equal(send_mail.call_count, 2)
def test_add_multiple_contributors_only_adds_one_log(self):
n_logs_pre = len(self.project.logs)
reg_user = UserFactory()
name = fake.name()
pseudouser = {
'id': None,
'registered': False,
'fullname': name,
'email': fake.email(),
'permission': 'write',
'visible': True,
}
reg_dict = add_contributor_json(reg_user)
reg_dict['permission'] = 'admin'
reg_dict['visible'] = True
payload = {
'users': [reg_dict, pseudouser],
'node_ids': []
}
url = self.project.api_url_for('project_contributors_post')
self.app.post_json(url, payload).maybe_follow()
self.project.reload()
assert_equal(len(self.project.logs), n_logs_pre + 1)
def test_add_contribs_to_multiple_nodes(self):
child = NodeFactory(parent=self.project, creator=self.creator)
n_contributors_pre = len(child.contributors)
reg_user = UserFactory()
name, email = fake.name(), fake.email()
pseudouser = {
'id': None,
'registered': False,
'fullname': name,
'email': email,
'permission': 'admin',
'visible': True,
}
reg_dict = add_contributor_json(reg_user)
reg_dict['permission'] = 'admin'
reg_dict['visible'] = True
payload = {
'users': [reg_dict, pseudouser],
'node_ids': [self.project._primary_key, child._primary_key]
}
url = "/api/v1/project/{0}/contributors/".format(self.project._id)
self.app.post_json(url, payload).maybe_follow()
child.reload()
assert_equal(len(child.contributors),
n_contributors_pre + len(payload['users']))
def tearDown(self):
super(TestAddingContributorViews, self).tearDown()
contributor_added.disconnect(notify_added_contributor)
class TestUserInviteViews(OsfTestCase):
def setUp(self):
super(TestUserInviteViews, self).setUp()
ensure_schemas()
self.user = AuthUserFactory()
self.project = ProjectFactory(creator=self.user)
self.invite_url = '/api/v1/project/{0}/invite_contributor/'.format(
self.project._primary_key)
def test_invite_contributor_post_if_not_in_db(self):
name, email = fake.name(), fake.email()
res = self.app.post_json(
self.invite_url,
{'fullname': name, 'email': email},
auth=self.user.auth,
)
contrib = res.json['contributor']
assert_true(contrib['id'] is None)
assert_equal(contrib['fullname'], name)
assert_equal(contrib['email'], email)
def test_invite_contributor_post_if_unreg_already_in_db(self):
# An unreg user is added to a different project
name, email = fake.name(), fake.email()
project2 = ProjectFactory()
unreg_user = project2.add_unregistered_contributor(fullname=name, email=email,
auth=Auth(project2.creator))
project2.save()
res = self.app.post_json(self.invite_url,
{'fullname': name, 'email': email}, auth=self.user.auth)
expected = add_contributor_json(unreg_user)
expected['fullname'] = name
expected['email'] = email
assert_equal(res.json['contributor'], expected)
def test_invite_contributor_post_if_email_already_registered(self):
reg_user = UserFactory()
# Tries to invite a user that is already registered
res = self.app.post_json(self.invite_url,
{'fullname': fake.name(), 'email': reg_user.username},
auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, http.BAD_REQUEST)
def test_invite_contributor_post_if_user_is_already_contributor(self):
unreg_user = self.project.add_unregistered_contributor(
fullname=fake.name(), email=fake.email(),
auth=Auth(self.project.creator)
)
self.project.save()
# Tries to invite unreg user that is already a contributor
res = self.app.post_json(self.invite_url,
{'fullname': fake.name(), 'email': unreg_user.username},
auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, http.BAD_REQUEST)
def test_invite_contributor_with_no_email(self):
name = fake.name()
res = self.app.post_json(self.invite_url,
{'fullname': name, 'email': None}, auth=self.user.auth)
assert_equal(res.status_code, http.OK)
data = res.json
assert_equal(data['status'], 'success')
assert_equal(data['contributor']['fullname'], name)
assert_true(data['contributor']['email'] is None)
assert_false(data['contributor']['registered'])
def test_invite_contributor_requires_fullname(self):
res = self.app.post_json(self.invite_url,
{'email': 'brian@queen.com', 'fullname': ''}, auth=self.user.auth,
expect_errors=True)
assert_equal(res.status_code, http.BAD_REQUEST)
@mock.patch('website.project.views.contributor.mails.send_mail')
def test_send_claim_email_to_given_email(self, send_mail):
project = ProjectFactory()
given_email = fake.email()
unreg_user = project.add_unregistered_contributor(
fullname=fake.name(),
email=given_email,
auth=Auth(project.creator),
)
project.save()
send_claim_email(email=given_email, user=unreg_user, node=project)
assert_true(send_mail.called)
assert_true(send_mail.called_with(
to_addr=given_email,
mail=mails.INVITE
))
@mock.patch('website.project.views.contributor.mails.send_mail')
def test_send_claim_email_to_referrer(self, send_mail):
project = ProjectFactory()
referrer = project.creator
given_email, real_email = fake.email(), fake.email()
unreg_user = project.add_unregistered_contributor(fullname=fake.name(),
email=given_email, auth=Auth(
referrer)
)
project.save()
send_claim_email(email=real_email, user=unreg_user, node=project)
assert_true(send_mail.called)
# email was sent to referrer
assert_true(send_mail.called_with(
to_addr=referrer.username,
mail=mails.FORWARD_INVITE
))
@mock.patch('website.project.views.contributor.mails.send_mail')
def test_send_claim_email_before_throttle_expires(self, send_mail):
project = ProjectFactory()
given_email = fake.email()
unreg_user = project.add_unregistered_contributor(
fullname=fake.name(),
email=given_email,
auth=Auth(project.creator),
)
project.save()
send_claim_email(email=fake.email(), user=unreg_user, node=project)
# 2nd call raises error because throttle hasn't expired
with assert_raises(HTTPError):
send_claim_email(email=fake.email(), user=unreg_user, node=project)
send_mail.assert_not_called()
class TestClaimViews(OsfTestCase):
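# Fixture: self.referrer creates a public project and adds self.user as an unregistered
# (unclaimed) contributor; the tests exercise the claim workflow.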
def setUp(self):
super(TestClaimViews, self).setUp()
self.referrer = AuthUserFactory()
self.project = ProjectFactory(creator=self.referrer, is_public=True)
self.given_name = fake.name()
self.given_email = fake.email()
self.user = self.project.add_unregistered_contributor(
fullname=self.given_name,
email=self.given_email,
auth=Auth(user=self.referrer)
)
self.project.save()
@mock.patch('website.project.views.contributor.mails.send_mail')
def test_claim_user_post_with_registered_user_id(self, send_mail):
# registered user who is attempting to claim the unclaimed contributor
reg_user = UserFactory()
payload = {
# pk of unreg user record
'pk': self.user._primary_key,
'claimerId': reg_user._primary_key
}
url = '/api/v1/user/{uid}/{pid}/claim/email/'.format(
uid=self.user._primary_key,
pid=self.project._primary_key,
)
res = self.app.post_json(url, payload)
# mail was sent
assert_true(send_mail.called)
# ... to the correct address
assert_true(send_mail.called_with(to_addr=self.given_email))
# view returns the correct JSON
assert_equal(res.json, {
'status': 'success',
'email': reg_user.username,
'fullname': self.given_name,
})
@mock.patch('website.project.views.contributor.mails.send_mail')
def test_send_claim_registered_email(self, mock_send_mail):
reg_user = UserFactory()
send_claim_registered_email(
claimer=reg_user,
unreg_user=self.user,
node=self.project
)
mock_send_mail.assert_called()
assert_equal(mock_send_mail.call_count, 2)
first_call_args = mock_send_mail.call_args_list[0][0]
assert_equal(first_call_args[0], self.referrer.username)
second_call_args = mock_send_mail.call_args_list[1][0]
assert_equal(second_call_args[0], reg_user.username)
@mock.patch('website.project.views.contributor.mails.send_mail')
def test_send_claim_registered_email_before_throttle_expires(self, mock_send_mail):
reg_user = UserFactory()
send_claim_registered_email(
claimer=reg_user,
unreg_user=self.user,
node=self.project,
)
# second call raises error because it was made before the throttle period expired
with assert_raises(HTTPError):
send_claim_registered_email(
claimer=reg_user,
unreg_user=self.user,
node=self.project,
)
mock_send_mail.assert_not_called()
@mock.patch('website.project.views.contributor.send_claim_registered_email')
def test_claim_user_post_with_email_already_registered_sends_correct_email(
self, send_claim_registered_email):
reg_user = UserFactory()
payload = {
'value': reg_user.username,
'pk': self.user._primary_key
}
url = self.project.api_url_for('claim_user_post', uid=self.user._id)
self.app.post_json(url, payload)
assert_true(send_claim_registered_email.called)
def test_user_with_removed_unclaimed_url_claiming(self):
""" Tests that when an unclaimed user is removed from a project, the
unregistered user object does not retain the token.
"""
self.project.remove_contributor(self.user, Auth(user=self.referrer))
assert_not_in(
self.project._primary_key,
self.user.unclaimed_records.keys()
)
def test_user_with_claim_url_cannot_claim_twice(self):
""" Tests that when an unclaimed user is replaced on a project with a
claimed user, the unregistered user object does not retain the token.
"""
reg_user = AuthUserFactory()
self.project.replace_contributor(self.user, reg_user)
assert_not_in(
self.project._primary_key,
self.user.unclaimed_records.keys()
)
def test_claim_user_form_redirects_to_password_confirm_page_if_user_is_logged_in(self):
reg_user = AuthUserFactory()
url = self.user.get_claim_url(self.project._primary_key)
res = self.app.get(url, auth=reg_user.auth)
assert_equal(res.status_code, 302)
res = res.follow(auth=reg_user.auth)
token = self.user.get_unclaimed_record(self.project._primary_key)['token']
expected = self.project.web_url_for(
'claim_user_registered',
uid=self.user._id,
token=token,
)
assert_equal(res.request.path, expected)
def test_get_valid_form(self):
url = self.user.get_claim_url(self.project._primary_key)
res = self.app.get(url).maybe_follow()
assert_equal(res.status_code, 200)
def test_invalid_claim_form_redirects_to_register_page(self):
uid = self.user._primary_key
pid = self.project._primary_key
url = '/user/{uid}/{pid}/claim/?token=badtoken'.format(**locals())
res = self.app.get(url, expect_errors=True).maybe_follow()
assert_equal(res.status_code, 200)
assert_equal(res.request.path, web_url_for('auth_login'))
def test_posting_to_claim_form_with_valid_data(self):
url = self.user.get_claim_url(self.project._primary_key)
res = self.app.post(url, {
'username': self.user.username,
'password': 'killerqueen',
'password2': 'killerqueen'
}).maybe_follow()
assert_equal(res.status_code, 200)
self.user.reload()
assert_true(self.user.is_registered)
assert_true(self.user.is_active)
assert_not_in(self.project._primary_key, self.user.unclaimed_records)
def test_posting_to_claim_form_removes_all_unclaimed_data(self):
# user has multiple unclaimed records
p2 = ProjectFactory(creator=self.referrer)
self.user.add_unclaimed_record(node=p2, referrer=self.referrer,
given_name=fake.name())
self.user.save()
assert_true(len(self.user.unclaimed_records.keys()) > 1) # sanity check
url = self.user.get_claim_url(self.project._primary_key)
self.app.post(url, {
'username': self.given_email,
'password': 'bohemianrhap',
'password2': 'bohemianrhap'
})
self.user.reload()
assert_equal(self.user.unclaimed_records, {})
def test_posting_to_claim_form_sets_fullname_to_given_name(self):
# User is created with a full name
original_name = fake.name()
unreg = UnregUserFactory(fullname=original_name)
# User invited with a different name
different_name = fake.name()
new_user = self.project.add_unregistered_contributor(
email=unreg.username,
fullname=different_name,
auth=Auth(self.project.creator),
)
self.project.save()
# Goes to claim url
claim_url = new_user.get_claim_url(self.project._id)
self.app.post(claim_url, {
'username': unreg.username,
'password': 'killerqueen', 'password2': 'killerqueen'
})
unreg.reload()
# Full name was set correctly
assert_equal(unreg.fullname, different_name)
# CSL names were set correctly
parsed_name = impute_names_model(different_name)
assert_equal(unreg.given_name, parsed_name['given_name'])
assert_equal(unreg.family_name, parsed_name['family_name'])
@mock.patch('website.project.views.contributor.mails.send_mail')
def test_claim_user_post_returns_fullname(self, send_mail):
url = '/api/v1/user/{0}/{1}/claim/email/'.format(self.user._primary_key,
self.project._primary_key)
res = self.app.post_json(url,
{'value': self.given_email,
'pk': self.user._primary_key},
auth=self.referrer.auth)
assert_equal(res.json['fullname'], self.given_name)
assert_true(send_mail.called)
assert_true(send_mail.called_with(to_addr=self.given_email))
@mock.patch('website.project.views.contributor.mails.send_mail')
def test_claim_user_post_if_email_is_different_from_given_email(self, send_mail):
email = fake.email() # email that is different from the one the referrer gave
url = '/api/v1/user/{0}/{1}/claim/email/'.format(self.user._primary_key,
self.project._primary_key)
self.app.post_json(url,
{'value': email, 'pk': self.user._primary_key}
)
assert_true(send_mail.called)
assert_equal(send_mail.call_count, 2)
call_to_invited = send_mail.mock_calls[0]
assert_true(call_to_invited.called_with(
to_addr=email
))
call_to_referrer = send_mail.mock_calls[1]
assert_true(call_to_referrer.called_with(
to_addr=self.given_email
))
def test_claim_url_with_bad_token_returns_400(self):
url = self.project.web_url_for(
'claim_user_registered',
uid=self.user._id,
token='badtoken',
)
res = self.app.get(url, auth=self.referrer.auth, expect_errors=True)
assert_equal(res.status_code, 400)
def test_cannot_claim_user_with_user_who_is_already_contributor(self):
# user who is already a contributor to the project
contrib = AuthUserFactory()
self.project.add_contributor(contrib, auth=Auth(self.project.creator))
self.project.save()
# Claiming user goes to claim url, but contrib is already logged in
url = self.user.get_claim_url(self.project._primary_key)
res = self.app.get(
url,
auth=contrib.auth,
).follow(
auth=contrib.auth,
expect_errors=True,
)
# Response is a 400
assert_equal(res.status_code, 400)
class TestWatchViews(OsfTestCase):
def setUp(self):
super(TestWatchViews, self).setUp()
self.user = AuthUserFactory()
self.consolidate_auth = Auth(user=self.user)
self.auth = self.user.auth # used for requests auth
# A public project
self.project = ProjectFactory(is_public=True)
self.project.save()
# Manually reset log date to 100 days ago so it won't show up in feed
self.project.logs[0].date = dt.datetime.utcnow() - dt.timedelta(days=100)
self.project.logs[0].save()
# A log added now
self.last_log = self.project.add_log(
NodeLog.TAG_ADDED,
params={'node': self.project._primary_key},
auth=self.consolidate_auth,
log_date=dt.datetime.utcnow(),
save=True,
)
# Clear watched list
self.user.watched = []
self.user.save()
def test_watching_a_project_appends_to_users_watched_list(self):
n_watched_then = len(self.user.watched)
url = '/api/v1/project/{0}/watch/'.format(self.project._id)
res = self.app.post_json(url,
params={"digest": True},
auth=self.auth)
assert_equal(res.json['watchCount'], 1)
self.user.reload()
n_watched_now = len(self.user.watched)
assert_equal(res.status_code, 200)
assert_equal(n_watched_now, n_watched_then + 1)
assert_true(self.user.watched[-1].digest)
def test_watching_project_twice_returns_400(self):
url = "/api/v1/project/{0}/watch/".format(self.project._id)
res = self.app.post_json(url,
params={},
auth=self.auth)
assert_equal(res.status_code, 200)
# User tries to watch a node she's already watching
res2 = self.app.post_json(url,
params={},
auth=self.auth,
expect_errors=True)
assert_equal(res2.status_code, http.BAD_REQUEST)
def test_unwatching_a_project_removes_from_watched_list(self):
# The user has already watched a project
watch_config = WatchConfigFactory(node=self.project)
self.user.watch(watch_config)
self.user.save()
n_watched_then = len(self.user.watched)
url = '/api/v1/project/{0}/unwatch/'.format(self.project._id)
res = self.app.post_json(url, {}, auth=self.auth)
self.user.reload()
n_watched_now = len(self.user.watched)
assert_equal(res.status_code, 200)
assert_equal(n_watched_now, n_watched_then - 1)
assert_false(self.user.is_watching(self.project))
def test_toggle_watch(self):
# The user is not watching project
assert_false(self.user.is_watching(self.project))
url = "/api/v1/project/{0}/togglewatch/".format(self.project._id)
res = self.app.post_json(url, {}, auth=self.auth)
# The response json has a watchcount and watched property
assert_equal(res.json['watchCount'], 1)
assert_true(res.json['watched'])
assert_equal(res.status_code, 200)
self.user.reload()
# The user is now watching the project
assert_true(res.json['watched'])
assert_true(self.user.is_watching(self.project))
def test_toggle_watch_node(self):
# The project has a public sub-node
node = NodeFactory(creator=self.user, parent=self.project, is_public=True)
url = "/api/v1/project/{}/node/{}/togglewatch/".format(self.project._id,
node._id)
res = self.app.post_json(url, {}, auth=self.auth)
assert_equal(res.status_code, 200)
self.user.reload()
# The user is now watching the sub-node
assert_true(res.json['watched'])
assert_true(self.user.is_watching(node))
def test_get_watched_logs_with_hardcoded_url(self):
project = ProjectFactory()
# Add some logs
for _ in range(12):
project.logs.append(NodeLogFactory(user=self.user, action="file_added"))
project.save()
watch_cfg = WatchConfigFactory(node=project)
self.user.watch(watch_cfg)
self.user.save()
url = "/api/v1/watched/logs/"
res = self.app.get(url, auth=self.auth)
assert_equal(len(res.json['logs']), 10)
assert_equal(res.json['logs'][0]['action'], 'file_added')
def test_get_watched_logs(self):
project = ProjectFactory()
# Add some logs
for _ in range(12):
project.logs.append(NodeLogFactory(user=self.user, action="file_added"))
project.save()
watch_cfg = WatchConfigFactory(node=project)
self.user.watch(watch_cfg)
self.user.save()
url = api_url_for("watched_logs_get")
res = self.app.get(url, auth=self.auth)
assert_equal(len(res.json['logs']), 10)
# 1 project create log then 12 generated logs
assert_equal(res.json['total'], 12 + 1)
assert_equal(res.json['page'], 0)
assert_equal(res.json['pages'], 2)
assert_equal(res.json['logs'][0]['action'], 'file_added')
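# The watched-logs endpoint pages results 10 at a time; with 13 logs total
# (12 generated plus the project creation log), page 1 holds the remaining 3.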
def test_get_more_watched_logs(self):
project = ProjectFactory()
# Add some logs
for _ in range(12):
project.logs.append(NodeLogFactory(user=self.user, action="file_added"))
project.save()
watch_cfg = WatchConfigFactory(node=project)
self.user.watch(watch_cfg)
self.user.save()
url = api_url_for("watched_logs_get")
page = 1
res = self.app.get(url, {'page': page}, auth=self.auth)
assert_equal(len(res.json['logs']), 3)
# 1 project create log then 12 generated logs
assert_equal(res.json['total'], 12 + 1)
assert_equal(res.json['page'], page)
assert_equal(res.json['pages'], 2)
assert_equal(res.json['logs'][0]['action'], 'file_added')
def test_get_more_watched_logs_invalid_page(self):
project = ProjectFactory()
watch_cfg = WatchConfigFactory(node=project)
self.user.watch(watch_cfg)
self.user.save()
url = api_url_for("watched_logs_get")
invalid_page = 'invalid page'
res = self.app.get(
url, {'page': invalid_page}, auth=self.auth, expect_errors=True
)
assert_equal(res.status_code, 400)
assert_equal(
res.json['message_long'],
'Invalid value for "page".'
)
def test_get_more_watched_logs_invalid_size(self):
project = ProjectFactory()
watch_cfg = WatchConfigFactory(node=project)
self.user.watch(watch_cfg)
self.user.save()
url = api_url_for("watched_logs_get")
invalid_size = 'invalid size'
res = self.app.get(
url, {'size': invalid_size}, auth=self.auth, expect_errors=True
)
assert_equal(res.status_code, 400)
assert_equal(
res.json['message_long'],
'Invalid value for "size".'
)
class TestPointerViews(OsfTestCase):
def setUp(self):
super(TestPointerViews, self).setUp()
self.user = AuthUserFactory()
self.consolidate_auth = Auth(user=self.user)
self.project = ProjectFactory(creator=self.user)
# https://github.com/CenterForOpenScience/openscienceframework.org/issues/1109
def test_get_pointed_excludes_folders(self):
pointer_project = ProjectFactory(is_public=True) # project that points to another project
pointed_project = ProjectFactory(creator=self.user) # project that other project points to
pointer_project.add_pointer(pointed_project, Auth(pointer_project.creator), save=True)
# Project is in a dashboard folder
folder = FolderFactory(creator=pointed_project.creator)
folder.add_pointer(pointed_project, Auth(pointed_project.creator), save=True)
url = pointed_project.api_url_for('get_pointed')
res = self.app.get(url, auth=self.user.auth)
assert_equal(res.status_code, 200)
# pointer_project's id is included in response, but folder's id is not
pointer_ids = [each['id'] for each in res.json['pointed']]
assert_in(pointer_project._id, pointer_ids)
assert_not_in(folder._id, pointer_ids)
def test_add_pointers(self):
url = self.project.api_url + 'pointer/'
node_ids = [
NodeFactory()._id
for _ in range(5)
]
self.app.post_json(
url,
{'nodeIds': node_ids},
auth=self.user.auth,
).maybe_follow()
self.project.reload()
assert_equal(
len(self.project.nodes),
5
)
def test_add_the_same_pointer_more_than_once(self):
url = self.project.api_url + 'pointer/'
double_node = NodeFactory()
self.app.post_json(
url,
{'nodeIds': [double_node._id]},
auth=self.user.auth,
)
res = self.app.post_json(
url,
{'nodeIds': [double_node._id]},
auth=self.user.auth,
expect_errors=True
)
assert_equal(res.status_code, 400)
def test_add_pointers_no_user_logged_in(self):
url = self.project.api_url_for('add_pointers')
node_ids = [
NodeFactory()._id
for _ in range(5)
]
res = self.app.post_json(
url,
{'nodeIds': node_ids},
auth=None,
expect_errors=True
)
assert_equal(res.status_code, 401)
def test_add_pointers_public_non_contributor(self):
project2 = ProjectFactory()
project2.set_privacy('public')
project2.save()
url = self.project.api_url_for('add_pointers')
self.app.post_json(
url,
{'nodeIds': [project2._id]},
auth=self.user.auth,
).maybe_follow()
self.project.reload()
assert_equal(
len(self.project.nodes),
1
)
def test_add_pointers_contributor(self):
user2 = AuthUserFactory()
self.project.add_contributor(user2)
self.project.save()
url = self.project.api_url_for('add_pointers')
node_ids = [
NodeFactory()._id
for _ in range(5)
]
self.app.post_json(
url,
{'nodeIds': node_ids},
auth=user2.auth,
).maybe_follow()
self.project.reload()
assert_equal(
len(self.project.nodes),
5
)
def test_add_pointers_not_provided(self):
url = self.project.api_url + 'pointer/'
res = self.app.post_json(url, {}, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
def test_move_pointers(self):
project_two = ProjectFactory(creator=self.user)
url = api_url_for('move_pointers')
node = NodeFactory()
pointer = self.project.add_pointer(node, auth=self.consolidate_auth)
assert_equal(len(self.project.nodes), 1)
assert_equal(len(project_two.nodes), 0)
user_auth = self.user.auth
move_request = \
{
'fromNodeId': self.project._id,
'toNodeId': project_two._id,
'pointerIds': [pointer.node._id],
}
self.app.post_json(
url,
move_request,
auth=user_auth,
).maybe_follow()
self.project.reload()
project_two.reload()
assert_equal(len(self.project.nodes), 0)
assert_equal(len(project_two.nodes), 1)
def test_remove_pointer(self):
url = self.project.api_url + 'pointer/'
node = NodeFactory()
pointer = self.project.add_pointer(node, auth=self.consolidate_auth)
self.app.delete_json(
url,
{'pointerId': pointer._id},
auth=self.user.auth,
)
self.project.reload()
assert_equal(
len(self.project.nodes),
0
)
def test_remove_pointer_not_provided(self):
url = self.project.api_url + 'pointer/'
res = self.app.delete_json(url, {}, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
def test_remove_pointer_not_found(self):
url = self.project.api_url + 'pointer/'
res = self.app.delete_json(
url,
{'pointerId': None},
auth=self.user.auth,
expect_errors=True
)
assert_equal(res.status_code, 400)
def test_remove_pointer_not_in_nodes(self):
url = self.project.api_url + 'pointer/'
node = NodeFactory()
pointer = Pointer(node=node)
res = self.app.delete_json(
url,
{'pointerId': pointer._id},
auth=self.user.auth,
expect_errors=True
)
assert_equal(res.status_code, 400)
def test_fork_pointer(self):
url = self.project.api_url + 'pointer/fork/'
node = NodeFactory(creator=self.user)
pointer = self.project.add_pointer(node, auth=self.consolidate_auth)
self.app.post_json(
url,
{'pointerId': pointer._id},
auth=self.user.auth
)
def test_fork_pointer_not_provided(self):
url = self.project.api_url + 'pointer/fork/'
res = self.app.post_json(url, {}, auth=self.user.auth,
expect_errors=True)
assert_equal(res.status_code, 400)
def test_fork_pointer_not_found(self):
url = self.project.api_url + 'pointer/fork/'
res = self.app.post_json(
url,
{'pointerId': None},
auth=self.user.auth,
expect_errors=True
)
assert_equal(res.status_code, 400)
def test_fork_pointer_not_in_nodes(self):
url = self.project.api_url + 'pointer/fork/'
node = NodeFactory()
pointer = Pointer(node=node)
res = self.app.post_json(
url,
{'pointerId': pointer._id},
auth=self.user.auth,
expect_errors=True
)
assert_equal(res.status_code, 400)
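# The before-fork and before-register endpoints return a list of prompts; the next
# tests check whether the link-copy warning appears among them.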
def test_before_register_with_pointer(self):
"Assert that link warning appears in before register callback."
node = NodeFactory()
self.project.add_pointer(node, auth=self.consolidate_auth)
url = self.project.api_url + 'fork/before/'
res = self.app.get(url, auth=self.user.auth).maybe_follow()
prompts = [
prompt
for prompt in res.json['prompts']
if 'Links will be copied into your fork' in prompt
]
assert_equal(len(prompts), 1)
def test_before_fork_with_pointer(self):
"Assert that link warning appears in before fork callback."
node = NodeFactory()
self.project.add_pointer(node, auth=self.consolidate_auth)
url = self.project.api_url + 'beforeregister/'
res = self.app.get(url, auth=self.user.auth).maybe_follow()
prompts = [
prompt
for prompt in res.json['prompts']
if 'Links will be copied into your registration' in prompt
]
assert_equal(len(prompts), 1)
def test_before_register_no_pointer(self):
"Assert that link warning does not appear in before register callback."
url = self.project.api_url + 'fork/before/'
res = self.app.get(url, auth=self.user.auth).maybe_follow()
prompts = [
prompt
for prompt in res.json['prompts']
if 'Links will be copied into your fork' in prompt
]
assert_equal(len(prompts), 0)
def test_before_fork_no_pointer(self):
"""Assert that link warning does not appear in before fork callback.
"""
url = self.project.api_url + 'beforeregister/'
res = self.app.get(url, auth=self.user.auth).maybe_follow()
prompts = [
prompt
for prompt in res.json['prompts']
if 'Links will be copied into your registration' in prompt
]
assert_equal(len(prompts), 0)
def test_get_pointed(self):
pointing_node = ProjectFactory(creator=self.user)
pointing_node.add_pointer(self.project, auth=Auth(self.user))
url = self.project.api_url_for('get_pointed')
res = self.app.get(url, auth=self.user.auth)
pointed = res.json['pointed']
assert_equal(len(pointed), 1)
assert_equal(pointed[0]['url'], pointing_node.url)
assert_equal(pointed[0]['title'], pointing_node.title)
assert_equal(pointed[0]['authorShort'], abbrev_authors(pointing_node))
def test_get_pointed_private(self):
secret_user = UserFactory()
pointing_node = ProjectFactory(creator=secret_user)
pointing_node.add_pointer(self.project, auth=Auth(secret_user))
url = self.project.api_url_for('get_pointed')
res = self.app.get(url, auth=self.user.auth)
pointed = res.json['pointed']
assert_equal(len(pointed), 1)
assert_equal(pointed[0]['url'], None)
assert_equal(pointed[0]['title'], 'Private Component')
assert_equal(pointed[0]['authorShort'], 'Private Author(s)')
class TestPublicViews(OsfTestCase):
def test_explore(self):
res = self.app.get("/explore/").maybe_follow()
assert_equal(res.status_code, 200)
def test_forgot_password_get(self):
res = self.app.get(web_url_for('forgot_password_get'))
assert_equal(res.status_code, 200)
assert_in('Forgot Password', res.body)
class TestAuthViews(OsfTestCase):
def setUp(self):
super(TestAuthViews, self).setUp()
self.user = AuthUserFactory()
self.auth = self.user.auth
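# Merging requires the duplicate account's credentials; after the merge the duplicate user is flagged as merged.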
def test_merge_user(self):
dupe = UserFactory(
username="copy@cat.com",
emails=['copy@cat.com']
)
dupe.set_password("copycat")
dupe.save()
url = "/api/v1/user/merge/"
self.app.post_json(
url,
{
"merged_username": "copy@cat.com",
"merged_password": "copycat"
},
auth=self.auth,
)
self.user.reload()
dupe.reload()
assert_true(dupe.is_merged)
@mock.patch('framework.auth.views.mails.send_mail')
def test_register_sends_confirm_email(self, send_mail):
url = '/register/'
self.app.post(url, {
'register-fullname': 'Freddie Mercury',
'register-username': 'fred@queen.com',
'register-password': 'killerqueen',
'register-username2': 'fred@queen.com',
'register-password2': 'killerqueen',
})
assert_true(send_mail.called)
assert_true(send_mail.called_with(
to_addr='fred@queen.com'
))
@mock.patch('framework.auth.views.mails.send_mail')
def test_register_ok(self, _):
url = api_url_for('register_user')
name, email, password = fake.name(), fake.email(), 'underpressure'
self.app.post_json(
url,
{
'fullName': name,
'email1': email,
'email2': email,
'password': password,
}
)
user = User.find_one(Q('username', 'eq', email))
assert_equal(user.fullname, name)
# Regression test for https://github.com/CenterForOpenScience/osf.io/issues/2902
@mock.patch('framework.auth.views.mails.send_mail')
def test_register_email_case_insensitive(self, _):
url = api_url_for('register_user')
name, email, password = fake.name(), fake.email(), 'underpressure'
self.app.post_json(
url,
{
'fullName': name,
'email1': email,
'email2': str(email).upper(),
'password': password,
}
)
user = User.find_one(Q('username', 'eq', email))
assert_equal(user.fullname, name)
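# Registration should strip markup from the submitted full name and store only the remaining text.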
@mock.patch('framework.auth.views.send_confirm_email')
def test_register_scrubs_username(self, _):
url = api_url_for('register_user')
name = "<i>Eunice</i> O' \"Cornwallis\"<script type='text/javascript' src='http://www.cornify.com/js/cornify.js'></script><script type='text/javascript'>cornify_add()</script>"
email, password = fake.email(), 'underpressure'
res = self.app.post_json(
url,
{
'fullName': name,
'email1': email,
'email2': email,
'password': password,
}
)
expected_scrub_username = "Eunice O' \"Cornwallis\"cornify_add()"
user = User.find_one(Q('username', 'eq', email))
assert_equal(res.status_code, http.OK)
assert_equal(user.fullname, expected_scrub_username)
def test_register_email_mismatch(self):
url = api_url_for('register_user')
name, email, password = fake.name(), fake.email(), 'underpressure'
res = self.app.post_json(
url,
{
'fullName': name,
'email1': email,
'email2': email + 'lol',
'password': password,
},
expect_errors=True,
)
assert_equal(res.status_code, http.BAD_REQUEST)
users = User.find(Q('username', 'eq', email))
assert_equal(users.count(), 0)
def test_register_after_being_invited_as_unreg_contributor(self):
# Regression test for:
# https://github.com/CenterForOpenScience/openscienceframework.org/issues/861
# https://github.com/CenterForOpenScience/openscienceframework.org/issues/1021
# https://github.com/CenterForOpenScience/openscienceframework.org/issues/1026
# A user is invited as an unregistered contributor
project = ProjectFactory()
name, email = fake.name(), fake.email()
project.add_unregistered_contributor(fullname=name, email=email,
auth=Auth(project.creator))
project.save()
# The new, unregistered user
new_user = User.find_one(Q('username', 'eq', email))
# Instead of following the invitation link, they register at the regular
# registration page
# They use a different name when they register, but same email
real_name = fake.name()
password = 'myprecious'
url = api_url_for('register_user')
payload = {
'fullName': real_name,
'email1': email,
'email2': email,
'password': password,
}
# Send registration request
self.app.post_json(url, payload)
new_user.reload()
# New user confirms by following confirmation link
confirm_url = new_user.get_confirmation_url(email, external=False)
self.app.get(confirm_url)
new_user.reload()
# Password and fullname should be updated
assert_true(new_user.is_confirmed)
assert_true(new_user.check_password(password))
assert_equal(new_user.fullname, real_name)
@mock.patch('framework.auth.views.send_confirm_email')
def test_register_sends_user_registered_signal(self, mock_send_confirm_email):
url = api_url_for('register_user')
name, email, password = fake.name(), fake.email(), 'underpressure'
with capture_signals() as mock_signals:
self.app.post_json(
url,
{
'fullName': name,
'email1': email,
'email2': email,
'password': password,
}
)
assert_equal(mock_signals.signals_sent(), set([auth.signals.user_registered]))
mock_send_confirm_email.assert_called()
@mock.patch('framework.auth.views.send_confirm_email')
def test_register_post_sends_user_registered_signal(self, mock_send_confirm_email):
url = web_url_for('auth_register_post')
name, email, password = fake.name(), fake.email(), 'underpressure'
with capture_signals() as mock_signals:
self.app.post(url, {
'register-fullname': name,
'register-username': email,
'register-password': password,
'register-username2': email,
'register-password2': password
})
assert_equal(mock_signals.signals_sent(), set([auth.signals.user_registered]))
mock_send_confirm_email.assert_called()
def test_resend_confirmation_get(self):
res = self.app.get('/resend/')
assert_equal(res.status_code, 200)
@mock.patch('framework.auth.views.mails.send_mail')
def test_resend_confirmation(self, send_mail):
email = 'test@example.com'
token = self.user.add_unconfirmed_email(email)
self.user.save()
url = api_url_for('resend_confirmation')
header = {'address': email, 'primary': False, 'confirmed': False}
self.app.put_json(url, {'id': self.user._id, 'email': header}, auth=self.user.auth)
assert_true(send_mail.called)
assert_true(send_mail.called_with(
to_addr=email
))
self.user.reload()
assert_not_equal(token, self.user.get_confirmation_token(email))
with assert_raises(InvalidTokenError):
self.user._get_unconfirmed_email_for_token(token)
def test_resend_confirmation_without_user_id(self):
email = 'test@example.com'
url = api_url_for('resend_confirmation')
header = {'address': email, 'primary': False, 'confirmed': False}
res = self.app.put_json(url, {'email': header}, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
assert_equal(res.json['message_long'], '"id" is required')
def test_resend_confirmation_without_email(self):
url = api_url_for('resend_confirmation')
res = self.app.put_json(url, {'id': self.user._id}, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
def test_resend_confirmation_not_work_for_primary_email(self):
email = 'test@example.com'
url = api_url_for('resend_confirmation')
header = {'address': email, 'primary': True, 'confirmed': False}
res = self.app.put_json(url, {'id': self.user._id, 'email': header}, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
assert_equal(res.json['message_long'], 'Cannnot resend confirmation for confirmed emails')
def test_resend_confirmation_not_work_for_confirmed_email(self):
email = 'test@example.com'
url = api_url_for('resend_confirmation')
header = {'address': email, 'primary': False, 'confirmed': True}
res = self.app.put_json(url, {'id': self.user._id, 'email': header}, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
assert_equal(res.json['message_long'], 'Cannnot resend confirmation for confirmed emails')
def test_confirm_email_clears_unclaimed_records_and_revokes_token(self):
unclaimed_user = UnconfirmedUserFactory()
# unclaimed user has been invited to a project.
referrer = UserFactory()
project = ProjectFactory(creator=referrer)
unclaimed_user.add_unclaimed_record(project, referrer, 'foo')
unclaimed_user.save()
# sanity check
assert_equal(len(unclaimed_user.email_verifications.keys()), 1)
# user goes to email confirmation link
token = unclaimed_user.get_confirmation_token(unclaimed_user.username)
url = web_url_for('confirm_email_get', uid=unclaimed_user._id, token=token)
res = self.app.get(url)
assert_equal(res.status_code, 302)
# unclaimed records and token are cleared
unclaimed_user.reload()
assert_equal(unclaimed_user.unclaimed_records, {})
assert_equal(len(unclaimed_user.email_verifications.keys()), 0)
def test_confirmation_link_registers_user(self):
user = User.create_unconfirmed('brian@queen.com', 'bicycle123', 'Brian May')
assert_false(user.is_registered) # sanity check
user.save()
confirmation_url = user.get_confirmation_url('brian@queen.com', external=False)
res = self.app.get(confirmation_url)
assert_equal(res.status_code, 302, 'redirects to settings page')
res = res.follow()
user.reload()
assert_true(user.is_registered)
# TODO: Use mock add-on
class TestAddonUserViews(OsfTestCase):
def setUp(self):
super(TestAddonUserViews, self).setUp()
self.user = AuthUserFactory()
def test_choose_addons_add(self):
"""Add add-ons; assert that add-ons are attached to project.
"""
url = '/api/v1/settings/addons/'
self.app.post_json(
url,
{'github': True},
auth=self.user.auth,
).maybe_follow()
self.user.reload()
assert_true(self.user.get_addon('github'))
def test_choose_addons_remove(self):
# Add, then delete, add-ons; assert that add-ons are not attached to
# project.
url = '/api/v1/settings/addons/'
self.app.post_json(
url,
{'github': True},
auth=self.user.auth,
).maybe_follow()
self.app.post_json(
url,
{'github': False},
auth=self.user.auth
).maybe_follow()
self.user.reload()
assert_false(self.user.get_addon('github'))
class TestConfigureMailingListViews(OsfTestCase):
@classmethod
def setUpClass(cls):
super(TestConfigureMailingListViews, cls).setUpClass()
cls._original_enable_email_subscriptions = settings.ENABLE_EMAIL_SUBSCRIPTIONS
settings.ENABLE_EMAIL_SUBSCRIPTIONS = True
@unittest.skipIf(settings.USE_CELERY, 'Subscription must happen synchronously for this test')
@mock.patch('website.mailchimp_utils.get_mailchimp_api')
def test_user_choose_mailing_lists_updates_user_dict(self, mock_get_mailchimp_api):
user = AuthUserFactory()
list_name = 'OSF General'
mock_client = mock.MagicMock()
mock_get_mailchimp_api.return_value = mock_client
mock_client.lists.list.return_value = {'data': [{'id': 1, 'list_name': list_name}]}
list_id = mailchimp_utils.get_list_id_from_name(list_name)
payload = {settings.MAILCHIMP_GENERAL_LIST: True}
url = api_url_for('user_choose_mailing_lists')
res = self.app.post_json(url, payload, auth=user.auth)
user.reload()
# check user.mailing_lists is updated
assert_true(user.mailing_lists[settings.MAILCHIMP_GENERAL_LIST])
assert_equal(
user.mailing_lists[settings.MAILCHIMP_GENERAL_LIST],
payload[settings.MAILCHIMP_GENERAL_LIST]
)
# check that user is subscribed
mock_client.lists.subscribe.assert_called_with(id=list_id,
email={'email': user.username},
merge_vars= {'fname': user.given_name,
'lname': user.family_name,
},
double_optin=False,
update_existing=True)
def test_get_mailchimp_get_endpoint_returns_200(self):
url = api_url_for('mailchimp_get_endpoint')
res = self.app.get(url)
assert_equal(res.status_code, 200)
@mock.patch('website.mailchimp_utils.get_mailchimp_api')
def test_mailchimp_webhook_subscribe_action_updates_user(self, mock_get_mailchimp_api):
""" Test that 'subscribe' actions sent to the OSF via mailchimp
webhooks update the OSF database.
"""
list_id = '12345'
list_name = 'OSF General'
mock_client = mock.MagicMock()
mock_get_mailchimp_api.return_value = mock_client
mock_client.lists.list.return_value = {'data': [{'id': list_id, 'name': list_name}]}
# user is not subscribed to a list
user = AuthUserFactory()
user.mailing_lists = {'OSF General': False}
user.save()
# user subscribes and webhook sends request to OSF
data = {'type': 'subscribe',
'data[list_id]': list_id,
'data[email]': user.username
}
url = api_url_for('sync_data_from_mailchimp') + '?key=' + settings.MAILCHIMP_WEBHOOK_SECRET_KEY
res = self.app.post(url,
data,
content_type="application/x-www-form-urlencoded",
auth=user.auth)
# user field is updated on the OSF
user.reload()
assert_true(user.mailing_lists[list_name])
@mock.patch('website.mailchimp_utils.get_mailchimp_api')
def test_mailchimp_webhook_profile_action_does_not_change_user(self, mock_get_mailchimp_api):
""" Test that 'profile' actions sent to the OSF via mailchimp
webhooks do not cause any database changes.
"""
list_id = '12345'
list_name = 'OSF General'
mock_client = mock.MagicMock()
mock_get_mailchimp_api.return_value = mock_client
mock_client.lists.list.return_value = {'data': [{'id': list_id, 'name': list_name}]}
# user is subscribed to a list
user = AuthUserFactory()
user.mailing_lists = {'OSF General': True}
user.save()
# user hits subscribe again, which will update the user's existing info on mailchimp
# webhook sends request (when configured to update on changes made through the API)
data = {'type': 'profile',
'data[list_id]': list_id,
'data[email]': user.username
}
url = api_url_for('sync_data_from_mailchimp') + '?key=' + settings.MAILCHIMP_WEBHOOK_SECRET_KEY
res = self.app.post(url,
data,
content_type="application/x-www-form-urlencoded",
auth=user.auth)
# user field does not change
user.reload()
assert_true(user.mailing_lists[list_name])
@mock.patch('website.mailchimp_utils.get_mailchimp_api')
def test_sync_data_from_mailchimp_unsubscribes_user(self, mock_get_mailchimp_api):
list_id = '12345'
list_name = 'OSF General'
mock_client = mock.MagicMock()
mock_get_mailchimp_api.return_value = mock_client
mock_client.lists.list.return_value = {'data': [{'id': list_id, 'name': list_name}]}
# user is subscribed to a list
user = AuthUserFactory()
user.mailing_lists = {'OSF General': True}
user.save()
# user unsubscribes through mailchimp and webhook sends request
data = {'type': 'unsubscribe',
'data[list_id]': list_id,
'data[email]': user.username
}
url = api_url_for('sync_data_from_mailchimp') + '?key=' + settings.MAILCHIMP_WEBHOOK_SECRET_KEY
res = self.app.post(url,
data,
content_type="application/x-www-form-urlencoded",
auth=user.auth)
# user field is updated on the OSF
user.reload()
assert_false(user.mailing_lists[list_name])
def test_sync_data_from_mailchimp_fails_without_secret_key(self):
user = AuthUserFactory()
payload = {'values': {'type': 'unsubscribe',
'data': {'list_id': '12345',
'email': 'freddie@cos.io'}}}
url = api_url_for('sync_data_from_mailchimp')
res = self.app.post_json(url, payload, auth=user.auth, expect_errors=True)
assert_equal(res.status_code, http.UNAUTHORIZED)
@classmethod
def tearDownClass(cls):
super(TestConfigureMailingListViews, cls).tearDownClass()
settings.ENABLE_EMAIL_SUBSCRIPTIONS = cls._original_enable_email_subscriptions
# TODO: Move to OSF Storage
class TestFileViews(OsfTestCase):
def setUp(self):
super(TestFileViews, self).setUp()
self.user = AuthUserFactory()
self.project = ProjectFactory.build(creator=self.user, is_public=True)
self.project.add_contributor(self.user)
self.project.save()
def test_files_get(self):
url = self.project.api_url_for('collect_file_trees')
res = self.app.get(url, auth=self.user.auth)
expected = _view_project(self.project, auth=Auth(user=self.user))
assert_equal(res.status_code, http.OK)
assert_equal(res.json['node'], expected['node'])
assert_in('tree_js', res.json)
assert_in('tree_css', res.json)
def test_grid_data(self):
url = self.project.api_url_for('grid_data')
res = self.app.get(url, auth=self.user.auth).maybe_follow()
assert_equal(res.status_code, http.OK)
expected = rubeus.to_hgrid(self.project, auth=Auth(self.user))
data = res.json['data']
assert_equal(len(data), len(expected))
class TestComments(OsfTestCase):
def setUp(self):
super(TestComments, self).setUp()
self.project = ProjectFactory(is_public=True)
self.consolidated_auth = Auth(user=self.project.creator)
self.non_contributor = AuthUserFactory()
self.user = AuthUserFactory()
self.project.add_contributor(self.user)
self.project.save()
self.user.save()
def _configure_project(self, project, comment_level):
project.comment_level = comment_level
project.save()
def _add_comment(self, project, content=None, **kwargs):
content = content if content is not None else 'hammer to fall'
url = project.api_url + 'comment/'
return self.app.post_json(
url,
{
'content': content,
'isPublic': 'public',
},
**kwargs
)
def test_add_comment_public_contributor(self):
self._configure_project(self.project, 'public')
res = self._add_comment(
self.project, auth=self.project.creator.auth,
)
self.project.reload()
res_comment = res.json['comment']
date_created = parse_date(str(res_comment.pop('dateCreated')))
date_modified = parse_date(str(res_comment.pop('dateModified')))
serialized_comment = serialize_comment(self.project.commented[0], self.consolidated_auth)
date_created2 = parse_date(serialized_comment.pop('dateCreated'))
date_modified2 = parse_date(serialized_comment.pop('dateModified'))
assert_datetime_equal(date_created, date_created2)
assert_datetime_equal(date_modified, date_modified2)
assert_equal(len(self.project.commented), 1)
assert_equal(res_comment, serialized_comment)
def test_add_comment_public_non_contributor(self):
self._configure_project(self.project, 'public')
res = self._add_comment(
self.project, auth=self.non_contributor.auth,
)
self.project.reload()
res_comment = res.json['comment']
date_created = parse_date(res_comment.pop('dateCreated'))
date_modified = parse_date(res_comment.pop('dateModified'))
serialized_comment = serialize_comment(self.project.commented[0], Auth(user=self.non_contributor))
date_created2 = parse_date(serialized_comment.pop('dateCreated'))
date_modified2 = parse_date(serialized_comment.pop('dateModified'))
assert_datetime_equal(date_created, date_created2)
assert_datetime_equal(date_modified, date_modified2)
assert_equal(len(self.project.commented), 1)
assert_equal(res_comment, serialized_comment)
def test_add_comment_private_contributor(self):
self._configure_project(self.project, 'private')
res = self._add_comment(
self.project, auth=self.project.creator.auth,
)
self.project.reload()
res_comment = res.json['comment']
date_created = parse_date(str(res_comment.pop('dateCreated')))
date_modified = parse_date(str(res_comment.pop('dateModified')))
serialized_comment = serialize_comment(self.project.commented[0], self.consolidated_auth)
date_created2 = parse_date(serialized_comment.pop('dateCreated'))
date_modified2 = parse_date(serialized_comment.pop('dateModified'))
assert_datetime_equal(date_created, date_created2)
assert_datetime_equal(date_modified, date_modified2)
assert_equal(len(self.project.commented), 1)
assert_equal(res_comment, serialized_comment)
def test_add_comment_private_non_contributor(self):
self._configure_project(self.project, 'private')
res = self._add_comment(
self.project, auth=self.non_contributor.auth, expect_errors=True,
)
assert_equal(res.status_code, http.FORBIDDEN)
def test_add_comment_logged_out(self):
self._configure_project(self.project, 'public')
res = self._add_comment(self.project)
assert_equal(res.status_code, 302)
assert_in('login', res.headers.get('location'))
def test_add_comment_off(self):
self._configure_project(self.project, None)
res = self._add_comment(
self.project, auth=self.project.creator.auth, expect_errors=True,
)
assert_equal(res.status_code, http.BAD_REQUEST)
def test_add_comment_empty(self):
self._configure_project(self.project, 'public')
res = self._add_comment(
self.project, content='',
auth=self.project.creator.auth,
expect_errors=True,
)
assert_equal(res.status_code, http.BAD_REQUEST)
assert_false(getattr(self.project, 'commented', []))
def test_add_comment_toolong(self):
self._configure_project(self.project, 'public')
res = self._add_comment(
self.project, content='toolong' * 500,
auth=self.project.creator.auth,
expect_errors=True,
)
assert_equal(res.status_code, http.BAD_REQUEST)
assert_false(getattr(self.project, 'commented', []))
def test_add_comment_whitespace(self):
self._configure_project(self.project, 'public')
res = self._add_comment(
self.project, content=' ',
auth=self.project.creator.auth,
expect_errors=True
)
assert_equal(res.status_code, http.BAD_REQUEST)
assert_false(getattr(self.project, 'commented', []))
def test_edit_comment(self):
self._configure_project(self.project, 'public')
comment = CommentFactory(node=self.project)
url = self.project.api_url + 'comment/{0}/'.format(comment._id)
res = self.app.put_json(
url,
{
'content': 'edited',
'isPublic': 'private',
},
auth=self.project.creator.auth,
)
comment.reload()
assert_equal(res.json['content'], 'edited')
assert_equal(comment.content, 'edited')
def test_edit_comment_short(self):
self._configure_project(self.project, 'public')
comment = CommentFactory(node=self.project, content='short')
url = self.project.api_url + 'comment/{0}/'.format(comment._id)
res = self.app.put_json(
url,
{
'content': '',
'isPublic': 'private',
},
auth=self.project.creator.auth,
expect_errors=True,
)
comment.reload()
assert_equal(res.status_code, http.BAD_REQUEST)
assert_equal(comment.content, 'short')
def test_edit_comment_toolong(self):
self._configure_project(self.project, 'public')
comment = CommentFactory(node=self.project, content='short')
url = self.project.api_url + 'comment/{0}/'.format(comment._id)
res = self.app.put_json(
url,
{
'content': 'toolong' * 500,
'isPublic': 'private',
},
auth=self.project.creator.auth,
expect_errors=True,
)
comment.reload()
assert_equal(res.status_code, http.BAD_REQUEST)
assert_equal(comment.content, 'short')
def test_edit_comment_non_author(self):
"Contributors who are not the comment author cannot edit."
self._configure_project(self.project, 'public')
comment = CommentFactory(node=self.project)
non_author = AuthUserFactory()
self.project.add_contributor(non_author, auth=self.consolidated_auth)
url = self.project.api_url + 'comment/{0}/'.format(comment._id)
res = self.app.put_json(
url,
{
'content': 'edited',
'isPublic': 'private',
},
auth=non_author.auth,
expect_errors=True,
)
assert_equal(res.status_code, http.FORBIDDEN)
def test_edit_comment_non_contributor(self):
"Non-contributors who are not the comment author cannot edit."
self._configure_project(self.project, 'public')
comment = CommentFactory(node=self.project)
url = self.project.api_url + 'comment/{0}/'.format(comment._id)
res = self.app.put_json(
url,
{
'content': 'edited',
'isPublic': 'private',
},
auth=self.non_contributor.auth,
expect_errors=True,
)
assert_equal(res.status_code, http.FORBIDDEN)
def test_delete_comment_author(self):
self._configure_project(self.project, 'public')
comment = CommentFactory(node=self.project)
url = self.project.api_url + 'comment/{0}/'.format(comment._id)
self.app.delete_json(
url,
auth=self.project.creator.auth,
)
comment.reload()
assert_true(comment.is_deleted)
def test_delete_comment_non_author(self):
self._configure_project(self.project, 'public')
comment = CommentFactory(node=self.project)
url = self.project.api_url + 'comment/{0}/'.format(comment._id)
res = self.app.delete_json(
url,
auth=self.non_contributor.auth,
expect_errors=True,
)
assert_equal(res.status_code, http.FORBIDDEN)
comment.reload()
assert_false(comment.is_deleted)
def test_report_abuse(self):
self._configure_project(self.project, 'public')
comment = CommentFactory(node=self.project)
reporter = AuthUserFactory()
url = self.project.api_url + 'comment/{0}/report/'.format(comment._id)
self.app.post_json(
url,
{
'category': 'spam',
'text': 'ads',
},
auth=reporter.auth,
)
comment.reload()
assert_in(reporter._id, comment.reports)
assert_equal(
comment.reports[reporter._id],
{'category': 'spam', 'text': 'ads'}
)
def test_can_view_private_comments_if_contributor(self):
self._configure_project(self.project, 'public')
CommentFactory(node=self.project, user=self.project.creator, is_public=False)
url = self.project.api_url + 'comments/'
res = self.app.get(url, auth=self.project.creator.auth)
assert_equal(len(res.json['comments']), 1)
def test_view_comments_with_anonymous_link(self):
self.project.save()
self.project.set_privacy('private')
self.project.reload()
user = AuthUserFactory()
link = PrivateLinkFactory(anonymous=True)
link.nodes.append(self.project)
link.save()
CommentFactory(node=self.project, user=self.project.creator, is_public=False)
url = self.project.api_url + 'comments/'
res = self.app.get(url, {"view_only": link.key}, auth=user.auth)
comment = res.json['comments'][0]
author = comment['author']
assert_in('A user', author['name'])
assert_false(author['gravatarUrl'])
assert_false(author['url'])
assert_false(author['id'])
def test_discussion_recursive(self):
self._configure_project(self.project, 'public')
comment_l0 = CommentFactory(node=self.project)
user_l1 = UserFactory()
user_l2 = UserFactory()
comment_l1 = CommentFactory(node=self.project, target=comment_l0, user=user_l1)
CommentFactory(node=self.project, target=comment_l1, user=user_l2)
url = self.project.api_url + 'comments/discussion/'
res = self.app.get(url)
assert_equal(len(res.json['discussion']), 3)
def test_discussion_no_repeats(self):
self._configure_project(self.project, 'public')
comment_l0 = CommentFactory(node=self.project)
comment_l1 = CommentFactory(node=self.project, target=comment_l0)
CommentFactory(node=self.project, target=comment_l1)
url = self.project.api_url + 'comments/discussion/'
res = self.app.get(url)
assert_equal(len(res.json['discussion']), 1)
def test_discussion_sort(self):
self._configure_project(self.project, 'public')
user1 = UserFactory()
user2 = UserFactory()
CommentFactory(node=self.project)
for _ in range(3):
CommentFactory(node=self.project, user=user1)
for _ in range(2):
CommentFactory(node=self.project, user=user2)
url = self.project.api_url + 'comments/discussion/'
res = self.app.get(url)
assert_equal(len(res.json['discussion']), 3)
observed = [user['id'] for user in res.json['discussion']]
expected = [user1._id, user2._id, self.project.creator._id]
assert_equal(observed, expected)
def test_view_comments_updates_user_comments_view_timestamp(self):
CommentFactory(node=self.project)
url = self.project.api_url_for('update_comments_timestamp')
res = self.app.put_json(url, auth=self.user.auth)
self.user.reload()
user_timestamp = self.user.comments_viewed_timestamp[self.project._id]
view_timestamp = dt.datetime.utcnow()
assert_datetime_equal(user_timestamp, view_timestamp)
def test_confirm_non_contrib_viewers_dont_have_pid_in_comments_view_timestamp(self):
url = self.project.api_url_for('update_comments_timestamp')
res = self.app.put_json(url, auth=self.user.auth)
self.non_contributor.reload()
assert_not_in(self.project._id, self.non_contributor.comments_viewed_timestamp)
def test_n_unread_comments_updates_when_comment_is_added(self):
self._add_comment(self.project, auth=self.project.creator.auth)
self.project.reload()
url = self.project.api_url_for('list_comments')
res = self.app.get(url, auth=self.user.auth)
assert_equal(res.json.get('nUnread'), 1)
url = self.project.api_url_for('update_comments_timestamp')
res = self.app.put_json(url, auth=self.user.auth)
self.user.reload()
url = self.project.api_url_for('list_comments')
res = self.app.get(url, auth=self.user.auth)
assert_equal(res.json.get('nUnread'), 0)
def test_n_unread_comments_updates_when_comment_reply(self):
comment = CommentFactory(node=self.project, user=self.project.creator)
reply = CommentFactory(node=self.project, user=self.user, target=comment)
self.project.reload()
url = self.project.api_url_for('list_comments')
res = self.app.get(url, auth=self.project.creator.auth)
assert_equal(res.json.get('nUnread'), 1)
def test_n_unread_comments_updates_when_comment_is_edited(self):
self.test_edit_comment()
self.project.reload()
url = self.project.api_url_for('list_comments')
res = self.app.get(url, auth=self.user.auth)
assert_equal(res.json.get('nUnread'), 1)
def test_n_unread_comments_is_zero_when_no_comments(self):
url = self.project.api_url_for('list_comments')
res = self.app.get(url, auth=self.project.creator.auth)
assert_equal(res.json.get('nUnread'), 0)
class TestTagViews(OsfTestCase):
def setUp(self):
super(TestTagViews, self).setUp()
self.user = AuthUserFactory()
self.project = ProjectFactory(creator=self.user)
@unittest.skip('Tags endpoint disabled for now.')
def test_tag_get_returns_200(self):
url = web_url_for('project_tag', tag='foo')
res = self.app.get(url)
assert_equal(res.status_code, 200)
@requires_search
class TestSearchViews(OsfTestCase):
def setUp(self):
super(TestSearchViews, self).setUp()
import website.search.search as search
search.delete_all()
self.project = ProjectFactory(creator=UserFactory(fullname='Robbie Williams'))
self.contrib = UserFactory(fullname='Brian May')
for i in range(0, 12):
UserFactory(fullname='Freddie Mercury{}'.format(i))
def tearDown(self):
super(TestSearchViews, self).tearDown()
import website.search.search as search
search.delete_all()
def test_search_contributor(self):
url = api_url_for('search_contributor')
res = self.app.get(url, {'query': self.contrib.fullname})
assert_equal(res.status_code, 200)
result = res.json['users']
assert_equal(len(result), 1)
brian = result[0]
assert_equal(brian['fullname'], self.contrib.fullname)
assert_in('gravatar_url', brian)
assert_equal(brian['registered'], self.contrib.is_registered)
assert_equal(brian['active'], self.contrib.is_active)
def test_search_pagination_default(self):
url = api_url_for('search_contributor')
res = self.app.get(url, {'query': 'fr'})
assert_equal(res.status_code, 200)
result = res.json['users']
pages = res.json['pages']
page = res.json['page']
assert_equal(len(result), 5)
assert_equal(pages, 3)
assert_equal(page, 0)
def test_search_pagination_default_page_1(self):
url = api_url_for('search_contributor')
res = self.app.get(url, {'query': 'fr', 'page': 1})
assert_equal(res.status_code, 200)
result = res.json['users']
page = res.json['page']
assert_equal(len(result), 5)
assert_equal(page, 1)
def test_search_pagination_default_page_2(self):
url = api_url_for('search_contributor')
res = self.app.get(url, {'query': 'fr', 'page': 2})
assert_equal(res.status_code, 200)
result = res.json['users']
page = res.json['page']
assert_equal(len(result), 2)
assert_equal(page, 2)
def test_search_pagination_smaller_pages(self):
url = api_url_for('search_contributor')
res = self.app.get(url, {'query': 'fr', 'size': 5})
assert_equal(res.status_code, 200)
result = res.json['users']
pages = res.json['pages']
page = res.json['page']
assert_equal(len(result), 5)
assert_equal(page, 0)
assert_equal(pages, 3)
def test_search_pagination_smaller_pages_page_2(self):
url = api_url_for('search_contributor')
res = self.app.get(url, {'query': 'fr', 'page': 2, 'size': 5, })
assert_equal(res.status_code, 200)
result = res.json['users']
pages = res.json['pages']
page = res.json['page']
assert_equal(len(result), 2)
assert_equal(page, 2)
assert_equal(pages, 3)
def test_search_projects(self):
url = '/search/'
res = self.app.get(url, {'q': self.project.title})
assert_equal(res.status_code, 200)
class TestODMTitleSearch(OsfTestCase):
""" Docs from original method:
:arg term: The substring of the title.
:arg category: Category of the node.
:arg isDeleted: yes, no, or either. Either will not add a qualifier for that argument in the search.
:arg isFolder: yes, no, or either. Either will not add a qualifier for that argument in the search.
:arg isRegistration: yes, no, or either. Either will not add a qualifier for that argument in the search.
:arg includePublic: yes or no. Whether the projects listed should include public projects.
:arg includeContributed: yes or no. Whether the search should include projects the current user has
contributed to.
:arg ignoreNode: a list of nodes that should not be included in the search.
:return: a list of dictionaries of projects
"""
def setUp(self):
super(TestODMTitleSearch, self).setUp()
self.user = AuthUserFactory()
self.user_two = AuthUserFactory()
self.project = ProjectFactory(creator=self.user, title="foo")
self.project_two = ProjectFactory(creator=self.user_two, title="bar")
self.public_project = ProjectFactory(creator=self.user_two, is_public=True, title="baz")
self.registration_project = RegistrationFactory(creator=self.user, title="qux")
self.folder = FolderFactory(creator=self.user, title="quux")
self.dashboard = DashboardFactory(creator=self.user, title="Dashboard")
self.url = api_url_for('search_projects_by_title')
def test_search_projects_by_title(self):
res = self.app.get(self.url, {'term': self.project.title}, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(len(res.json), 1)
res = self.app.get(self.url,
{
'term': self.public_project.title,
'includePublic': 'yes',
'includeContributed': 'no'
}, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(len(res.json), 1)
res = self.app.get(self.url,
{
'term': self.project.title,
'includePublic': 'no',
'includeContributed': 'yes'
}, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(len(res.json), 1)
res = self.app.get(self.url,
{
'term': self.project.title,
'includePublic': 'no',
'includeContributed': 'yes',
'isRegistration': 'no'
}, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(len(res.json), 1)
res = self.app.get(self.url,
{
'term': self.project.title,
'includePublic': 'yes',
'includeContributed': 'yes',
'isRegistration': 'either'
}, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(len(res.json), 1)
res = self.app.get(self.url,
{
'term': self.public_project.title,
'includePublic': 'yes',
'includeContributed': 'yes',
'isRegistration': 'either'
}, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(len(res.json), 1)
res = self.app.get(self.url,
{
'term': self.registration_project.title,
'includePublic': 'yes',
'includeContributed': 'yes',
'isRegistration': 'either'
}, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(len(res.json), 2)
res = self.app.get(self.url,
{
'term': self.registration_project.title,
'includePublic': 'yes',
'includeContributed': 'yes',
'isRegistration': 'no'
}, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(len(res.json), 1)
res = self.app.get(self.url,
{
'term': self.folder.title,
'includePublic': 'yes',
'includeContributed': 'yes',
'isFolder': 'yes'
}, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(len(res.json), 1)
res = self.app.get(self.url,
{
'term': self.folder.title,
'includePublic': 'yes',
'includeContributed': 'yes',
'isFolder': 'no'
}, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(len(res.json), 0)
res = self.app.get(self.url,
{
'term': self.dashboard.title,
'includePublic': 'yes',
'includeContributed': 'yes',
'isFolder': 'no'
}, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(len(res.json), 0)
res = self.app.get(self.url,
{
'term': self.dashboard.title,
'includePublic': 'yes',
'includeContributed': 'yes',
'isFolder': 'yes'
}, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(len(res.json), 1)
class TestReorderComponents(OsfTestCase):
def setUp(self):
super(TestReorderComponents, self).setUp()
self.creator = AuthUserFactory()
self.contrib = AuthUserFactory()
# Project is public
self.project = ProjectFactory.build(creator=self.creator, public=True)
self.project.add_contributor(self.contrib, auth=Auth(self.creator))
# subcomponents: one public, one private (only the creator can see the private one)
self.public_component = NodeFactory(creator=self.creator, public=True)
self.private_component = NodeFactory(creator=self.creator, public=False)
self.project.nodes.append(self.public_component)
self.project.nodes.append(self.private_component)
self.project.save()
# https://github.com/CenterForOpenScience/openscienceframework.org/issues/489
def test_reorder_components_with_private_component(self):
# contrib tries to reorder components
payload = {
'new_list': [
'{0}:node'.format(self.private_component._primary_key),
'{0}:node'.format(self.public_component._primary_key),
]
}
url = self.project.api_url_for('project_reorder_components')
res = self.app.post_json(url, payload, auth=self.contrib.auth)
assert_equal(res.status_code, 200)
class TestDashboardViews(OsfTestCase):
def setUp(self):
super(TestDashboardViews, self).setUp()
self.creator = AuthUserFactory()
self.contrib = AuthUserFactory()
self.dashboard = DashboardFactory(creator=self.creator)
# https://github.com/CenterForOpenScience/openscienceframework.org/issues/571
def test_components_are_accessible_from_dashboard(self):
project = ProjectFactory(creator=self.creator, public=False)
component = NodeFactory(creator=self.creator, parent=project)
component.add_contributor(self.contrib, auth=Auth(self.creator))
component.save()
# Get the All My Projects smart folder from the dashboard
url = api_url_for('get_dashboard', nid=ALL_MY_PROJECTS_ID)
res = self.app.get(url, auth=self.contrib.auth)
assert_equal(len(res.json['data']), 1)
def test_get_dashboard_nodes(self):
project = ProjectFactory(creator=self.creator)
component = NodeFactory(creator=self.creator, parent=project)
url = api_url_for('get_dashboard_nodes')
res = self.app.get(url, auth=self.creator.auth)
assert_equal(res.status_code, 200)
nodes = res.json['nodes']
assert_equal(len(nodes), 2)
project_serialized = nodes[0]
assert_equal(project_serialized['id'], project._primary_key)
def test_get_dashboard_nodes_shows_components_if_user_is_not_contrib_on_project(self):
# User creates a project with a component
project = ProjectFactory(creator=self.creator)
component = NodeFactory(creator=self.creator, parent=project)
# User adds friend as a contributor to the component but not the
# project
friend = AuthUserFactory()
component.add_contributor(friend, auth=Auth(self.creator))
component.save()
# friend requests their dashboard nodes
url = api_url_for('get_dashboard_nodes')
res = self.app.get(url, auth=friend.auth)
nodes = res.json['nodes']
# Response includes component
assert_equal(len(nodes), 1)
assert_equal(nodes[0]['id'], component._primary_key)
# friend requests dashboard nodes, filtering against components
url = api_url_for('get_dashboard_nodes', no_components=True)
res = self.app.get(url, auth=friend.auth)
nodes = res.json['nodes']
assert_equal(len(nodes), 0)
def test_get_dashboard_nodes_admin_only(self):
friend = AuthUserFactory()
project = ProjectFactory(creator=self.creator)
# Friend is added as a contributor with read+write (not admin)
# permissions
perms = permissions.expand_permissions(permissions.WRITE)
project.add_contributor(friend, auth=Auth(self.creator), permissions=perms)
project.save()
url = api_url_for('get_dashboard_nodes')
res = self.app.get(url, auth=friend.auth)
assert_equal(res.json['nodes'][0]['id'], project._primary_key)
# Can filter project according to permission
url = api_url_for('get_dashboard_nodes', permissions='admin')
res = self.app.get(url, auth=friend.auth)
assert_equal(len(res.json['nodes']), 0)
def test_get_dashboard_nodes_invalid_permission(self):
url = api_url_for('get_dashboard_nodes', permissions='not-valid')
res = self.app.get(url, auth=self.creator.auth, expect_errors=True)
assert_equal(res.status_code, 400)
def test_registered_components_are_accessible_from_dashboard(self):
project = ProjectFactory(creator=self.creator, public=False)
component = NodeFactory(creator=self.creator, parent=project)
component.add_contributor(self.contrib, auth=Auth(self.creator))
component.save()
project.register_node(
None, Auth(self.creator), '', '',
)
# Get the All My Registrations smart folder from the dashboard
url = api_url_for('get_dashboard', nid=ALL_MY_REGISTRATIONS_ID)
res = self.app.get(url, auth=self.contrib.auth)
assert_equal(len(res.json['data']), 1)
def test_archiving_nodes_appear_in_all_my_registrations(self):
project = ProjectFactory(creator=self.creator, public=False)
reg = RegistrationFactory(project=project, user=self.creator)
# Get the All My Registrations smart folder from the dashboard
url = api_url_for('get_dashboard', nid=ALL_MY_REGISTRATIONS_ID)
res = self.app.get(url, auth=self.creator.auth)
assert_equal(res.json['data'][0]['node_id'], reg._id)
def test_untouched_node_is_collapsed(self):
found_item = False
folder = FolderFactory(creator=self.creator, public=True)
self.dashboard.add_pointer(folder, auth=Auth(self.creator))
url = api_url_for('get_dashboard', nid=self.dashboard._id)
dashboard_data = self.app.get(url, auth=self.creator.auth)
dashboard_json = dashboard_data.json[u'data']
for dashboard_item in dashboard_json:
if dashboard_item[u'node_id'] == folder._id:
found_item = True
assert_false(dashboard_item[u'expand'], "Expand state was not set properly.")
assert_true(found_item, "Did not find the folder in the dashboard.")
def test_expand_node_sets_expand_to_true(self):
found_item = False
folder = FolderFactory(creator=self.creator, public=True)
self.dashboard.add_pointer(folder, auth=Auth(self.creator))
url = api_url_for('expand', pid=folder._id)
self.app.post(url, auth=self.creator.auth)
url = api_url_for('get_dashboard', nid=self.dashboard._id)
dashboard_data = self.app.get(url, auth=self.creator.auth)
dashboard_json = dashboard_data.json[u'data']
for dashboard_item in dashboard_json:
if dashboard_item[u'node_id'] == folder._id:
found_item = True
assert_true(dashboard_item[u'expand'], "Expand state was not set properly.")
assert_true(found_item, "Did not find the folder in the dashboard.")
def test_collapse_node_sets_expand_to_false(self):
found_item = False
folder = FolderFactory(creator=self.creator, public=True)
self.dashboard.add_pointer(folder, auth=Auth(self.creator))
# Expand the folder
url = api_url_for('expand', pid=folder._id)
self.app.post(url, auth=self.creator.auth)
# Serialize the dashboard and test
url = api_url_for('get_dashboard', nid=self.dashboard._id)
dashboard_data = self.app.get(url, auth=self.creator.auth)
dashboard_json = dashboard_data.json[u'data']
for dashboard_item in dashboard_json:
if dashboard_item[u'node_id'] == folder._id:
found_item = True
assert_true(dashboard_item[u'expand'], "Expand state was not set properly.")
assert_true(found_item, "Did not find the folder in the dashboard.")
# Collapse the folder
found_item = False
url = api_url_for('collapse', pid=folder._id)
self.app.post(url, auth=self.creator.auth)
# Serialize the dashboard and test
url = api_url_for('get_dashboard', nid=self.dashboard._id)
dashboard_data = self.app.get(url, auth=self.creator.auth)
dashboard_json = dashboard_data.json[u'data']
for dashboard_item in dashboard_json:
if dashboard_item[u'node_id'] == folder._id:
found_item = True
assert_false(dashboard_item[u'expand'], "Expand state was not set properly.")
assert_true(found_item, "Did not find the folder in the dashboard.")
def test_folder_new_post(self):
url = api_url_for('folder_new_post', nid=self.dashboard._id)
found_item = False
# Make the folder
title = 'New test folder'
payload = {'title': title, }
self.app.post_json(url, payload, auth=self.creator.auth)
# Serialize the dashboard and test
url = api_url_for('get_dashboard', nid=self.dashboard._id)
dashboard_data = self.app.get(url, auth=self.creator.auth)
dashboard_json = dashboard_data.json[u'data']
for dashboard_item in dashboard_json:
if dashboard_item[u'name'] == title:
found_item = True
assert_true(found_item, "Did not find the folder in the dashboard.")
class TestWikiWidgetViews(OsfTestCase):
def setUp(self):
super(TestWikiWidgetViews, self).setUp()
# project with no home wiki page
self.project = ProjectFactory()
self.read_only_contrib = AuthUserFactory()
self.project.add_contributor(self.read_only_contrib, permissions='read')
self.noncontributor = AuthUserFactory()
# project with no home wiki content
self.project2 = ProjectFactory(creator=self.project.creator)
self.project2.add_contributor(self.read_only_contrib, permissions='read')
self.project2.update_node_wiki(name='home', content='', auth=Auth(self.project.creator))
def test_show_wiki_for_contributors_when_no_wiki_or_content(self):
assert_true(_should_show_wiki_widget(self.project, self.project.creator))
assert_true(_should_show_wiki_widget(self.project2, self.project.creator))
def test_show_wiki_is_false_for_read_contributors_when_no_wiki_or_content(self):
assert_false(_should_show_wiki_widget(self.project, self.read_only_contrib))
assert_false(_should_show_wiki_widget(self.project2, self.read_only_contrib))
def test_show_wiki_is_false_for_noncontributors_when_no_wiki_or_content(self):
assert_false(_should_show_wiki_widget(self.project, self.noncontributor))
assert_false(_should_show_wiki_widget(self.project2, self.read_only_contrib))
class TestForkViews(OsfTestCase):
def setUp(self):
super(TestForkViews, self).setUp()
self.user = AuthUserFactory()
self.project = ProjectFactory.build(creator=self.user, is_public=True)
self.consolidated_auth = Auth(user=self.project.creator)
self.user.save()
self.project.save()
def test_fork_private_project_non_contributor(self):
self.project.set_privacy("private")
self.project.save()
url = self.project.api_url_for('node_fork_page')
non_contributor = AuthUserFactory()
res = self.app.post_json(url,
auth=non_contributor.auth,
expect_errors=True)
assert_equal(res.status_code, http.FORBIDDEN)
def test_fork_public_project_non_contributor(self):
url = self.project.api_url_for('node_fork_page')
non_contributor = AuthUserFactory()
res = self.app.post_json(url, auth=non_contributor.auth)
assert_equal(res.status_code, 200)
def test_fork_project_contributor(self):
contributor = AuthUserFactory()
self.project.set_privacy("private")
self.project.add_contributor(contributor)
self.project.save()
url = self.project.api_url_for('node_fork_page')
res = self.app.post_json(url, auth=contributor.auth)
assert_equal(res.status_code, 200)
def test_registered_forks_dont_show_in_fork_list(self):
fork = self.project.fork_node(self.consolidated_auth)
RegistrationFactory(project=fork)
url = self.project.api_url_for('get_forks')
res = self.app.get(url, auth=self.user.auth)
assert_equal(len(res.json['nodes']), 1)
assert_equal(res.json['nodes'][0]['id'], fork._id)
class TestProjectCreation(OsfTestCase):
def setUp(self):
super(TestProjectCreation, self).setUp()
self.creator = AuthUserFactory()
self.url = api_url_for('project_new_post')
def test_needs_title(self):
res = self.app.post_json(self.url, {}, auth=self.creator.auth, expect_errors=True)
assert_equal(res.status_code, 400)
def test_create_component_strips_html(self):
user = AuthUserFactory()
project = ProjectFactory(creator=user)
url = web_url_for('project_new_node', pid=project._id)
post_data = {'title': '<b>New <blink>Component</blink> Title</b>', 'category': ''}
request = self.app.post(url, post_data, auth=user.auth).follow()
project.reload()
child = project.nodes[0]
# HTML has been stripped
assert_equal(child.title, 'New Component Title')
def test_strip_html_from_title(self):
payload = {
'title': 'no html <b>here</b>'
}
res = self.app.post_json(self.url, payload, auth=self.creator.auth)
node = Node.load(res.json['projectUrl'].replace('/', ''))
assert_true(node)
assert_equal('no html here', node.title)
def test_only_needs_title(self):
payload = {
'title': 'Im a real title'
}
res = self.app.post_json(self.url, payload, auth=self.creator.auth)
assert_equal(res.status_code, 201)
def test_title_must_be_one_long(self):
payload = {
'title': ''
}
res = self.app.post_json(
self.url, payload, auth=self.creator.auth, expect_errors=True)
assert_equal(res.status_code, 400)
def test_title_must_be_less_than_200(self):
payload = {
'title': ''.join([str(x) for x in xrange(0, 250)])
}
res = self.app.post_json(
self.url, payload, auth=self.creator.auth, expect_errors=True)
assert_equal(res.status_code, 400)
def test_fails_to_create_project_with_whitespace_title(self):
payload = {
'title': ' '
}
res = self.app.post_json(
self.url, payload, auth=self.creator.auth, expect_errors=True)
assert_equal(res.status_code, 400)
def test_creates_a_project(self):
payload = {
'title': 'Im a real title'
}
res = self.app.post_json(self.url, payload, auth=self.creator.auth)
assert_equal(res.status_code, 201)
node = Node.load(res.json['projectUrl'].replace('/', ''))
assert_true(node)
assert_equal(node.title, 'Im a real title')
def test_new_project_returns_serialized_node_data(self):
payload = {
'title': 'Im a real title'
}
res = self.app.post_json(self.url, payload, auth=self.creator.auth)
assert_equal(res.status_code, 201)
node = res.json['newNode']
assert_true(node)
assert_equal(node['title'], 'Im a real title')
def test_description_works(self):
payload = {
'title': 'Im a real title',
'description': 'I describe things!'
}
res = self.app.post_json(self.url, payload, auth=self.creator.auth)
assert_equal(res.status_code, 201)
node = Node.load(res.json['projectUrl'].replace('/', ''))
assert_true(node)
assert_equal(node.description, 'I describe things!')
def test_can_template(self):
other_node = ProjectFactory(creator=self.creator)
payload = {
'title': 'Im a real title',
'template': other_node._id
}
res = self.app.post_json(self.url, payload, auth=self.creator.auth)
assert_equal(res.status_code, 201)
node = Node.load(res.json['projectUrl'].replace('/', ''))
assert_true(node)
assert_equal(node.template_node, other_node)
def test_project_before_template_no_addons(self):
project = ProjectFactory()
res = self.app.get(project.api_url_for('project_before_template'), auth=project.creator.auth)
assert_equal(res.json['prompts'], [])
def test_project_before_template_with_addons(self):
project = ProjectWithAddonFactory(addon='github')
res = self.app.get(project.api_url_for('project_before_template'), auth=project.creator.auth)
assert_in('GitHub', res.json['prompts'])
def test_project_new_from_template_non_user(self):
project = ProjectFactory()
url = api_url_for('project_new_from_template', nid=project._id)
res = self.app.post(url, auth=None)
assert_equal(res.status_code, 302)
res2 = res.follow(expect_errors=True)
assert_equal(res2.status_code, 301)
assert_equal(res2.request.path, '/login')
def test_project_new_from_template_public_non_contributor(self):
non_contributor = AuthUserFactory()
project = ProjectFactory(is_public=True)
url = api_url_for('project_new_from_template', nid=project._id)
res = self.app.post(url, auth=non_contributor.auth)
assert_equal(res.status_code, 201)
def test_project_new_from_template_contributor(self):
contributor = AuthUserFactory()
project = ProjectFactory(is_public=False)
project.add_contributor(contributor)
project.save()
url = api_url_for('project_new_from_template', nid=project._id)
res = self.app.post(url, auth=contributor.auth)
assert_equal(res.status_code, 201)
class TestUnconfirmedUserViews(OsfTestCase):
def test_can_view_profile(self):
user = UnconfirmedUserFactory()
url = web_url_for('profile_view_id', uid=user._id)
res = self.app.get(url)
assert_equal(res.status_code, 200)
class TestProfileNodeList(OsfTestCase):
def setUp(self):
OsfTestCase.setUp(self)
self.user = AuthUserFactory()
self.public = ProjectFactory(is_public=True)
self.public_component = NodeFactory(parent=self.public, is_public=True)
self.private = ProjectFactory(is_public=False)
self.deleted = ProjectFactory(is_public=True, is_deleted=True)
for node in (self.public, self.public_component, self.private, self.deleted):
node.add_contributor(self.user, auth=Auth(node.creator))
node.save()
def test_get_public_projects(self):
url = api_url_for('get_public_projects', uid=self.user._id)
res = self.app.get(url)
node_ids = [each['id'] for each in res.json['nodes']]
assert_in(self.public._id, node_ids)
assert_not_in(self.private._id, node_ids)
assert_not_in(self.deleted._id, node_ids)
assert_not_in(self.public_component._id, node_ids)
def test_get_public_components(self):
url = api_url_for('get_public_components', uid=self.user._id)
res = self.app.get(url)
node_ids = [each['id'] for each in res.json['nodes']]
assert_in(self.public_component._id, node_ids)
assert_not_in(self.public._id, node_ids)
assert_not_in(self.private._id, node_ids)
assert_not_in(self.deleted._id, node_ids)
class TestStaticFileViews(OsfTestCase):
def test_robots_dot_txt(self):
res = self.app.get('/robots.txt')
assert_equal(res.status_code, 200)
assert_in('User-agent', res)
assert_in('text/plain', res.headers['Content-Type'])
def test_favicon(self):
res = self.app.get('/favicon.ico')
assert_equal(res.status_code, 200)
assert_in('image/vnd.microsoft.icon', res.headers['Content-Type'])
def test_getting_started_page(self):
res = self.app.get('/getting-started/')
assert_equal(res.status_code, 200)
class TestUserConfirmSignal(OsfTestCase):
def test_confirm_user_signal_called_when_user_claims_account(self):
unclaimed_user = UnconfirmedUserFactory()
# unclaimed user has been invited to a project.
referrer = UserFactory()
project = ProjectFactory(creator=referrer)
unclaimed_user.add_unclaimed_record(project, referrer, 'foo')
unclaimed_user.save()
token = unclaimed_user.get_unclaimed_record(project._primary_key)['token']
with capture_signals() as mock_signals:
url = web_url_for('claim_user_form', pid=project._id, uid=unclaimed_user._id, token=token)
payload = {'username': unclaimed_user.username,
'password': 'password',
'password2': 'password'}
res = self.app.post(url, payload)
assert_equal(res.status_code, 302)
assert_equal(mock_signals.signals_sent(), set([auth.signals.user_confirmed]))
def test_confirm_user_signal_called_when_user_confirms_email(self):
unconfirmed_user = UnconfirmedUserFactory()
unconfirmed_user.save()
# user goes to email confirmation link
token = unconfirmed_user.get_confirmation_token(unconfirmed_user.username)
with capture_signals() as mock_signals:
url = web_url_for('confirm_email_get', uid=unconfirmed_user._id, token=token)
res = self.app.get(url)
assert_equal(res.status_code, 302)
assert_equal(mock_signals.signals_sent(), set([auth.signals.user_confirmed]))
if __name__ == '__main__':
unittest.main()
| sbt9uc/osf.io | tests/test_views.py | Python | apache-2.0 | 181,352 | ["Brian"] | c7103b9ead7aeb45849934510dfd22e337856511c452ceca25984216689ad266 |
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2006 Donald N. Allingham
# Copyright (C) 2008 Brian G. Matherly
# Copyright (C) 2010 Jakim Friant
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""Tools/Database Processing/Rebuild Secondary Indexes"""
#-------------------------------------------------------------------------
#
# python modules
#
#-------------------------------------------------------------------------
from __future__ import print_function
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
#------------------------------------------------------------------------
#
# Set up logging
#
#------------------------------------------------------------------------
import logging
log = logging.getLogger(".Rebuild")
#-------------------------------------------------------------------------
#
# gtk modules
#
#-------------------------------------------------------------------------
#-------------------------------------------------------------------------
#
# GRAMPS modules
#
#-------------------------------------------------------------------------
from gramps.gui.plug import tool
from gramps.gui.dialog import OkDialog
from gramps.gen.updatecallback import UpdateCallback
#-------------------------------------------------------------------------
#
# runTool
#
#-------------------------------------------------------------------------
class Rebuild(tool.Tool, UpdateCallback):
def __init__(self, dbstate, user, options_class, name, callback=None):
uistate = user.uistate
tool.Tool.__init__(self, dbstate, options_class, name)
if self.db.readonly:
return
self.db.disable_signals()
if uistate:
self.callback = uistate.pulse_progressbar
uistate.set_busy_cursor(True)
uistate.progress.show()
uistate.push_message(dbstate, _("Rebuilding secondary indexes..."))
UpdateCallback.__init__(self, self.callback)
self.set_total(12)
self.db.rebuild_secondary(self.update)
self.reset()
uistate.set_busy_cursor(False)
uistate.progress.hide()
OkDialog(_("Secondary indexes rebuilt"),
_('All secondary indexes have been rebuilt.'),
parent=uistate.window)
else:
print("Rebuilding Secondary Indexes...")
self.db.rebuild_secondary(self.update_empty)
print("All secondary indexes have been rebuilt.")
self.db.enable_signals()
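# Progress reporting above follows the UpdateCallback pattern: the GUI branch wires
# self.callback to uistate.pulse_progressbar, sizes the progress bar with set_total(12),
# and passes self.update to db.rebuild_secondary() so each step of the rebuild advances
# the bar before reset() clears it. The non-GUI branch passes update_empty instead, so
# the rebuild runs silently apart from the printed messages.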
#------------------------------------------------------------------------
#
#
#
#------------------------------------------------------------------------
class RebuildOptions(tool.ToolOptions):
"""
Defines options and provides handling interface.
"""
def __init__(self, name, person_id=None):
tool.ToolOptions.__init__(self, name, person_id)
| pmghalvorsen/gramps_branch | gramps/plugins/tool/rebuild.py | Python | gpl-2.0 | 3,667 | ["Brian"] | 7a2b9863da1ebe2017536b443f784fa1c1b1d78936c38d4015d5fea89decc58c |
import os
import time
import string
import random
from django.core.urlresolvers import reverse
from django.contrib.staticfiles.testing import StaticLiveServerTestCase
from django.contrib.auth.models import User, Group
from django.contrib.staticfiles.templatetags.staticfiles import static
from splinter import Browser
from django.conf import settings
# from jp2_online.settings.base import BASE_DIR
from administracion.models import Escuela
from estudios_socioeconomicos.models import Estudio, Seccion, Pregunta, Respuesta
from estudios_socioeconomicos.models import Subseccion, OpcionRespuesta, Foto
from familias.models import Familia, Integrante, Alumno, Tutor
from indicadores.models import Periodo
from perfiles_usuario.models import Capturista
from estudios_socioeconomicos.load import load_data
from perfiles_usuario.utils import CAPTURISTA_GROUP
class TestViewsCapturaEstudio(StaticLiveServerTestCase):
"""Integration test suite for testing the views in the app: capturista.
Test the urls for 'capturista' which allow the user to fill out a study.
Attributes
----------
browser : Browser
Driver to navigate through websites and to run integration tests.
"""
def setUp(self):
""" Initialize the browser, create a user, a family and a study.
Perform login.
"""
self.browser = Browser('chrome')
test_username = 'erikiano'
test_password = 'vacalalo'
elerik = User.objects.create_user(
username=test_username,
email='latelma@junipero.sas',
password=test_password,
first_name='telma',
last_name='suapellido')
self.capturista = Capturista.objects.create(user=elerik)
self.capturista.save()
load_data()
self.familia = Familia.objects.create(
numero_hijos_diferentes_papas=3,
explicacion_solvencia='narco',
estado_civil='secreto',
localidad='otro')
self.estudio = Estudio.objects.create(
capturista=self.capturista,
familia=self.familia,
status=Estudio.BORRADOR)
self.estudio.save()
self.familia.save()
self.assertEqual(Respuesta.objects.all().count(), Pregunta.objects.all().count())
self.test_url_name = 'captura:contestar_estudio'
self.browser.visit(self.live_server_url + reverse('tosp_auth:login'))
self.browser.fill('username', test_username)
self.browser.fill('password', test_password)
self.browser.find_by_id('login-submit').click()
def tearDown(self):
self.browser.driver.close()
self.browser.quit()
def test_displaying_question_and_answers(self):
""" Tests that when a user loads the URL for filling a study,
the html elements for all the questions in that section
are rendered.
"""
secciones = Seccion.objects.all().order_by('numero')
self.browser.visit(
self.live_server_url + reverse(
self.test_url_name,
kwargs={'id_estudio': self.estudio.id, 'numero_seccion': secciones[0].numero}))
subsecciones = Subseccion.objects.filter(seccion=secciones[0])
for subseccion in subsecciones:
self.assertTrue(self.browser.is_text_present(subseccion.nombre))
preguntas = Pregunta.objects.filter(subseccion__in=subsecciones)
for pregunta in preguntas:
respuesta = Respuesta.objects.filter(pregunta=pregunta)[0]
num_opciones = OpcionRespuesta.objects.filter(pregunta=pregunta).count()
if num_opciones > 0:
for i in range(num_opciones):
answer_input = self.browser.find_by_id(
'id_respuesta-' + str(respuesta.id) + '-eleccion_' + str(i))
self.assertNotEqual(answer_input, [])
self.assertTrue(self.browser.is_text_present(pregunta.texto))
else:
answer_input = self.browser.find_by_id(
'id_respuesta-' + str(respuesta.id) + '-respuesta')
self.assertNotEqual(answer_input, [])
self.assertTrue(self.browser.is_text_present(pregunta.texto))
def test_incorrect_url_parameters(self):
""" Test that a user can't query inexistent studies or sections.
"""
secciones = Seccion.objects.all().order_by('numero')
self.browser.visit(
self.live_server_url + reverse(
self.test_url_name,
kwargs={'id_estudio': 0, 'numero_seccion': secciones[0].numero}))
self.assertTrue(self.browser.is_text_present('Lo sentimos'))
def test_adding_more_answers(self):
""" Test that a user can dynamically add more questions to a
study.
"""
secciones = Seccion.objects.all().order_by('numero')
subsecciones = Subseccion.objects.filter(seccion=secciones[0])
preguntas = Pregunta.objects.filter(subseccion__in=subsecciones)
opciones = OpcionRespuesta.objects.filter(pregunta__in=preguntas)
preguntas_texto = preguntas.exclude(pk__in=opciones.values_list('pregunta', flat=True))
self.browser.visit(
self.live_server_url + reverse(
self.test_url_name,
kwargs={'id_estudio': self.estudio.id, 'numero_seccion': secciones[0].numero}))
number_answers = Respuesta.objects.all().count()
self.browser.find_by_id('answer-for-' + str(preguntas_texto[0].id)).first.click()
time.sleep(.1)
self.assertEqual(number_answers + 1, Respuesta.objects.all().count())
nueva_respuesta = Respuesta.objects.all().order_by('-id')[0]
answer_input = self.browser.find_by_id(
'id_respuesta-' + str(nueva_respuesta.id) + '-respuesta')
self.assertNotEqual(answer_input, [])
def test_removing_answers(self):
""" Test that a user can dynamically remove questions from a study.
"""
secciones = Seccion.objects.all().order_by('numero')
subsecciones = Subseccion.objects.filter(seccion=secciones[0])
preguntas = Pregunta.objects.filter(subseccion__in=subsecciones)
opciones = OpcionRespuesta.objects.filter(pregunta__in=preguntas)
preguntas_texto = preguntas.exclude(pk__in=opciones.values_list('pregunta', flat=True))
self.browser.visit(
self.live_server_url + reverse(
self.test_url_name,
kwargs={'id_estudio': self.estudio.id, 'numero_seccion': secciones[0].numero}))
self.browser.find_by_id('answer-for-' + str(preguntas_texto[0].id)).first.click()
number_answers = Respuesta.objects.all().count()
self.browser.find_by_css('.delete-answer').first.click()
self.assertNotEqual(number_answers, Respuesta.objects.all().count())
def test_submitting_answers(self):
""" Test that when a user submits his answers and moves on to the
next section the answers are saved.
"""
secciones = Seccion.objects.all().order_by('numero')
subsecciones = Subseccion.objects.filter(seccion=secciones[0])
preguntas = Pregunta.objects.filter(subseccion__in=subsecciones)
respuestas = Respuesta.objects.filter(pregunta__in=preguntas)
self.browser.visit(
self.live_server_url + reverse(
self.test_url_name,
kwargs={'id_estudio': self.estudio.id, 'numero_seccion': secciones[0].numero}))
random_texts = {}
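        # Fill every rendered answer: check the last option for multiple-choice
        # questions and type random text for open questions, remembering the text
        # so it can be verified after navigating away and back.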
for pregunta in preguntas:
respuestas = Respuesta.objects.filter(pregunta=pregunta)
for respuesta in respuestas:
num_opciones = OpcionRespuesta.objects.filter(pregunta=pregunta).count()
if num_opciones > 0:
answer_input = self.browser.find_by_id(
'id_respuesta-' + str(respuesta.id) + '-eleccion_' + str(num_opciones-1))
answer_input.check()
else:
new_text = ''.join(random.choice(string.ascii_uppercase) for _ in range(12))
answer_input = self.browser.find_by_id(
'id_respuesta-' + str(respuesta.id) + '-respuesta').first
answer_input.fill(new_text)
random_texts[respuesta.id] = new_text
self.browser.find_by_id('next_section_button').first.click()
time.sleep(.1)
self.browser.find_by_id('previous_section_button').first.click()
time.sleep(.1)
for pregunta in preguntas:
respuestas = Respuesta.objects.filter(pregunta=pregunta)
for respuesta in respuestas:
num_opciones = OpcionRespuesta.objects.filter(pregunta=pregunta).count()
if num_opciones > 0:
answer_input = self.browser.find_by_id(
'id_respuesta-' + str(respuesta.id) + '-eleccion_' + str(num_opciones-1))
self.assertTrue(answer_input.checked)
else:
answer_input = self.browser.find_by_id(
'id_respuesta-' + str(respuesta.id) + '-respuesta').first
self.assertEqual(answer_input.value, random_texts[respuesta.id])
def test_submitting_answer_with_dynamic_answers(self):
""" Test that answers generated dynamically are being saved after submission.
"""
secciones = Seccion.objects.all().order_by('numero')
subsecciones = Subseccion.objects.filter(seccion=secciones[0])
preguntas = Pregunta.objects.filter(subseccion__in=subsecciones)
opciones = OpcionRespuesta.objects.filter(pregunta__in=preguntas)
preguntas_texto = preguntas.exclude(pk__in=opciones.values_list('pregunta', flat=True))
self.browser.visit(
self.live_server_url + reverse(
self.test_url_name,
kwargs={'id_estudio': self.estudio.id, 'numero_seccion': secciones[0].numero}))
number_answers = Respuesta.objects.all().count()
self.browser.find_by_id('answer-for-' + str(preguntas_texto[0].id)).first.click()
time.sleep(.1)
self.assertEqual(number_answers + 1, Respuesta.objects.all().count())
nueva_respuesta = Respuesta.objects.all().order_by('-id')[0]
new_text = ''.join(random.choice(string.ascii_uppercase) for _ in range(12))
answer_input = self.browser.find_by_id(
'id_respuesta-' + str(nueva_respuesta.id) + '-respuesta').first
answer_input.fill(new_text)
self.browser.find_by_id('next_section_button').first.click()
time.sleep(.1)
self.browser.find_by_id('previous_section_button').first.click()
time.sleep(.1)
answer_input = self.browser.find_by_id(
'id_respuesta-' + str(nueva_respuesta.id) + '-respuesta').first
self.assertEqual(answer_input.value, new_text)
def test_submitting_two_studies(self):
""" Test that answers generated dynamically are being saved after submission.
"""
secciones = Seccion.objects.all().order_by('numero')
subsecciones = Subseccion.objects.filter(seccion=secciones[0])
preguntas = Pregunta.objects.filter(subseccion__in=subsecciones)
respuestas = Respuesta.objects.filter(pregunta__in=preguntas)
self.browser.visit(
self.live_server_url + reverse(
self.test_url_name,
kwargs={'id_estudio': self.estudio.id, 'numero_seccion': secciones[0].numero}))
random_texts = {}
for pregunta in preguntas:
respuestas = Respuesta.objects.filter(pregunta=pregunta)
for respuesta in respuestas:
num_opciones = OpcionRespuesta.objects.filter(pregunta=pregunta).count()
if num_opciones > 0:
answer_input = self.browser.find_by_id(
'id_respuesta-' + str(respuesta.id) + '-eleccion_' + str(num_opciones-1))
answer_input.check()
else:
new_text = ''.join(random.choice(string.ascii_uppercase) for _ in range(12))
answer_input = self.browser.find_by_id(
'id_respuesta-' + str(respuesta.id) + '-respuesta').first
answer_input.fill(new_text)
random_texts[respuesta.id] = new_text
self.browser.find_by_id('next_section_button').first.click()
time.sleep(.1)
self.browser.find_by_id('previous_section_button').first.click()
time.sleep(.1)
for pregunta in preguntas:
respuestas = Respuesta.objects.filter(pregunta=pregunta)
for respuesta in respuestas:
num_opciones = OpcionRespuesta.objects.filter(pregunta=pregunta).count()
if num_opciones > 0:
answer_input = self.browser.find_by_id(
'id_respuesta-' + str(respuesta.id) + '-eleccion_' + str(num_opciones-1))
self.assertTrue(answer_input.checked)
else:
answer_input = self.browser.find_by_id(
'id_respuesta-' + str(respuesta.id) + '-respuesta').first
self.assertEqual(answer_input.value, random_texts[respuesta.id])
self.browser.find_by_css('.fa-file-text').first.click()
time.sleep(.1)
self.browser.find_by_id('create_estudio').click()
time.sleep(.1)
self.browser.find_by_id('id_nombre_familiar').fill('Simpson')
self.browser.find_by_id('id_numero_hijos_diferentes_papas').fill(2)
self.browser.select('estado_civil', 'casado_iglesia')
self.browser.select('localidad', 'poblado_jurica')
self.browser.find_by_id('submit_familia').click()
time.sleep(.1)
new_study = Estudio.objects.all()[1]
self.browser.find_by_id('navigation_cuestionario').click()
time.sleep(.1)
random_texts = {}
for pregunta in preguntas:
respuestas = Respuesta.objects.filter(pregunta=pregunta, estudio=new_study)
for respuesta in respuestas:
num_opciones = OpcionRespuesta.objects.filter(pregunta=pregunta).count()
if num_opciones > 0:
answer_input = self.browser.find_by_id(
'id_respuesta-' + str(respuesta.id) + '-eleccion_' + str(num_opciones-1))
answer_input.check()
else:
new_text = ''.join(random.choice(string.ascii_uppercase) for _ in range(12))
answer_input = self.browser.find_by_id(
'id_respuesta-' + str(respuesta.id) + '-respuesta').first
answer_input.fill(new_text)
random_texts[respuesta.id] = new_text
self.browser.find_by_id('next_section_button').first.click()
time.sleep(.1)
self.browser.find_by_id('previous_section_button').first.click()
time.sleep(.1)
for pregunta in preguntas:
respuestas = Respuesta.objects.filter(pregunta=pregunta, estudio=new_study)
for respuesta in respuestas:
num_opciones = OpcionRespuesta.objects.filter(pregunta=pregunta).count()
if num_opciones > 0:
answer_input = self.browser.find_by_id(
'id_respuesta-' + str(respuesta.id) + '-eleccion_' + str(num_opciones-1))
self.assertTrue(answer_input.checked)
else:
answer_input = self.browser.find_by_id(
'id_respuesta-' + str(respuesta.id) + '-respuesta').first
self.assertEqual(answer_input.value, random_texts[respuesta.id])
def test_passing_all_sections(self):
""" Test going through all possible sections.
"""
secciones = Seccion.objects.all().order_by('numero')
self.browser.visit(
self.live_server_url + reverse(
self.test_url_name,
kwargs={'id_estudio': self.estudio.id, 'numero_seccion': secciones[0].numero}))
for seccion in secciones:
time.sleep(.1)
self.assertTrue(self.browser.is_text_present(seccion.nombre))
self.browser.find_by_id('next_section_button').first.click()
class TestViewsFamiliaLive(StaticLiveServerTestCase):
""" The purpose of this class is to suplement TestViewsFamilia, as some of the required tests
cannot be ran via de django client.
Attributes
----------
browser : Browser
Driver to navigate through websites and to run integration tests.
elerik : User
        User that will be used as a capturista in order to fill everything
        related to familia.
familia1 : Familia
Used in tests that depend on creating an object related to a familia.
estudio1 : Estudio
Used in tests that depend on creating or editing an existent estudio.
integrante1 : Integrante
Used in tests that depend on creating an object related to an integrante.
integrante2 : Integrante
Used in tests that depend on editing an alumno object.
alumno1 : Alumno
Used in the tests that depend on creating or editing an object related to an alumno.
tutor1: Tutor
Used in the tests that depend on creating or editing an object related to a tutor.
    escuela : Escuela
        Used in tests that depend on creating an object related to an escuela.
capturista : Capturista
        Associated with the User, as this object is required for permissions and
creation.
"""
def setUp(self):
""" Creates all the initial necessary objects for the tests
"""
self.browser = Browser('chrome')
test_username = 'erikiano'
test_password = 'vacalalo'
elerik = User.objects.create_user(
username=test_username,
email='latelma@junipero.sas',
password=test_password,
first_name='telma',
last_name='suapellido')
self.escuela = Escuela.objects.create(nombre='Juan Pablo')
self.capturista = Capturista.objects.create(user=elerik)
numero_hijos_inicial = 3
estado_civil_inicial = 'soltero'
localidad_inicial = 'salitre'
self.familia1 = Familia.objects.create(
numero_hijos_diferentes_papas=numero_hijos_inicial,
estado_civil=estado_civil_inicial,
localidad=localidad_inicial)
self.estudio1 = Estudio.objects.create(capturista=self.capturista,
familia=self.familia1)
self.integrante1 = Integrante.objects.create(familia=self.familia1,
nombres='Alberto',
apellidos='Lopez',
nivel_estudios='doctorado',
fecha_de_nacimiento='1996-02-26')
self.integrante2 = Integrante.objects.create(familia=self.familia1,
nombres='Pedro',
apellidos='Perez',
nivel_estudios='doctorado',
fecha_de_nacimiento='1996-02-26')
self.alumno1 = Alumno.objects.create(integrante=self.integrante1,
numero_sae='5876',
escuela=self.escuela)
self.tutor1 = Tutor.objects.create(integrante=self.integrante2,
relacion='padre')
self.browser.visit(self.live_server_url + reverse('tosp_auth:login'))
self.browser.fill('username', test_username)
self.browser.fill('password', test_password)
self.browser.find_by_id('login-submit').click()
def tearDown(self):
""" At the end of tests, close the browser.
"""
self.browser.driver.close()
self.browser.quit()
def test_edit_integrantes(self):
""" Test that we can edit multiple integrantes.
"""
new_name = 'Alejandro'
numero_sae = '666'
url = self.live_server_url + reverse('captura:list_integrantes',
kwargs={'id_familia': self.familia1.id})
self.browser.visit(url)
self.browser.find_by_css('.edit-integrante-link').first.click()
time.sleep(.3)
# sin sae
self.browser.find_by_css('#modal_edit_integrante #id_numero_sae').first.fill('')
self.browser.find_by_css('#modal_edit_integrante #btn_send_create_user').first.click()
self.assertTrue(
self.browser.is_text_present('El estudiante necesita el número sae, ' +
'su plantel y ciclo escolar'))
self.browser.find_by_css('.swal2-confirm').first.click()
# con sae
self.browser.find_by_css('#modal_edit_integrante #id_nombres').first.fill(new_name)
self.browser.find_by_css('#modal_edit_integrante #id_numero_sae').first.fill(numero_sae)
self.browser.find_by_css('#modal_edit_integrante #btn_send_create_user').first.click()
self.assertTrue(self.browser.is_text_present('Integrante Editado'))
self.browser.find_by_css('.swal2-confirm').first.click()
integrante = Integrante.objects.get(pk=self.integrante1.pk)
self.assertEqual(new_name, integrante.nombres)
alumno = Alumno.objects.get(integrante=self.integrante1.pk)
self.assertEqual(numero_sae, alumno.numero_sae)
self.assertTrue(self.browser.is_text_present(new_name))
# otro usuario
new_name = 'Peter'
new_relation = 'tutor'
self.browser.find_by_css('.edit-integrante-link')[1].click()
time.sleep(.3)
self.assertTrue(self.browser.is_text_present('Relacion'))
self.browser.find_by_css('#modal_edit_integrante #id_nombres').first.fill(new_name)
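        # Build an XPath that selects the desired option of the 'relacion'
        # dropdown inside the edit-integrante modal.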
search_xpath = '//DIV[@id="modal_edit_integrante"]\
//SELECT[@id="id_relacion"]\
//OPTION[@value="' + new_relation + '"]'
self.browser.find_by_xpath(search_xpath).click()
self.browser.find_by_css('#modal_edit_integrante #btn_send_create_user').first.click()
time.sleep(.2)
self.assertTrue(self.browser.is_text_present('Integrante Editado'))
self.browser.find_by_css('.swal2-confirm').first.click()
integrante = Integrante.objects.get(pk=self.integrante2.pk)
self.assertEqual(new_name, integrante.nombres)
self.assertEqual(integrante.tutor_integrante.relacion, new_relation)
self.assertTrue(self.browser.is_text_present(new_name))
def send_create_integrante_form(self, nombres, apellidos, telefono, correo):
"""Function which fills the user creation form and tries to send it.
"""
self.browser.find_by_id('btn_modal_create_integrante').click()
time.sleep(0.3)
self.browser.find_by_id('id_nombres').first.fill(nombres)
self.browser.find_by_id('id_apellidos').first.fill(apellidos)
self.browser.find_by_id('id_telefono').first.fill(telefono)
self.browser.find_by_id('id_correo').first.fill(correo)
self.browser.select('nivel_estudios', '1_grado')
self.browser.find_by_id('id_fecha_de_nacimiento').first.click()
time.sleep(.2)
self.browser.find_by_css('.ui-datepicker-today').first.click()
self.browser.find_by_id('btn_send_create_user').click()
def test_create_integrantes(self):
""" Create two integrantes, checking errors and that they appear on the table.
"""
url = self.live_server_url + reverse('captura:list_integrantes',
kwargs={'id_familia': self.familia1.id})
self.browser.visit(url)
self.send_create_integrante_form(nombres='Elver', apellidos='Ga', telefono='4424567899',
correo='abc@abc.com')
self.assertTrue(self.browser.is_text_present('Integrante Creado'))
self.browser.find_by_css('.swal2-confirm').first.click()
time.sleep(.2)
self.assertTrue(self.browser.is_text_present('Elver'))
self.send_create_integrante_form(nombres='Eugenio', apellidos='Ga', telefono='-1',
correo='abc@abc.com')
self.assertTrue(self.browser.is_text_present('El número de telefono'))
time.sleep(.2)
self.browser.find_by_css('.swal2-confirm').first.click()
time.sleep(.2)
self.browser.find_by_id('id_telefono').first.fill('123456789')
time.sleep(.2)
self.browser.find_by_id('btn_send_create_user').click()
self.assertTrue(self.browser.is_text_present('Integrante Creado'))
self.browser.find_by_css('.swal2-confirm').first.click()
time.sleep(.2)
self.assertTrue(self.browser.is_text_present('Eugenio'))
class TestViewsAdministracion(StaticLiveServerTestCase):
"""Integration test suite for testing the views in the app: captura.
Test the urls for 'captura' which make up the capturista dashboard.
A user is created in order to test they are displayed.
Attributes
----------
browser : Browser
Driver to navigate through websites and to run integration tests.
"""
def setUp(self):
""" Initialize the browser and create a user, before running the tests.
"""
self.browser = Browser('chrome')
test_username = 'estebes'
test_password = 'junipero'
estebes = User.objects.create_user(
username=test_username, email='juan@example.com', password=test_password,
first_name='Estebes', last_name='Thelmapellido')
capturista = Group.objects.get_or_create(name=CAPTURISTA_GROUP)[0]
capturista.user_set.add(estebes)
capturista.save()
self.capturista = Capturista.objects.create(user=estebes)
self.browser.visit(self.live_server_url + reverse('tosp_auth:login'))
self.browser.fill('username', test_username)
self.browser.fill('password', test_password)
self.browser.find_by_id('login-submit').click()
def tearDown(self):
"""At the end of tests, close the browser.
"""
self.browser.driver.close()
self.browser.quit()
def test_capturista_dashboard_if_this_is_empty(self):
"""Test for url 'captura:estudios'.
Visit the url of name 'captura:estudios' and check it loads the
content of the captura dashboard panel.
"""
test_url_name = 'captura:estudios'
self.browser.visit(self.live_server_url + reverse(test_url_name))
# Check for nav_bar partial
# self.assertTrue(self.browser.is_text_present('Instituto Juan Pablo'))
self.assertEqual(Estudio.objects.count(), 0)
        # Check that the following texts are present in the dashboard
self.assertTrue(self.browser.is_text_present('Mis estudios socioeconómicos'))
self.assertTrue(self.browser.is_text_present('Agregar estudio'))
        # Check that the following text is present if no socio-economic studies exist
self.assertTrue(self.browser.is_text_present(
'No hay registro de estudios socioeconómicos'))
        # Check that the following texts aren't present if no socio-economic study exists
self.assertFalse(self.browser.is_text_present('Ver retroalimentación'))
self.assertFalse(self.browser.is_text_present('Editar'))
def test_list_studies(self):
"""Test for url 'captura:estudios'.
        Creates two socio-economic studies (f1 and f2), the first as rejected
        (rechazado) and the second as pending (revision), and visits the url
        'captura:estudios' to check that it loads both socio-economic studies
        created previously.
"""
user = User.objects.get(username='estebes')
user_id = user.id
capturist = Capturista.objects.get(user=user_id)
solvencia = 'No tienen dinero'
estado = Familia.OPCION_ESTADO_SOLTERO
estado2 = Familia.OPCION_ESTADO_CASADO_CIVIL
localidad = Familia.OPCION_LOCALIDAD_JURICA
localidad2 = Familia.OPCION_LOCALIDAD_CAMPANA
f1 = Familia(numero_hijos_diferentes_papas=1, explicacion_solvencia=solvencia,
estado_civil=estado, localidad=localidad)
f1.save()
f2 = Familia(numero_hijos_diferentes_papas=2, explicacion_solvencia=solvencia,
estado_civil=estado2, localidad=localidad2)
f2.save()
e1 = Estudio(capturista_id=capturist.id, familia_id=f1.id,
status=Estudio.RECHAZADO)
e1.save()
e2 = Estudio(capturista_id=capturist.id, familia_id=f2.id,
status=Estudio.REVISION)
e2.save()
test_url_name = 'captura:estudios'
self.browser.visit(self.live_server_url + reverse(test_url_name))
# Check for nav_bar partial
# self.assertTrue(self.browser.is_text_present('Instituto Juan Pablo'))
self.assertEqual(Estudio.objects.count(), 2)
# Check that the following texts are present in the dashboard
self.assertTrue(self.browser.is_text_present('Mis estudios socioeconómicos'))
self.assertTrue(self.browser.is_text_present('Agregar estudio'))
        # Check that the following text isn't present if any socio-economic study exists
self.assertFalse(self.browser.is_text_present('No hay registro'))
        # Check that the following texts are present if any socio-economic study exists
self.assertTrue(self.browser.is_text_present('Editar'))
class TestViewsFotos(StaticLiveServerTestCase):
""" Integration test suite for testing the views in the app captura,
that surround the creation of editing of the familia model.
Attributes
----------
    browser : Browser
        Driver to navigate through websites and to run integration tests.
elerik : User
        User that will be used as a capturista in order to fill everything
        related to familia.
capturista : Capturista
        Associated with the User, as this object is required for permissions and
creation.
    escuela : Escuela
        Used in tests that depend on creating an object related to an escuela.
familia1 : Familia
Used in tests that depend on creating or editing an object related to a familia.
estudio1 : Estudio
Used in tests that depend on creating or editing an existent estudio.
integrante1 : Integrante
Used in tests that depend on creating or editing an object related to an integrante.
    integrante_contructor_dictionary : dictionary
Used in order to prevent repetitive code, when creating very similar integrantes
in different tests.
alumno_contructor_dictionary : dictionary
Used in order to prevent repetitive code, when creating very similar alumnos in
different tests.
tutor_constructor_dictionary : dictionary
        Used in order to prevent repetitive code, when creating very similar tutores in
different tests.
"""
def setUp(self):
""" Creates all the initial necessary objects for the tests
"""
self.browser = Browser('chrome')
test_username = 'erikiano'
test_password = 'vacalalo'
elerik = User.objects.create_user(
username=test_username,
email='latelma@junipero.sas',
password=test_password,
first_name='telma',
last_name='suapellido')
self.escuela = Escuela.objects.create(nombre='Juan Pablo')
self.capturista = Capturista.objects.create(user=elerik)
numero_hijos_inicial = 3
estado_civil_inicial = 'soltero'
localidad_inicial = 'salitre'
self.familia1 = Familia.objects.create(numero_hijos_diferentes_papas=numero_hijos_inicial,
estado_civil=estado_civil_inicial,
localidad=localidad_inicial)
self.estudio1 = Estudio.objects.create(capturista=self.capturista,
familia=self.familia1)
self.browser.visit(self.live_server_url + reverse('tosp_auth:login'))
self.browser.fill('username', test_username)
self.browser.fill('password', test_password)
self.browser.find_by_id('login-submit').click()
def tearDown(self):
""" At the end of tests, close the browser.
"""
self.browser.driver.close()
self.browser.quit()
def test_upload_photo(self):
""" This test checks that the view 'captura:upload_photo', allows
the upload of a new family photo, and the photo is displayed.
"""
url = reverse('captura:list_photos',
kwargs={'id_estudio': self.estudio1.pk})
static_url = static('test_files/cocina.jpeg')[1:]
test_image = os.path.join(settings.BASE_DIR, static_url)
self.browser.visit(self.live_server_url + url)
self.browser.find_by_id('btn_modal_upload_photo').click()
time.sleep(1)
self.browser.fill('upload', test_image)
self.browser.find_by_id('btn_send_create_photo').click()
time.sleep(1)
self.assertTrue(self.browser.is_text_present('prueba'))
image = Foto.objects.filter(estudio=self.estudio1).last()
self.assertEqual('prueba', image.file_name)
image_url = image.upload.url[1:]
os.remove(os.path.join(os.path.dirname(settings.BASE_DIR), image_url))
def test_check_error_messages(self):
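        """ Test that uploading an invalid image shows a validation error and
        does not create a new Foto object.
        """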
url = reverse('captura:list_photos',
kwargs={'id_estudio': self.estudio1.pk})
number_of_images_before = Foto.objects.filter(estudio=self.estudio1).count()
static_url = static('test_files/fake.jpeg')[1:]
test_image = os.path.join(settings.BASE_DIR, static_url)
self.browser.visit(self.live_server_url + url)
self.browser.find_by_id('btn_modal_upload_photo').click()
time.sleep(1)
self.browser.fill('upload', test_image)
self.browser.find_by_id('btn_send_create_photo').click()
time.sleep(1)
self.assertTrue(self.browser.is_text_present('Upload a valid image'))
number_of_images_after = Foto.objects.filter(estudio=self.estudio1).count()
self.assertEqual(number_of_images_before, number_of_images_after)
class TestViewsCapturaEstudioCompleto(StaticLiveServerTestCase):
"""
"""
def setUp(self):
"""
"""
self.browser = Browser('chrome')
test_username = 'erikiano'
test_password = 'vacalalo'
elerik = User.objects.create_user(
username=test_username,
email='latelma@junipero.sas',
password=test_password,
first_name='telma',
last_name='suapellido')
self.capturista = Capturista.objects.create(user=elerik)
self.capturista.save()
load_data()
self.periodicidad1 = Periodo.objects.create(periodicidad='Semanal',
factor='4',
multiplica=True)
self.escuela = Escuela.objects.create(nombre='Juan Pablo')
self.browser.visit(self.live_server_url + reverse('tosp_auth:login'))
self.browser.fill('username', test_username)
self.browser.fill('password', test_password)
self.browser.find_by_id('login-submit').click()
def tearDown(self):
self.browser.driver.close()
self.browser.quit()
def create_transactions(self, monto, observacion):
"""
"""
self.browser.find_by_id('btn_modal_create_user').click()
time.sleep(.3)
self.browser.find_by_id('id_monto').fill(monto)
self.browser.find_by_id('id_observacion').fill(observacion)
self.browser.select('periodicidad', '1')
self.browser.find_by_id('id_fecha').first.click()
time.sleep(.2)
self.browser.find_by_css('.ui-datepicker-today').first.click()
self.browser.select('tipo', 'comprobable')
self.browser.find_by_id('btn_send_create_user').click()
def test_captura_complete_study(self):
"""
"""
self.browser.visit(self.live_server_url + reverse('captura:estudios'))
self.browser.find_by_id('create_estudio').click()
time.sleep(.1)
""" Create Family and Study
"""
self.browser.find_by_id('id_numero_hijos_diferentes_papas').fill(2)
self.browser.find_by_id('id_nombre_familiar').fill('Pérez')
self.browser.select('estado_civil', 'casado_iglesia')
self.browser.select('localidad', 'poblado_jurica')
self.browser.find_by_id('submit_familia').click()
TestViewsFamiliaLive.send_create_integrante_form(
self,
nombres='Juan',
apellidos='Perez',
telefono='4424567899',
correo='abc@abc.com')
self.browser.find_by_css('.swal2-confirm').first.click()
TestViewsFamiliaLive.send_create_integrante_form(
self,
nombres='Hector',
apellidos='Perez',
telefono='222222222',
correo='efg@abc.com')
self.browser.find_by_css('.swal2-confirm').first.click()
TestViewsFamiliaLive.send_create_integrante_form(
self,
nombres='Laura',
apellidos='Perez',
telefono='4424567899',
correo='hij@abc.com')
self.browser.find_by_css('.swal2-confirm').first.click()
self.browser.find_by_id('next_ingresos_egresos').click()
time.sleep(.1)
self.create_transactions(1000, 'Ninguna')
self.browser.find_by_css('.swal2-confirm').first.click()
self.browser.find_by_id('next_fotos').click()
static_url = static('test_files/cocina.jpeg')[1:]
test_image = os.path.join(settings.BASE_DIR, static_url)
self.browser.find_by_id('btn_modal_upload_photo').click()
time.sleep(1)
self.browser.fill('upload', test_image)
self.browser.find_by_id('btn_send_create_photo').click()
time.sleep(1)
self.assertTrue(self.browser.is_text_present('prueba'))
image = Foto.objects.filter(estudio=Estudio.objects.all().first()).last()
self.assertEqual('prueba', image.file_name)
image_url = image.upload.url[1:]
os.remove(os.path.join(os.path.dirname(settings.BASE_DIR), image_url))
self.browser.find_by_id('next_preguntas').click() # Preguntas
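        # Answer every question of every section: check the last option for
        # multiple-choice questions and fill random text otherwise, then walk back
        # through the sections and verify the answers were persisted.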
secciones = Seccion.objects.all().order_by('numero')
random_texts = {}
for seccion in secciones:
subsecciones = Subseccion.objects.filter(seccion=seccion)
preguntas = Pregunta.objects.filter(subseccion__in=subsecciones)
for pregunta in preguntas:
respuestas = Respuesta.objects.filter(pregunta=pregunta)
for respuesta in respuestas:
num_opciones = OpcionRespuesta.objects.filter(pregunta=pregunta).count()
if num_opciones > 0:
answer_input = self.browser.find_by_id(
'id_respuesta-' + str(respuesta.id)
+ '-eleccion_' + str(num_opciones-1))
answer_input.check()
else:
new_text = ''.join(
random.choice(string.ascii_uppercase) for _ in range(12))
answer_input = self.browser.find_by_id(
'id_respuesta-' + str(respuesta.id) + '-respuesta').first
answer_input.fill(new_text)
random_texts[respuesta.id] = new_text
self.browser.find_by_id('next_section_button').first.click()
time.sleep(.1)
self.browser.find_by_id('previous_cuestionario').first.click()
secciones = Seccion.objects.all().order_by('-numero')
for seccion in secciones:
subsecciones = Subseccion.objects.filter(seccion=seccion)
preguntas = Pregunta.objects.filter(subseccion__in=subsecciones)
for pregunta in preguntas:
respuestas = Respuesta.objects.filter(pregunta=pregunta)
for respuesta in respuestas:
num_opciones = OpcionRespuesta.objects.filter(pregunta=pregunta).count()
if num_opciones > 0:
answer_input = self.browser.find_by_id(
'id_respuesta-' + str(respuesta.id)
+ '-eleccion_' + str(num_opciones-1))
self.assertTrue(answer_input.checked)
else:
answer_input = self.browser.find_by_id(
'id_respuesta-' + str(respuesta.id) + '-respuesta').first
self.assertEqual(answer_input.value, random_texts[respuesta.id])
self.browser.find_by_id('previous_section_button').first.click()
        # Now we jump between sections and add more info before we upload.
self.browser.find_by_id('navigation_familia').click()
hijos_bast = self.browser.find_by_id('id_numero_hijos_diferentes_papas').first.value
self.assertEqual(hijos_bast, '2')
self.browser.find_by_id('id_numero_hijos_diferentes_papas').fill(4)
self.browser.find_by_id('submit_familia').click()
time.sleep(.1)
self.browser.find_by_id('previous_familia').click()
hijos_bast = self.browser.find_by_id('id_numero_hijos_diferentes_papas').first.value
self.assertEqual(hijos_bast, '4')
self.browser.find_by_id('navigation_integrantes').click()
self.assertTrue(self.browser.is_text_present('Hector'))
self.assertTrue(self.browser.is_text_present('Laura'))
self.assertTrue(self.browser.is_text_present('Juan'))
# CREATE FATHER
self.browser.find_by_id('btn_modal_create_integrante').click()
time.sleep(0.3)
self.browser.find_by_id('id_nombres').first.fill('don')
self.browser.find_by_id('id_apellidos').first.fill('martines')
self.browser.find_by_id('id_telefono').first.fill('442343234234')
self.browser.find_by_id('id_correo').first.fill('abs@losabelosabe.com')
self.browser.select('nivel_estudios', '6_grado')
self.browser.find_by_id('id_fecha_de_nacimiento').first.click()
time.sleep(.2)
self.browser.find_by_css('.ui-datepicker-today').first.click()
self.browser.select('rol', 'tutor')
self.browser.select('relacion', 'padre')
self.browser.find_by_id('btn_send_create_user').click()
time.sleep(.1)
self.browser.find_by_css('.swal2-confirm').first.click()
# END CREATE FATHER
# CREATE MOTHER
self.browser.find_by_id('btn_modal_create_integrante').click()
time.sleep(0.3)
self.browser.find_by_id('id_nombres').first.fill('dona')
self.browser.find_by_id('id_apellidos').first.fill('martines')
self.browser.find_by_id('id_telefono').first.fill('442343234234')
self.browser.find_by_id('id_correo').first.fill('absb@losabelosabe.com')
self.browser.select('nivel_estudios', '6_grado')
self.browser.find_by_id('id_fecha_de_nacimiento').first.click()
time.sleep(.2)
self.browser.find_by_css('.ui-datepicker-today').first.click()
self.browser.select('rol', 'tutor')
self.browser.select('relacion', 'madre')
self.browser.find_by_id('btn_send_create_user').click()
time.sleep(.1)
self.browser.find_by_css('.swal2-confirm').first.click()
# END CREATE MOTHER
# CREATE SON
self.browser.find_by_id('btn_modal_create_integrante').click()
time.sleep(0.3)
self.browser.find_by_id('id_nombres').first.fill('junior')
self.browser.find_by_id('id_apellidos').first.fill('martines')
self.browser.find_by_id('id_telefono').first.fill('4423431234234')
self.browser.find_by_id('id_correo').first.fill('abssb@losabelosabe.com')
self.browser.select('nivel_estudios', '6_grado')
self.browser.find_by_id('id_fecha_de_nacimiento').first.click()
time.sleep(.2)
self.browser.find_by_css('.ui-datepicker-today').first.click()
self.browser.select('rol', 'alumno')
self.browser.select('escuela', '1')
self.browser.find_by_id('id_numero_sae').fill('123123')
self.browser.find_by_id('btn_send_create_user').click()
self.browser.find_by_css('.swal2-confirm').first.click()
# END CREATE SON
time.sleep(.1)
self.assertTrue(self.browser.is_text_present('don'))
self.assertTrue(self.browser.is_text_present('dona'))
self.assertTrue(self.browser.is_text_present('junior'))
self.browser.find_by_id('navigation_transacciones').click()
time.sleep(.1)
estudio = Estudio.objects.all().first()
desired_url = self.live_server_url + reverse(
'captura:list_transacciones',
kwargs={'id_familia': estudio.familia.id})
self.assertEqual(self.browser.url, desired_url)
self.browser.find_by_id('navigation_fotos').click()
desired_url = self.live_server_url + reverse(
'captura:list_photos',
kwargs={'id_estudio': estudio.id})
self.assertEqual(self.browser.url, desired_url)
self.browser.find_by_id('navigation_cuestionario').click()
desired_url = self.live_server_url + reverse(
'captura:contestar_estudio',
kwargs={'id_estudio': estudio.id, 'numero_seccion': 1})
self.assertEqual(self.browser.url, desired_url)
self.browser.find_by_css('.fa-file-text').first.click()
time.sleep(.1)
self.assertTrue(self.browser.is_text_present('Editar'))
self.browser.find_by_css('.glyphicon-pencil').first.click()
time.sleep(.1)
self.browser.find_by_id('navigation_subir').click()
time.sleep(.1)
self.browser.find_by_id('submit_estudio').click()
time.sleep(.1)
desired_url = self.live_server_url + reverse('captura:estudios')
self.assertEqual(self.browser.url, desired_url)
self.assertFalse(self.browser.is_text_present('Editar'))
estudio = Estudio.objects.all().first()
self.assertEqual(estudio.status, Estudio.REVISION)
| erikiado/jp2_online | captura/test_views.py | Python | mit | 47,333 | ["VisIt"] | 901fbced32a6caa1668976090ea97ac24170f6a96cdbde0ee1197eb801b6ccde |
from asap3 import *
from ase.md.verlet import VelocityVerlet
from ase.lattice.cubic import FaceCenteredCubic
from asap3.io.trajectory import *
from numpy import *
import sys, os, time
from asap3.testtools import ReportTest
from asap3.mpi import world
debug = 0
if debug == 1:
DebugOutput("makeverlet%d.log", nomaster=True)
elif debug == 2:
time.sleep(world.rank)
print "PID:", os.getpid()
time.sleep(20)
print_version(1)
#set_verbose(1)
ismaster = world.rank == 0
isparallel = world.size != 1
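# Choose a CPU layout (domain decomposition) that matches the number of MPI processes.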
if world.size == 1:
cpulayout = None
elif world.size == 2:
cpulayout = [2,1,1]
elif world.size == 3:
cpulayout = [1,3,1]
elif world.size == 4:
cpulayout = [2,1,2]
delete = True
precision = 1e-8
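# maketraj: run nstep blocks of 10 Verlet steps, recording the potential energy
# after each block and writing a trajectory frame whenever a trajectory is given.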
def maketraj(atoms, t, nstep):
e = [atoms.get_potential_energy()]
print "Shape of force:", atoms.get_forces().shape
dyn = VelocityVerlet(atoms, 5*units.fs)
for i in range(nstep):
dyn.run(10)
energy = atoms.get_potential_energy()
e.append(energy)
if ismaster:
print "Energy: ", energy
if t is not None:
t.write()
return e
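# checktraj: re-read every frame of the trajectory and check that its potential
# energy matches the recorded value.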
def checktraj(t, e, cpus=None):
i = 0
for energy in e:
atoms = t.get_atoms(i, cpus)
atoms.set_calculator(EMT())
ReportTest("Checking frame %d / cpus=%s" % (i, str(cpus)),
atoms.get_potential_energy(), energy, precision)
i += 1
if ismaster:
initial = FaceCenteredCubic(size=(10,10,10), symbol="Cu", pbc=(1,0,0))
else:
initial = None
if isparallel:
atoms = MakeParallelAtoms(initial, cpulayout)
else:
atoms = initial.copy()
atoms.set_calculator(EMT())
print "Writing trajectory"
traj = PickleTrajectory("traj1.nc", "w", atoms)
traj.write()
energies = maketraj(atoms, traj, 10)
traj.close()
if ismaster:
print "Reading trajectory (serial)"
traj = PickleTrajectory("traj1.nc")
checktraj(traj, energies)
if isparallel:
print "Reading trajectory (parallel)"
traj = PickleTrajectory("traj1.nc")
checktraj(traj, energies, cpulayout)
print "Repeating simulation"
atoms = traj.get_atoms(5, cpulayout)
atoms.set_calculator(EMT())
energies2 = maketraj(atoms, None, 5)
if ismaster:
for i in range(5):
ReportTest("Rerun[%d]" % (i,), energies2[i], energies[i+5], precision)
traj.close()
print "Appending to trajectory"
traj = PickleTrajectory("traj1.nc", "a")
atoms = traj.get_atoms(-1, cpulayout)
atoms.set_calculator(EMT())
traj.set_atoms(atoms)
energies2 = maketraj(atoms, traj, 5)
traj.close()
if ismaster:
print "Reading longer trajectory"
traj = PickleTrajectory("traj1.nc")
checktraj(traj, energies + energies2[1:])
if ismaster and delete:
print "Deleting trajectory"
os.unlink("traj1.nc")
ReportTest.Summary()
| auag92/n2dm | Asap-3.8.4/Test/parallel/parallelTrajectories.py | Python | mit | 2,781 | ["ASE"] | fe0153414237f3a171458589d3ac4122befd80d3f4b16dd8b485b14aadaf0722 |
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 31 08:10:06 2017
@author: a002028
"""
import os
import sys
current_path = os.path.dirname(os.path.realpath(__file__))[:-4]
if current_path not in sys.path:
sys.path.append(current_path)
import pandas as pd
import numpy as np
import time
import pickle
import utils
import core
import core.exceptions as exceptions
"""
#==============================================================================
#==============================================================================
"""
class ColumnDataHandler(object):
"""
"""
def __init__(self):
super().__init__()
"""
#==============================================================================
#==============================================================================
"""
class RowDataHandler(object):
"""
"""
def __init__(self):
super().__init__()
#==========================================================================
def _get_index_fields(self, data_keys=[], extra_field=[]):
"""
fp: filter_parameters
"""
exclude_params = list(self.filter_parameters.fields_index) + \
[self.filter_parameters.value_key] + \
[self.filter_parameters.qflag_key]
return list(p for p in self.filter_parameters.compulsory_fields \
if p not in exclude_params and p in data_keys) + \
self.filter_parameters.fields_index + extra_field
#==========================================================================
def _merge_df_string_columns(self, col_to_merge, new_key=u'new_key', sep=u'__'):
"""
"""
self.df[new_key] = self.df.get(col_to_merge[0]).astype(str).str.cat([ \
self.df.get(key).astype(str) for key in col_to_merge[1:] if key in self.df], sep=sep)
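        # Illustrative example (values are assumptions, not taken from real data):
        # with col_to_merge=['VALUE', 'QFLAG'] and sep='__', a row with VALUE '3.5'
        # and QFLAG 'B' ends up with new_key == '3.5__B'.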
#==========================================================================
def _one_parameter_df_adjustments(self):
"""
fp: filter_parameters
"""
map_dict = {self.filter_parameters.value_key: self.filter_parameters.use_parameters,
self.filter_parameters.qflag_key: 'Q_'+self.filter_parameters.use_parameters}
# Deleting column that only contains parameter name
self._delete_columns_from_df(columns=self.filter_parameters.parameter_key)
# Changing column "VALUE" to parameter name and column "QFLAG" to Q_"parameter_name"
self._rename_columns_of_DataFrame(map_dict)
#==========================================================================
def _seperate_para_value_from_qflag(self, sep=''):
"""
"""
# Simply get the length of one seperated string
for para in self.para_list:
if np.any(self.df[para]):
length = len(self.df[para][self.df.index[self.df[para].notnull()][0]].split(sep))
break
        if 'length' not in locals():
raise UserWarning('No data in file?')
for para in self.para_list:
self.df[para] = self.df[para].apply(lambda x: x.split(sep) if x else ['']*length)
self.df[[para,'Q_'+para]] = pd.DataFrame(self.df.get(para).values.tolist())
#==========================================================================
def _set_column_table_from_pivot_table(self, sort=True):
"""
fp: filter_parameters
"""
df_col = self.df.unstack() # necessary to create a new local dataframe here
df_col = df_col.reset_index()
self.df = df_col
if sort:
self.sort_dict_by_keys(sort_order=self.filter_parameters.sort_by_fields,
ascending_list=[True]*len(self.filter_parameters.sort_by_fields),
depth_head=self.filter_parameters.depth_key,
serno_head=self.filter_parameters.visit_id_key)
#==========================================================================
def _set_pivot_table(self, values, index):
"""
"""
self.df = pd.pivot_table(self.df, values=values, index=index, aggfunc='first')
#==========================================================================
def filter_row_data(self, data_filter_object=None, map_object=None):
"""
        Filters row data using
        _one_parameter_df_adjustments() when self.one_parameter is True,
        _merge_df_string_columns() when self.one_parameter is False.
"""
if self.one_parameter:
self._one_parameter_df_adjustments()
else:
self._merge_df_string_columns([self.filter_parameters.value_key, self.filter_parameters.qflag_key],
new_key=u'TEMP_VALUE',
sep='__')
index_fields = self._get_index_fields(data_keys=self.df.keys())
print(len(index_fields), index_fields)
self._set_pivot_table(u'TEMP_VALUE', index_fields)
#==========================================================================
def get_column_data_format(self):
"""
"""
if self.one_parameter:
pass
else:
self._set_column_table_from_pivot_table(sort=True)
self._seperate_para_value_from_qflag(sep='__')
# self.add_df(df_col, 'col', add_columns=False)
#==========================================================================
"""
#==============================================================================
#==============================================================================
"""
#class DataFrameHandler(object):
class DataFrameHandler(ColumnDataHandler, RowDataHandler):
"""
Holds functions to handle DataFrame operations
"""
def __init__(self):
super().__init__()
self.wb_id_header = 'MS_CD'
#==========================================================================
def _add_columns(self):
"""
updated 20190123 by Lena Viktorsson
added sample_id column
"""
print('in _add_columns')
# self.df['time'] = pd.Series(pd.to_datetime(self.df['SDATE'] + self.df['STIME'], format='%Y-%m-%d%H:%M'))
# df['latit_dec_deg'] = df['LATIT'].apply(utils.decmin_to_decdeg)
# df['longi_dec_deg'] = df['LONGI'].apply(utils.decmin_to_decdeg)
if not 'LATIT_DD' in self.df and 'LATIT_DM' in self.df:
self.df['LATIT_DD'] = self.df['LATIT_DM'].apply(utils.decmin_to_decdeg)
if not 'LONGI_DD' in self.df and 'LONGI_DM' in self.df:
self.df['LONGI_DD'] = self.df['LONGI_DM'].apply(utils.decmin_to_decdeg)
if 'LATIT_DD' in self.df and 'LONGI_DD' in self.df:
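            # profile_key identifies a profile as 'SDATE STIME LATIT_DD LONGI_DD'
            # (space separated); see get_index_for_profile_key below.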
self.df['profile_key'] = self.df['SDATE'].apply(str) + \
' ' + \
self.df['STIME'].apply(str) + \
' ' + \
self.df['LATIT_DD'].apply(str) + \
' ' + \
self.df['LONGI_DD'].apply(str)
if 'SHARKID_MD5' in self.df.columns:
            # use SHARKID_MD5 for sample id for biological datatypes
self.df['SAMPLE_ID'] = self.df.SHARKID_MD5
elif ('SERNO' in self.df.columns) and ('SHIPC' in self.df.columns):
            # use year_seriesno_shipcode for sample_id for phys/chem datatype
self.df['SAMPLE_ID'] = self.df['MYEAR'].apply(str) + \
'_' + \
self.df['SERNO'] + \
'_' + \
self.df['SHIPC']
else:
self.df['SAMPLE_ID'] = self.df['SDATE'] + '_SCM'
#==========================================================================
def _add_field(self):
if self.filter_parameters.add_parameters:
self.df[self.filter_parameters.add_parameters] = ''
#==========================================================================
def _additional_filter(self):
""" Can be overwritten from child """
pass
#==========================================================================
def _apply_field_filter(self):
"""
        Selects columns from the dataframe,
        adds columns for the origin of the dataframe (filepath),
        and organizes the data format.
"""
self._select_columns_from_df() # use only default fields
self._add_origin_columns(dtype=self.dtype, file_path=self.source) # MW
self._rename_param('No species in sample', self.filter_parameters.use_parameters)
self._organize_data_format()
#==========================================================================
def _calculate_data(self):
""" Can be overwritten from child """
self._add_waterbody_area_info()
#==========================================================================
def _add_origin_columns(self, dtype='', file_path=''):
"""
Created 20180419 by Magnus Wenzer
Updated 20180419 by Magnus Wenzer
        Adds columns for origin_dtype and origin_file_path
"""
self.df['origin_dtype'] = dtype
self.df['origin_file_path'] = os.path.basename(file_path)
#==========================================================================
def _add_waterbody_area_info(self):
print('in _add_waterbody_area_info')
        # TODO:
        # if VISS_EU_CD is not in df.columns, add it from the vfk-kod column
wb_id_list = self.column_data[self.source][self.wb_id_header].tolist()
# wd_id = self.mapping_objects['water_body'].get_waterdistrictcode_for_water_body(wb_id_list[0])
# TODO: remove this when fixed problem with WA-code for Inre Idefjordens
if '' in wb_id_list:
            # outside the Swedish EEZ
pass
if 'WA28238367' in wb_id_list:
            # the Norwegian part of Inre Idefjorden
pass
# self.column_data[self.source].loc[
# self.column_data[self.source][self.wb_id_header] == 'WA28238367', self.wb_id_header] = 'WA24081564'
# wb_id_list = self.column_data[self.source][self.wb_id_header].tolist()
if 'WA36808071' in wb_id_list:
            # Idefjorden, the Norwegian part?
pass
# self.column_data[self.source].loc[
# self.column_data[self.source][self.wb_id_header] == 'WA36808071', self.wb_id_header] = 'WA18466637'
# wb_id_list = self.column_data[self.source][self.wb_id_header].tolist()
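        # For each waterbody-related column that is missing, look up the value for
        # every waterbody id via the 'water_body' mapping object and add the column.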
if 'WATER_DISTRICT_CODE' not in self.column_data[self.source]:
new_list = []
for wb_id in wb_id_list:
wd_id = self.mapping_objects['water_body'].get_waterdistrictcode_for_water_body(wb_id)
new_list.append(wd_id)
self.column_data[self.source]['WATER_DISTRICT_CODE'] = new_list
if 'WATER_DISTRICT_NAME' not in self.column_data[self.source]:
new_list = []
for wb_id in wb_id_list:
wd_name = self.mapping_objects['water_body'].get_waterdistrictname_for_water_body(wb_id)
new_list.append(wd_name)
self.column_data[self.source]['WATER_DISTRICT_NAME'] = new_list
if 'WATER_TYPE_AREA' not in self.column_data[self.source]:
new_list = []
for wb_id in wb_id_list:
type_name = self.mapping_objects['water_body'].get_type_area_name_for_water_body(wb_id)
new_list.append(type_name)
self.column_data[self.source]['WATER_TYPE_AREA'] = new_list
if 'WATER_BODY_NAME' not in self.column_data[self.source]:
new_list = []
for wb_id in wb_id_list:
wb_name = self.mapping_objects['water_body'].get_name_for_water_body(wb_id)
new_list.append(wb_name)
self.column_data[self.source]['WATER_BODY_NAME'] = new_list
if 'MS_CD' not in self.column_data[self.source]:
new_list = []
for wb_id in wb_id_list:
ms_cd_code = self.mapping_objects['water_body'].get_mscd_for_water_body(wb_id)
new_list.append(ms_cd_code)
self.column_data[self.source]['MS_CD'] = new_list
if 'VISS_EU_CD' not in self.column_data[self.source]:
new_list = []
for wb_id in wb_id_list:
eu_cd_code = self.mapping_objects['water_body'].get_visseucd_for_water_body(wb_id)
new_list.append(eu_cd_code)
self.column_data[self.source]['VISS_EU_CD'] = new_list
#==========================================================================
def _check_nr_of_parameters(self):
"""
        If only one parameter is used, we just need a filter to keep that
        parameter; there is no need to use a pivot table.
"""
        if not isinstance(self.filter_parameters.use_parameters, list):
            self.one_parameter = True
        else:
            self.one_parameter = False
#==========================================================================
def _convert_format(self, key_list, as_type=np.unicode):
"""
"""
for key in key_list:
if key and key in self.df:
try:
self.df[key] = self.df[key].astype(as_type)
                except Exception:
print(u'Could not convert format for:', key, u'in DataFrame')
#==========================================================================
def _delete_columns_from_df(self, columns=[]):
"""
"""
self.df = self.df.drop(columns, axis=1, errors='ignore') # inplace=True ?
#==========================================================================
def _drop_duplicates(self, based_on_column=''):
self.df.drop_duplicates(subset=based_on_column, inplace=True)
#==========================================================================
def _filter_column_data(self, df, data_filter_object):
"""
Filters column file data and returns resulting dataframe
"""
boolean = data_filter_object.get_boolean(df)
if not len(boolean):
return df
return df.loc[df.index[boolean], :]
#==========================================================================
def _handle_column_data(self):
"""
"""
# cdh = ColumnDataHandler(DataFrameHandler)
self.sort_columns_of_df()
self.add_column_df()
self._calculate_data()
#==========================================================================
def _handle_row_data(self, append_row_data=True):
"""
Handles row data
Selects parameters
"""
self._select_parameters()
if append_row_data:
self.add_row_df()
if self.raw_data_copy:
self.save_data_as_txt(directory=self.export_directory,
prefix=u'Raw_format')
# rdh = RowDataHandler(DataFrameHandler)
self._additional_filter()
self.filter_row_data()
self.get_column_data_format()
# print(self.df.get('BQIm'))
self.sort_columns_of_df()
self.add_column_df()
# self.add_row_df()
self._calculate_data()
#==========================================================================
def _include_empty_cells(self, data=dict):
# if data is dataframe.. but not working properly
# mask = np.column_stack([data[col].str.contains('"empty"', na=False) for col in data])
# data.loc[mask.any(axis=1)] = ''
#TODO Make it nicer :D
for key in data.keys():
for i, value in enumerate(data.get(key)):
if value == '"empty"':
data[key][i] = ''
return data
#==========================================================================
def _map_parameter_list(self):
"""
"""
# TODO: for rowdata this row results in None type calling unique()
p_map = self.parameter_mapping.get_parameter_mapping(self.df.get(self.filter_parameters.parameter_key).unique())
p_list = list(p for p in p_map if p_map[p] in self.filter_parameters.use_parameters)
return p_map, p_list
#==========================================================================
def _organize_data_format(self):
"""
organize the data based on raw data format, either row or column data
"""
if self.raw_data_format == 'row':
self._handle_row_data()
elif self.raw_data_format == 'column':
self._handle_column_data()
#==========================================================================
def _recognize_format(self):
"""
        Recognizes row or column format based on whether the parameter_key
        specified in the filter file for the datatype is present in the data
        header, then sets the raw_data_format attribute to 'row' or 'column'.
"""
# TODO why is parameter_key attribute a list for rowdata?
# print(self.filter_parameters.parameter_key)
# print(self.df.keys())
if self.filter_parameters.parameter_key in self.df: #'PARAM' in data header
self.raw_data_format = 'row'
else:
self.raw_data_format = 'column'
#TODO elif recognize netcdf..
#==========================================================================
def _remap_header(self):
"""
remaps header in file according to parameter_mapping file
:return:
"""
# for k in self.df.columns.values:
# print(k)
map_dict = self.parameter_mapping.get_parameter_mapping(self.df.columns.values)
self._rename_columns_of_DataFrame(map_dict)
#==========================================================================
def _rename_columns_of_DataFrame(self, mapping_dict):
"""
"""
self.df = self.df.rename(index=str, columns=mapping_dict)
# ==========================================================================
def _rename_param(self, original_name, new_name):
"""
overwritten in dataframehandler for zoobenthos
"""
print('what?!')
pass
#==========================================================================
def _select_columns_from_df(self):
"""
Keeps only the columns specified in compulsory fields in the datatypes filter file
"""
if self.raw_data_format == 'row':
self._delete_columns_from_df(columns=list(x for x in \
self.df.keys() if x not in self.filter_parameters.compulsory_fields))
elif self.raw_data_format == 'column':
self._delete_columns_from_df(columns=list(x for x in \
self.df.keys() if x not in \
self.filter_parameters.compulsory_fields + \
self.filter_parameters.use_parameters + \
[u'Q_'+p for p in self.filter_parameters.use_parameters]))
#==========================================================================
def _select_parameters(self):
"""
        Can be overridden in a child class, e.g. DataHandlerPhytoplankton.
        First checks the number of parameters that should be used and stores the
        result as a boolean attribute in self.one_parameter, for later
        formatting to column format in ...
"""
self._check_nr_of_parameters()
p_map, p_list = self._map_parameter_list()
self.para_list = self.parameter_mapping.map_parameter_list(p_list)
for para in p_list:
# Change parameter name according to parameter codelist
self.df[self.filter_parameters.parameter_key] = np.where(self.df[self.filter_parameters.parameter_key]==para,
p_map[para],
self.df[self.filter_parameters.parameter_key])
# indices = np.where( self.df[parameter_head] == params_to_use[:,None] )[0]
# indices = np.where( self.df[self.filter_parameters.parameter_key].isin(self.para_list) )[0]
# self.df = self.df.iloc[indices,:]
boolean = self.df[self.filter_parameters.parameter_key].isin(self.para_list)
self.df = self.df.loc[boolean,:]
#==========================================================================
def add_column_df(self, add_columns=True):
"""
Adds data to the internal data structure.
"""
# Add columns (time etc.)
# Should always be true?
if add_columns:
self._add_columns()
self.column_data[self.source] = self.df.copy(deep=True) # One DataFrame per source
# self.column_data = self.column_data.append(self.df, ignore_index=True).fillna('')
#==========================================================================
def add_row_df(self, add_columns=False):
"""
Adds data to the internal data structure.
"""
# Add columns (time etc.)
# Should always be false?
if add_columns:
self._add_columns()
self.row_data[self.source] = self.df.copy(deep=True)
# self.row_data = self.row_data.append(self.df, ignore_index=True).fillna('')
#==========================================================================
def filter_data(self, data_filter_object, filter_id=''):
"""
Filters data according to data_filter_object.
data_filter_object is a core.filters.DataFilter-object.
Returns a DataHandler object with the filtered data.
"""
new_data_handler = DataHandler(self.source + '_filtered_%s' % filter_id)
if len(self.column_data):
# print( 'data_filter_object', data_filter_object)
df = self._filter_column_data(self.column_data, data_filter_object)
if data_filter_object.parameter:
# print('df', df.columns)
# print('data_filter_object.parameter:', data_filter_object.parameter)
for col in list(df.columns):
if col not in core.ParameterList().metadata_list + [data_filter_object.parameter]:
                        df = df.drop(col, axis=1)
new_data_handler.add_df(df, 'column')
if len(self.row_data):
df = self._filter_row_data(self.row_data, data_filter_object)
new_data_handler.add_df(df, 'row')
return new_data_handler
#==========================================================================
def get_dict(self, data, drop_nans=True, drop_empty=True):
"""
"""
if drop_nans:
# Index does not matter for the returned dictionary
return { key : list(data.get(key).dropna(axis=0)) for key in data}
else:
return { key : list(data.get(key)) for key in data}
#==========================================================================
def get_index_for_profile_key(self, profile_key):
"""
Method to get index for a unique profile key.
profile_key is "time LATIT LONGI"
"""
return self.column_data.index[self.column_data['profile_key'] == profile_key]
#==========================================================================
def get_profile_key_list(self, year=None):
"""
        Returns a list of unique combinations of pos and time.
"""
if year:
return sorted(set(self.column_data.loc[self.column_data['MYEAR'] == year, 'profile_key']))
else:
return sorted(set(self.column_data['profile_key']))
#==========================================================================
def load_source(self, file_path=u'', sep='\t', encoding='cp1252', raw_data_copy=False):
"""
Created by Johannes
Updated 20180419 by Magnus Wenzer
        Can be overridden in a child class, e.g. DataHandlerPhytoplankton
"""
self.source = file_path
self.raw_data_copy = raw_data_copy
self.df = core.Load().load_txt(file_path, sep=sep, encoding=encoding, fill_nan=u'')
self._remap_header()
self._recognize_format()
self._apply_field_filter()
#==========================================================================
def delete_source(self, file_path):
"""
Created 20180422 by Magnus Wenzer
Updated 20180422 by Magnus Wenzer
        Deletes a source from the data handler.
"""
if file_path in self.column_data.keys():
self.column_data.pop(file_path)
#==========================================================================
def read_filter_file(self, file_path=u'', get_as_dict=True):
"""
"""
data = core.Load().load_txt(file_path, fill_nan=np.nan)
if get_as_dict:
data = self.get_dict(data)
data = self._include_empty_cells(data=data)
# print(data)
self.filter_parameters = core.AttributeDict()
self.filter_parameters._add_arrays_to_entries(**data)
#==========================================================================
def save_data_as_txt(self, directory=u'', prefix=u''):
"""
"""
if not directory:
return False
# directory = os.path.dirname(os.path.realpath(__file__))[:-4] + 'test_data\\test_exports\\'
if not directory.endswith(('/','\\')):
directory = directory + '/'
file_path = directory + '_'.join([prefix, self.dtype, 'data.txt'])
print(u'Saving data to:',file_path)
dir_name = os.path.dirname(file_path)
if not os.path.exists(dir_name):
os.makedirs(dir_name)
self.df.to_csv(file_path, sep='\t', encoding='cp1252', index=False)
#==========================================================================
def save_column_data(self, file_path):
"""
Created: 20180422 by Magnus Wenzer
Last modified: 20180422 by Magnus Wenzer
"""
pickle.dump(self.column_data, open(file_path, "wb"))
#==========================================================================
def sort_dict_by_keys(self,
sort_order=[],
ascending_list=[],
depth_head=None,
serno_head=None,
drop_index=True):
"""
sort_order: key list in sorting order
['key_1','key_2','key_3']
ascending_list: ascending sorting or not (key specific)
[True,False,True]
return_as_dataframe: return as pandas Dataframe
"""
print(u'Sorting..')
if any([depth_head, serno_head]):
self._convert_format([depth_head, serno_head], as_type=np.float)
self.df = self.df.sort_values(sort_order, ascending=ascending_list)
if any([depth_head, serno_head]):
self._convert_format([depth_head, serno_head], as_type=np.unicode)
if drop_index:
print(u'Resetting and Dropping INDEX')
self.df = self.df.reset_index().drop([u'index'], axis=1)
#==========================================================================
def sort_columns_of_df(self):
sort_order = [key for key in self.filter_parameters.compulsory_fields if key in self.df]
if utils.is_sequence(self.filter_parameters.use_parameters):
for para in self.filter_parameters.use_parameters:
if para in self.df:
sort_order.append(para)
if 'Q_'+para in self.df:
sort_order.append('Q_'+para)
else:
if self.filter_parameters.use_parameters in self.df:
sort_order.append(self.filter_parameters.use_parameters)
if 'Q_'+self.filter_parameters.use_parameters in self.df:
sort_order.append('Q_'+self.filter_parameters.use_parameters)
sort_order.extend(['origin_dtype', 'origin_file_path'])
self.df = self.df[sort_order]
# self.df = self.df.ix[:, sort_order]
# self.df.reindex_axis(sort_order, axis=1) # DOES NOT WORK PROPERLY
#==========================================================================
"""
#==============================================================================
#==============================================================================
"""
class NETCDFDataHandler(DataFrameHandler):
"""
"""
def __init__(self,
export_directory=''):
super().__init__()
self.export_directory = export_directory
#==========================================================================
def load_nc(self):
pass
"""
#==============================================================================
#==============================================================================
"""
class DataHandlerPhysicalChemical(DataFrameHandler):
"""
"""
def __init__(self,
filter_path=u'',
export_directory='',
parameter_mapping=None,
no_qflags=False,
mapping_objects = None): # no_qflags for data that has no quality flags (model data..)
super().__init__()
self.dtype = 'physicalchemical'
self.export_directory = export_directory
self.read_filter_file(file_path=filter_path)
self.parameter_mapping = parameter_mapping
self.mapping_objects = mapping_objects
self.no_qflags = no_qflags
self.column_data = {} #pd.DataFrame()
self.row_data = {} #pd.DataFrame()
# ==========================================================================
def _calculate_data(self):
"""
Rewritten from parent
If there are no quality flags in data self.no_qflags is initialized
as True
"""
#self.check_waterbody_id()
self._add_waterbody_area_info()
# print('_calculate_data')
if self.no_qflags:
self.calculate_din()
else:
self.calculate_din(ignore_qf_list=['B','S'])
# ==========================================================================
def check_waterbody_id(self):
"""
Checks for rows without a waterbody id and tries to find the waterbody id from the waterbody name
:return: nothing
"""
#TODO: this was started when trying to use data from outside the Swedish EEZ
# that does not have a wb_id but a wb_name. Problem: the wb_name can map to more than one wb_id
wb_name_list = self.df.loc[self.df[self.wb_id_header] == '', "WATER_BODY_NAME"].unique()
for wb_name in wb_name_list:
temp_df = self.mapping_objects['water_body'].get('water_bodies')
wb_id = temp_df.loc[(temp_df["WATERBODY_NAME"] == wb_name) & (temp_df["WB"] == 'Y'), self.wb_id_header]
wb_id_df = temp_df.loc[(temp_df["WATERBODY_NAME"] == wb_name) & (temp_df["WB"] == 'Y')]
if len(wb_id) == 1:
wb_id = wb_id.values[0]
else:
# More than one wb_id exists for this waterbody name; fall back to the first one.
wb_id = wb_id.values[0]
#raise Exception('more than one wb id exists for the waterbody name {}'.format(wb_name))
self.df.loc[(self.df[self.wb_id_header] == '') & (self.df['WATER_BODY_NAME'] == wb_name),
self.wb_id_header] = wb_id
#==========================================================================
def calculate_din(self, ignore_qf_list=[]):
"""
Returns a vector of calculated DIN.
If neither NO23 nor NO3 is present, the value is np.nan
TODO: take qflags into consideration?
"""
din_list = []
for no2, no3, nox, nh4 in zip(*self.get_nxx_lists(ignore_qf_list)):
if np.isnan(nox):
din = np.nan
if not np.isnan(no3):
din = no3
if not np.isnan(no2):
din += no2
if not np.isnan(nh4):
din += nh4
else:
din = nox
if not np.isnan(nh4):
din += nh4
if np.isnan(din):
din=''
else:
din = str(round(din, 2))
din_list.append(din)
if 'DIN' not in self.column_data[self.source]:
self.column_data[self.source]['DIN'] = din_list
else:
self.column_data[self.source]['DIN_calulated'] = din_list
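# Illustrative sketch (not part of the original code): how one row of DIN is
# combined above, given (no2, no3, nox, nh4) from get_nxx_lists():
#   (0.1, 2.0, np.nan, 0.5)          -> DIN = '2.6'  (NO3 + NO2 + NH4)
#   (np.nan, np.nan, 3.1, 0.4)       -> DIN = '3.5'  (NO23 + NH4)
#   (np.nan, np.nan, np.nan, np.nan) -> DIN = ''     (nothing to sum)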
#==========================================================================
def get_float_list(self, key, ignore_qf=[]):
"""
Get all values as floats
"""
return utils.get_float_list_from_str(df=self.column_data[self.source],
key=key, ignore_qf=ignore_qf)
#==========================================================================
def get_nxx_lists(self, ignore_qf_list):
"""
Returns 4 equal-length arrays for NO2, NO3, NO23 and NH4.
If a parameter does not exist in the loaded dataset, an array filled
with NaNs is returned for that specific parameter
"""
if 'NTRI' in self.column_data[self.source]:
ntri = self.get_float_list(key='NTRI', ignore_qf=ignore_qf_list)
else:
ntri = [np.nan]*self.column_data[self.source].shape[0]
if 'NTRA' in self.column_data[self.source]:
ntra = self.get_float_list(key='NTRA', ignore_qf=ignore_qf_list)
else:
ntra = [np.nan]*self.column_data[self.source].shape[0]
if 'NTRZ' in self.column_data[self.source]:
ntrz = self.get_float_list(key='NTRZ', ignore_qf=ignore_qf_list)
else:
ntrz = [np.nan]*self.column_data[self.source].shape[0]
if 'AMON' in self.column_data[self.source]:
amon = self.get_float_list(key='AMON', ignore_qf=ignore_qf_list)
else:
amon = [np.nan]*self.column_data[self.source].shape[0]
return ntri, ntra, ntrz, amon
#==========================================================================
"""
#==============================================================================
#==============================================================================
"""
class DataHandlerPhysicalChemicalSatellite(DataHandlerPhysicalChemical):
def __init__(self,
filter_path=u'',
export_directory='',
parameter_mapping=None,
no_qflags=False,
mapping_objects = None): # no_qflags for data that has no quality flags (model data..)
super().__init__(filter_path=filter_path,
export_directory=export_directory,
parameter_mapping=parameter_mapping,
no_qflags=no_qflags,
mapping_objects = mapping_objects)
self.dtype = 'physicalchemicalsatellite'
#==========================================================================
def _calculate_data(self):
self._set_position()
self._add_waterbody_area_info()
def _set_position(self):
"""
Sets the position of the waterbody based on the VISS_EU_CD code. This information is not available in MS_CD, so the column used here should not be self.wb_id_header
:return:
"""
# x=self.column_data[self.source]['VISS_EU_CD'][0]
# print(x[2:4],x[4:6],x[6:8],x[9:11],x[11:13],x[13:15])
# print(int(x[2:4])+int(x[4:6])/60+int(x[6:8])/3600)
# print(int(x[9:11])+int(x[11:13])/60+int(x[13:15])/3600)
# lat=int(x[2:4])+float(x[4:6])/60+float(x[6:8])/60
self.column_data[self.source]['LATIT_DD'] = self.column_data[self.source]['VISS_EU_CD'].apply(lambda x: int(x[2:4])+int(x[4:6])/60+int(x[6:8])/3600 if 'C' not in x else np.nan)
self.column_data[self.source]['LONGI_DD'] = self.column_data[self.source]['VISS_EU_CD'].apply(lambda x: int(x[9:11])+int(x[11:13])/60+int(x[13:15])/3600 if 'C' not in x else np.nan)
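# Illustrative sketch (hypothetical ID, assuming the layout 'SE' + DDMMSS + '-' + DDMMSS
# implied by the slicing above):
#   x = 'SE580500-114500'
#   LATIT_DD = 58 + 05/60 + 00/3600 = 58.0833...
#   LONGI_DD = 11 + 45/60 + 00/3600 = 11.75
# IDs containing the letter 'C' are set to np.nan by the lambdas.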
"""
#==============================================================================
#==============================================================================
"""
class DataHandlerPhysicalChemicalModel(DataFrameHandler):
"""
"""
def __init__(self,
filter_path=u'',
export_directory='',
parameter_mapping=None,
no_qflags=False,
mapping_objects = None): # no_qflags for data that has no quality flags (model data..)
super().__init__()
self.dtype = 'physicalchemicalmodel'
self.export_directory = export_directory
self.read_filter_file(file_path=filter_path)
self.parameter_mapping = parameter_mapping
self.mapping_objects = mapping_objects
self.no_qflags = no_qflags
self.column_data = {} #pd.DataFrame()
self.row_data = {} #pd.DataFrame()
# ==========================================================================
def _add_serno(self):
"""
adds date as serno
:return:
"""
self.column_data[self.source]['SERNO'] = self.column_data[self.source]['SDATE'].copy()
# ==========================================================================
def _add_shipc(self):
"""
adds SCM as shipcode
:return:
"""
self.column_data[self.source]['SHIPC'] = 'SCM'
#==========================================================================
def _calculate_data(self):
"""
Rewritten from parent
If there are no quality flags in data self.no_qflags is initialized
as True
"""
# print('_calculate_data')
if self.no_qflags:
self.calculate_din()
else:
self.calculate_din(ignore_qf_list=['B','S'])
self._add_waterbody_area_info()
self._set_position()
#==========================================================================
def _set_position(self):
"""
Sets the position of the waterbody based on the VISS_EU_CD code. This information is not available in MS_CD, so the column used here should not be self.wb_id_header
:return:
"""
# x=self.column_data[self.source]['VISS_EU_CD'][0]
# print(x[2:4],x[4:6],x[6:8],x[9:11],x[11:13],x[13:15])
# print(int(x[2:4])+int(x[4:6])/60+int(x[6:8])/3600)
# print(int(x[9:11])+int(x[11:13])/60+int(x[13:15])/3600)
# lat=int(x[2:4])+float(x[4:6])/60+float(x[6:8])/60
self.column_data[self.source]['LATIT_DD'] = self.column_data[self.source]['VISS_EU_CD'].apply(lambda x: int(x[2:4])+int(x[4:6])/60+int(x[6:8])/3600 if 'C' not in x else np.nan)
self.column_data[self.source]['LONGI_DD'] = self.column_data[self.source]['VISS_EU_CD'].apply(lambda x: int(x[9:11])+int(x[11:13])/60+int(x[13:15])/3600 if 'C' not in x else np.nan)
#==========================================================================
def calculate_din(self, ignore_qf_list=[]):
"""
Returns a vector of calculated DIN.
If neither NO23 nor NO3 is present, the value is np.nan
"""
din_list = []
for no2, no3, nox, nh4 in zip(*self.get_nxx_lists(ignore_qf_list)):
if np.isnan(nox):
din = np.nan
if not np.isnan(no3):
din = no3
if not np.isnan(no2):
din += no2
if not np.isnan(nh4):
din += nh4
else:
din = nox
if not np.isnan(nh4):
din += nh4
if np.isnan(din):
din=''
else:
din = str(round(din, 2))
din_list.append(din)
if 'DIN' not in self.column_data[self.source]:
self.column_data[self.source]['DIN'] = din_list
else:
self.column_data[self.source]['DIN_calulated'] = din_list
#==========================================================================
def get_float_list(self, key, ignore_qf=[]):
"""
Get all values as floats
"""
return utils.get_float_list_from_str(df=self.column_data[self.source],
key=key, ignore_qf=ignore_qf)
#==========================================================================
def get_nxx_lists(self, ignore_qf_list):
"""
Returns 4 equal-length arrays for NO2, NO3, NO23 and NH4.
If a parameter does not exist in the loaded dataset, an array filled
with NaNs is returned for that specific parameter
"""
if 'NTRI' in self.column_data[self.source]:
ntri = self.get_float_list(key='NTRI', ignore_qf=ignore_qf_list)
else:
ntri = [np.nan]*self.column_data[self.source].shape[0]
if 'NTRA' in self.column_data[self.source]:
ntra = self.get_float_list(key='NTRA', ignore_qf=ignore_qf_list)
else:
ntra = [np.nan]*self.column_data[self.source].shape[0]
if 'NTRZ' in self.column_data[self.source]:
ntrz = self.get_float_list(key='NTRZ', ignore_qf=ignore_qf_list)
else:
ntrz = [np.nan]*self.column_data[self.source].shape[0]
if 'AMON' in self.column_data[self.source]:
amon = self.get_float_list(key='AMON', ignore_qf=ignore_qf_list)
else:
amon = [np.nan]*self.column_data[self.source].shape[0]
return ntri, ntra, ntrz, amon
#==========================================================================
"""
#==============================================================================
#==============================================================================
"""
class DataHandlerZoobenthos(DataFrameHandler):
"""
"""
def __init__(self, filter_path=u'',
export_directory='',
parameter_mapping=None,
mapping_objects = None):
super().__init__()
self.dtype = 'zoobenthos'
self.export_directory = export_directory
self.read_filter_file(file_path=filter_path)
self.parameter_mapping = parameter_mapping
self.mapping_objects = mapping_objects
self.column_data = {} #pd.DataFrame()
self.row_data = {} #pd.DataFrame()
#==========================================================================
def _calculate_data(self):
"""
Rewritten from parent
If there are no quality flags in data self.no_qflags is initialized
as True
"""
self._add_waterbody_area_info()
#self._add_obspoint()
#==========================================================================
def _rename_param(self, original_name, new_name):
"""
Renames rows with original_name to new_name in the parameter_key column
:param original_name: the parameter name that should be changed
:param new_name: the new name for the parameter
:return:
"""
mapping_dict = self.parameter_mapping.get_parameter_mapping([self.filter_parameters.rename_parameter])
temp = self.df.loc[self.df[self.filter_parameters.parameter_key] == original_name,
self.filter_parameters.parameter_key]
self.df.loc[self.df[self.filter_parameters.parameter_key] == original_name,
self.filter_parameters.parameter_key] = new_name
"""
#==============================================================================
#==============================================================================
"""
class DataHandlerChlorophyll(DataFrameHandler):
"""
"""
def __init__(self, filter_path=u'',
export_directory='',
parameter_mapping=None,
mapping_objects = None):
super().__init__()
self.dtype = 'chlorophyll' # Only Tube samples ?
self.export_directory = export_directory
self.read_filter_file(file_path=filter_path)
self.parameter_mapping = parameter_mapping
self.mapping_objects = mapping_objects
self.column_data = {} #pd.DataFrame()
self.row_data = {} #pd.DataFrame()
#==========================================================================
def _calculate_data(self):
"""
Rewritten from parent
If there are no quality flags in data self.no_qflags is initialized
as True
"""
self._add_waterbody_area_info()
#==========================================================================
"""
#==============================================================================
#==============================================================================
"""
class DataHandlerPhytoplankton(DataFrameHandler):
"""
"""
def __init__(self, filter_path=u'',
export_directory='',
parameter_mapping=None,
mapping_objects = None):
super().__init__()
self.dtype = 'phytoplankton'
self.export_directory = export_directory
self.read_filter_file(file_path=filter_path)
self.parameter_mapping = parameter_mapping
self.mapping_objects = mapping_objects
self.column_data = {} #pd.DataFrame()
self.row_data = {} #pd.DataFrame()
# ==========================================================================
def _additional_filter(self):
self._delete_columns_from_df(columns=self.filter_parameters.extra_fields + [self.filter_parameters.value_key])
self._drop_duplicates(based_on_column='SHARKID_MD5')
# TODO: check if this overwrites earlier info and then why
self.filter_parameters.use_parameters = 'BIOV_CONC_ALL'
# ==========================================================================
def _calculate_data(self):
"""
Rewritten from parent
If there are no quality flags in data self.no_qflags is initialized
as True
"""
self._add_waterbody_area_info()
# ==========================================================================
def _extended_filter_for_phytoplanton_data(self):
"""
Selects parameters and TROPHIC-status according to
self.filter_parameters
"""
self.df = utils.set_filter(df=self.df,
filter_dict={self.filter_parameters.parameter_key : self.para_list,
self.filter_parameters.trophic_key : self.filter_parameters.use_trophic},
return_dataframe=True)
#==========================================================================
def _get_total_biovolume(self, samp_key=''):
"""
Created: 2017 by Johannes Johansson
Modified: 20180320 by Lena Viktorsson (changes df.astype(np.float) to pd.to_numeric(df))
"""
# keys could be set in filter_parameters instead..
# print(self.df.get(samp_key).unique)
for sample in self.df.get(samp_key).unique():
boolean = utils.set_filter(df=self.df,
filter_dict={samp_key:sample})
#tot_value = self.df.loc[boolean,self.filter_parameters.value_key].astype(np.float).sum(skipna=True)
tot_value = pd.to_numeric(self.df.loc[boolean,self.filter_parameters.value_key]).sum(skipna=True)
self.df.loc[boolean, self.filter_parameters.add_parameters] = str(tot_value)
#==========================================================================
def _select_parameters(self):
"""
Rewritten from parent-class
"""
#spara undan och sedan delete .extra_fields
# spara undan som kolumnformat
self._check_nr_of_parameters()
p_map, p_list = self._map_parameter_list()
self.para_list = self.parameter_mapping.map_parameter_list(p_list)
for para in p_list:
# Change parameter name according to parameter codelist
#TODO Check if this variant of np.where works with a pandas irregular index..
self.df[self.filter_parameters.parameter_key] = np.where(self.df[self.filter_parameters.parameter_key]==para,
p_map[para],
self.df[self.filter_parameters.parameter_key])
self._extended_filter_for_phytoplanton_data()
self._add_field()
self._get_total_biovolume(samp_key='SHARKID_MD5')
#==========================================================================
"""
#==============================================================================
#==============================================================================
"""
class DataHandler(object):
"""
Class to hold data.
"""
#TODO method for checking undefined data files: which data type applies?
#input from the user or from the data file.. if the data type is not in
#the file, tell the user.. once the data type is known, save the file name to file
#TODO check for duplicates
def __init__(self,
input_data_directory=None,
resource_directory=None,
mapping_objects=None,
wb_id_header=None):
# print(input_data_directory, resource_directory)
assert all([input_data_directory, resource_directory])
super().__init__()
# self.source = source
# self.column_data = pd.DataFrame()
# self.row_data = pd.DataFrame()
self.input_data_directory = input_data_directory
self.resource_directory = resource_directory
# TODO: Maybe WorkSpace should specify these too
self.raw_data_directory = self.input_data_directory + '/raw_data'
self.export_directory = self.input_data_directory + '/exports'
path_parameter_mapping = self.resource_directory + '/mappings/mapping_parameter_dynamic_extended.txt'
path_fields_filter = self.resource_directory + '/filters/'
self.mapping_objects = mapping_objects
self.wb_id_header = wb_id_header
# path_parameter_mapping = current_path + u'/test_data/mappings/mapping_parameter_dynamic_extended.txt'
# path_fields_filter = current_path + u'/test_data/filters/'
self._load_field_mapping(file_path=path_parameter_mapping)
# All datatypes that might include data for setting ecological status
self.all_datatypes = [u'chlorophyll',
u'physicalchemical',
u'physicalchemicalsatellite',
u'physicalchemicalmodel',
u'phytoplankton',
u'zoobenthos']
#TODO put the datatype objects in a dict? separate sources as keys... 'phyche_source' DONE!
self.chlorophyll = DataHandlerChlorophyll(filter_path=path_fields_filter+u'filter_fields_chlorophyll_integrated.txt',
export_directory=self.export_directory,
parameter_mapping=self.parameter_mapping,
mapping_objects = self.mapping_objects)
self.physicalchemical = DataHandlerPhysicalChemical(filter_path=path_fields_filter+'filter_fields_physical_chemical.txt',
export_directory=self.export_directory,
parameter_mapping=self.parameter_mapping,
mapping_objects = self.mapping_objects)
self.physicalchemicalsatellite = DataHandlerPhysicalChemicalSatellite(filter_path=path_fields_filter+'filter_fields_physical_chemical_satellite.txt',
export_directory=self.export_directory,
parameter_mapping=self.parameter_mapping,
mapping_objects = self.mapping_objects)
self.physicalchemicalmodel = DataHandlerPhysicalChemicalModel(filter_path=path_fields_filter+'filter_fields_physical_chemical_model.txt',
export_directory=self.export_directory,
parameter_mapping=self.parameter_mapping,
no_qflags=True,
mapping_objects = self.mapping_objects)
self.phytoplankton = DataHandlerPhytoplankton(filter_path=path_fields_filter+u'filter_fields_phytoplankton.txt',
export_directory=self.export_directory,
parameter_mapping=self.parameter_mapping,
mapping_objects = self.mapping_objects)
self.zoobenthos = DataHandlerZoobenthos(filter_path=path_fields_filter+'filter_fields_zoobenthos.txt',
export_directory=self.export_directory,
parameter_mapping=self.parameter_mapping,
mapping_objects = self.mapping_objects)
self.float_parameters = []
for data_type in [self.__getattribute__(dtype) for dtype in self.all_datatypes]:#[self.chlorophyll, self.physicalchemical, self.physicalchemicalsatellite, self.physicalchemicalmodel, self.phytoplankton, self.zoobenthos]:
if isinstance(data_type.filter_parameters.use_parameters, str):
self.float_parameters = self.float_parameters + [data_type.filter_parameters.use_parameters]
else:
self.float_parameters = self.float_parameters + data_type.filter_parameters.use_parameters
# self.all_data = None
self.all_data = pd.DataFrame() # MW
#==========================================================================
def _load_field_mapping(self, file_path=u''):
"""
"""
self.parameter_mapping = core.ParameterMapping()
self.parameter_mapping.load_mapping_settings(file_path=file_path)
#==========================================================================
def add_df(self, pd_df, data_type, add_columns=False):
"""
Updated 20180828 by Magnus
Adds data to the internal data structure.
"""
# Add columns (time etc.)
# This is never used from here, and should it be?
# This is called from filter_data & add_txt_file; no columns should be added when filtering or adding a df
if add_columns:
self._add_columns(pd_df)
if 'col' in data_type:
self.column_data = self.column_data.append(pd_df, ignore_index=True)
# Remove duplicate rows
self.column_data.drop_duplicates(inplace=True) # MW: 20180828
elif 'row' in data_type:
self.row_data = self.row_data.append(pd_df, ignore_index=True).fillna('')
# Remove duplicate rows
self.row_data.drop_duplicates(inplace=True) # MW: 20180828
# print(self.data_phys_chem.head())
#==========================================================================
# def add_txt_file(self, file_path, data_type):
def add_txt_file(self, file_path, data_type, map_object=None):
data = pd.read_csv(file_path, sep='\t', encoding='cp1252')
if map_object is not None:
map_dict = map_object.get_parameter_mapping( data.columns.values )
data = self._rename_columns_of_DataFrame( data, map_dict )
self.add_df(data, data_type)
# TODO: Check if all is ok
# #==========================================================================
# def filter_data(self, data_filter_object, filter_id=''):
# """
# Filters data according to data_filter_object.
# data_filter_object is a core.filters.DataFilter-object.
# Returns a DataHandler object with the filtered data.
# """
# new_data_handler = DataHandler(self.source + '_filtered_%s' % filter_id)
# if len(self.column_data):
## print( 'data_filter_object', data_filter_object)
# df = self._filter_column_data(self.column_data, data_filter_object)
# if data_filter_object.parameter:
## print('df', df.columns)
## print('data_filter_object.parameter:', data_filter_object.parameter)
# for col in list(df.columns):
# if col not in core.ParameterList().metadata_list + [data_filter_object.parameter]:
# df = df.drop(col, 1)
# new_data_handler.add_df(df, 'column')
# if len(self.row_data):
# df = self._filter_row_data(self.row_data, data_filter_object)
# new_data_handler.add_df(df, 'row')
#
# return new_data_handler
# #==========================================================================
# def _filter_column_data(self, df, data_filter_object):
# """
# Filters column file data and returns resulting dataframe
# """
# #TODO kolla på flera DF ? annan struktur ?
# boolean = data_filter_object.get_column_data_boolean(df)
#
# if not len(boolean):
# return df
# return df.loc[df.index[boolean], :]
#==========================================================================
def get_all_column_data_df(self, boolean_filter=[]):
"""
mw
Returns a pandas dataframe that contains all data in column format.
boolean_filter is a pd.Series. If not given the whole df is returned.
"""
# TODO: what do we return when boolean_filter is False because no filter has been set for the given key?
if len(boolean_filter):
# TODO: Check length
return self.all_data.loc[boolean_filter, :]
else:
return self.all_data
#==========================================================================
def merge_all_data(self, save_to_txt=False):
"""
Created:
Last modified: 20180720 by Magnus Wenzer
- Do we need to sort all_data ?
- Merge data from different datatypes for the same visit ?
"""
self.all_data = pd.DataFrame()
# All datatypes that might include data for setting ecological status
# all_datatypes = [u'chlorophyll',
# u'physicalchemical',
# u'physicalchemicalsatellite',
# u'physicalchemicalmodel',
# u'phytoplankton',
# u'zoobenthos']
# TODO: where should we check mandatory keys? and where should they be read in?
mandatory_keys = []#['DEPH']
for dtype in self.all_datatypes:
if dtype in dir(self):
# print(dtype)
# print(self.__getattribute__(dtype).column_data)
# Appends dataframes from each datatype into one dataframe
for source in self.__getattribute__(dtype).column_data:
# Each datatype might have multiple sources..
# .column_data is a dict in each datatypes DataFrameHandler object
df = self.__getattribute__(dtype).column_data[source]
if not all([item in df.columns for item in mandatory_keys]):
raise exceptions.MissingKeyInData(message=os.path.basename(source))
if any(df.columns.duplicated()):
print('duplicates in data from source {} \n duplicate columns {}'.format(source, df[df.columns.duplicated()]))
raise exceptions.MissingKeyInData(message=os.path.basename(source))
self.all_data = self.all_data.append(df,
ignore_index=True)
if not len(self.all_data):
print('No data available after "merge_all_data"!')
return False
# Save pkl-file for all_data_raw. Updated 20180525 by Magnus Wenzer
sld_object = core.SaveLoadDelete(self.export_directory)
sld_object.save_df(self.all_data, file_name='all_data_raw', force_save_txt=True, only_pkl=not save_to_txt)
# pickle.dump(self.all_data, open(self.export_directory + "/all_data_raw.pickle", "wb"))
# if save_to_txt:
# save_data_file(df=self.all_data,
# directory=self.export_directory,
# file_name='all_data.txt')
# Load data again. This way we can treat new and old
#"self.all_data" the same way
self.all_data = pd.DataFrame()
self.load_all_datatxt()
#==========================================================================
def load_datatypetxt(self, datatype, sep='\t', encoding='cp1252'):
"""
loads existing data files for the given datatype from export directory (from pickle if existing, otherwise from txt)
Created: 20180422 by Magnus Wenzer
Last modified: 20180422 by Magnus Wenzer
"""
# Column data file
try:
file_path = '{}/column_format_{}_data.pickle'.format(self.export_directory, datatype)
# pd_df = pickle.load(open(file_path, "rb"))
# self.add_df(pd_df, data_type) # here data_type is row or col
# TODO: should this really say self.column_data = ? It will then replace anything already in self.column_data with new content.
# self.column_data = pickle.load(open(file_path, "rb"))
self.__getattribute__(datatype).column_data = pickle.load(open(file_path, "rb"))
return True
except (OSError, IOError) as e:
return False
# try:
# file_path = '{}/column_format_{}_data.txt'.format(self.export_directory, datatype)
# self.column_data = load_data_file(file_path)
# except:
# return False
# # Raw data file
# file_path = '{}/raw_format_{}_data.txt'.format(self.export_directory, datatype)
# try:
# self.row_data = load_data_file(file_path)
# except (OSError, IOError) as e:
# return False
#
# return True
#==========================================================================
def load_all_datatxt(self, sep='\t', encoding='cp1252'):
"""
loads existing all_data file from export directory (from pickle if existing, otherwise from txt)
Created: 20180318 by Lena Viktorsson
Last modified: 20180525 by Magnus Wenzer
"""
def float_convert(x):
try:
return float(x)
except:
# print('float_convert')
return np.nan
def str_convert(x):
x = str(x)
if x == 'nan':
x = ''
return x
# print('self.all_data', len(self.all_data))
if len(self.all_data):
print('self.all_data length', len(self.all_data), 'continue to load all_data')
# return False, False
else:
sld_object = core.SaveLoadDelete(self.export_directory) # 20180525 by Magnus Wenzer
try:
self.all_data = sld_object.load_df('all_data', load_txt=False) # 20180525 by Magnus Wenzer
# print()
# with open(self.export_directory + "/all_data.pkl", "rb") as fid:
# self.all_data = pickle.load(fid)
filetype = 'pickle'
print('all_data loaded from pickle')
except (FileNotFoundError, UnboundLocalError) as e:
# UnboundLocalError is for when df was not created in sld_object.load_df()
print('setting up all_data from all_data_raw.pkl')
try:
self.all_data = sld_object.load_df('all_data_raw', load_txt=False) # 20180525 by Magnus Wenzer
# self.all_data = pickle.load(open(self.export_directory + "/all_data_raw.pickle", "rb"))
except (OSError, IOError) as e:
raise OSError('Raw data pickle file does not exist! It is created in "merge_all_data".')
# self.all_data = load_data_file(self.export_directory + '/all_data.txt')
# self.all_data = core.Load().load_txt(self.export_directory + '/all_data.txt', sep=sep, encoding=encoding, fill_nan=u'')
#TODO: better way to say which columns should be converted to float and int?
self.all_data['MONTH'] = self.all_data['SDATE'].apply(lambda x: int(x[5:7]))
self.all_data['YEAR'] = self.all_data['SDATE'].apply(lambda x: int(x[0:4]))
# try:
# self.all_data['MYEAR'] = self.all_data['MYEAR'].astype(int)
# except KeyError:
self.all_data['MYEAR'] = self.all_data['YEAR']
# self.all_data['YEAR'] = self.all_data['SDATE'].apply(lambda x: int(x[0:4])).astype(int)
# TODO: does not work with only datatypes that does not have column DEPH, example zoobenthos
self.all_data['DEPH'] = self.all_data['DEPH'].apply(lambda x: float(x) if x else np.nan)
self.all_data['POSITION'] = self.all_data.apply(lambda x: '{0:.2f}'.format(float_convert(x.LATIT_DD)) + '_' + '{0:.2f}'.format(float_convert(x.LONGI_DD)), axis = 1)
if 'STATN' not in self.all_data.columns:
self.all_data['STATN'] = self.all_data[self.wb_id_header]
statn = self.all_data.STATN.tolist()
pos = self.all_data.POSITION.tolist()
for i, x in enumerate(statn):
if x == "":
statn[i] = pos[i]
# set all station names to uppercase to limit number of synonyms
self.all_data['STATN'] = [s.upper() for s in statn]
if 'MNDEP' not in self.all_data.columns:
self.all_data['MNDEP'] = np.nan
self.all_data['MXDEP'] = np.nan
# MW: Add visit_id
# TODO: in all places where this is used change to use sample_id instead and remove this
self.all_data['visit_id_str'] = self.all_data[self.wb_id_header] + \
self.all_data['POSITION'] + \
self.all_data['SDATE'] + \
self.all_data['STIME']
for col in self.all_data.columns:
if col.startswith('Q_'):
par = col[2:]
self.all_data[par] = self.all_data[par].apply(float_convert)
self.all_data[col] = self.all_data[col].apply(str_convert)
# TODO: send info to user
elif col in ['DIN', 'CPHL_BTL', 'CPHL_SAT','WADEP', 'MNDEP', 'MXDEP']:
self.all_data[col] = self.all_data[col].apply(float_convert)
elif col in self.float_parameters:
self.all_data[col] = self.all_data[col].apply(float_convert)
elif self.wb_id_header == 'VISS_EU_CD' and col == self.wb_id_header:
self.all_data[col] = self.all_data[col].apply(lambda x: 'SE' + x if 'SE' not in x else x)
else:
pass
self.all_data['STIME'] = self.all_data['STIME'].apply(lambda x: x[:5])
# MW 20180716
# TODO: Speed up; the problem here is a few data rows with day 00. Maybe find those, exclude them and then do pd.to_datetime
try:
self.all_data['date'] = pd.to_datetime(self.all_data['SDATE'])
except ValueError:
remove_index = []
for row_index in self.all_data.index:
try:
pd.to_datetime(self.all_data.iloc[row_index].SDATE)
except ValueError:
#self.all_data.loc[row_index, 'SDATE'] = ''
remove_index.append(row_index)
sld_object = core.SaveLoadDelete(self.export_directory)
sld_object.save_df(self.all_data.iloc[remove_index], 'removed__before_saving_all_data')
self.all_data.drop(remove_index, inplace = True)
self.all_data['date'] = pd.to_datetime(self.all_data['SDATE'])
# MW: Add prioritized salinity
self._add_prioritized_parameter('SALT', 'SALT_BTL', 'SALT_CTD')
# MW: Add prioritized temperature
self._add_prioritized_parameter('TEMP', 'TEMP_BTL', 'TEMP_CTD')
# MW: Add prioritized oxygen
self._add_prioritized_parameter('DOXY', 'DOXY_BTL', 'DOXY_CTD')
if 'CPHL_BTL' in self.all_data.columns:
# MW: Add integrated chlorophyll from CHPL_BTL
self._add_integrated_calc(use_par='CPHL_BTL',
new_par='CPHL_INTEG_CALC',
depth_interval=[0, 10],
exclude_qf=[u'?',u'B',u'S'],
min_nr_values=2)
self._add_waterbody_area_info()
sld_object.save_df(self.all_data, file_name='all_data', force_save_txt=True, only_pkl=False) # 20180525 by Magnus Wenzer
filetype = 'txt'
print('all_data loaded from txt and new parameters added')
return True, filetype
#==========================================================================
def _add_prioritized_parameter(self, new_par, primary_par, secondary_par, exclude_qf=['B', 'S']):
"""
Created: 20180413 by Magnus Wenzer
Last modified: 20180419 by Magnus Wenzer
Adds the parameter <new_par> by combining primary_par and secondary_par.
The first of the two whose value does not carry a quality flag listed in exclude_qf
is prioritized.
Three columns are added to self.all_data:
<new_par>
Q_<new_par>
source_<new_par>
"""
t0 = time.time()
primary_par_qf = 'Q_' + primary_par
secondary_par_qf = 'Q_' + secondary_par
q_new_par = 'Q_'+new_par
source_new_par = 'source_'+new_par
if not all(item in self.all_data.columns
for item in [primary_par, primary_par_qf, secondary_par, secondary_par_qf]):
if all(item in self.all_data.columns for item in [primary_par, secondary_par]):
print('both parameters {} and {} in data but no q_flags'.format(primary_par, secondary_par))
elif primary_par in self.all_data.columns and secondary_par not in self.all_data.columns:
self.all_data[new_par] = self.all_data[primary_par].copy()
self.all_data[source_new_par] = primary_par
return True
elif secondary_par in self.all_data.columns and primary_par not in self.all_data.columns:
self.all_data[new_par] = self.all_data[secondary_par].copy()
self.all_data[source_new_par] = secondary_par
return True
else:
return False
self.all_data[new_par] = np.nan
self.all_data[q_new_par] = ''
self.all_data[source_new_par] = ''
# Find where primary is valid
primary_valid = ~pd.isnull(self.all_data[primary_par]) & \
~self.all_data[primary_par_qf].isin(exclude_qf)
# Add where primary is valid
self.all_data.loc[primary_valid, new_par] = self.all_data.loc[primary_valid, primary_par]
self.all_data.loc[primary_valid, q_new_par] = self.all_data.loc[primary_valid, primary_par_qf]
self.all_data.loc[primary_valid, source_new_par] = primary_par
# Find where primary is not valid and secondary is
add_secondary_valid = ~pd.isnull(self.all_data[secondary_par]) & \
~self.all_data[secondary_par_qf].isin(exclude_qf) & \
~primary_valid
# Add where primary is not valid and secondary is
self.all_data.loc[add_secondary_valid, new_par] = self.all_data.loc[add_secondary_valid, secondary_par]
self.all_data.loc[add_secondary_valid, q_new_par] = self.all_data.loc[add_secondary_valid, secondary_par_qf]
self.all_data.loc[add_secondary_valid, source_new_par] = secondary_par
print('time for _add_prioritized_parameter {} is: {}'.format(new_par, time.time()-t0))
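# Illustrative sketch (hypothetical values, not from the original code): with
# new_par='SALT', primary_par='SALT_BTL', secondary_par='SALT_CTD' and the default
# exclude_qf=['B', 'S'],
#   a row with SALT_BTL=7.1 (Q_SALT_BTL='')                         -> SALT=7.1, source_SALT='SALT_BTL'
#   a row with SALT_BTL=7.1 (Q_SALT_BTL='B') and SALT_CTD=7.0 ('')  -> SALT=7.0, source_SALT='SALT_CTD'
#   a row where both values carry 'B' or 'S'                        -> SALT stays np.nan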
def _add_waterbody_area_info(self):
pass
# This is done in DataFrameHandler, but why not here?
#TODO:
# add if MS_CD, VISS_EU_CD; not in df.columns add them from vfk-kod kolumn
# wb_id_list = self.all_data[self.wb_id_header].tolist()
# # wd_id = self.mapping_objects['water_body'].get_waterdistrictcode_for_water_body(wb_id_list[0])
# if 'WATER_DISTRICT_CODE' not in self.all_data:
# new_list = []
# for wb_id in wb_id_list:
# wd_id = self.mapping_objects['water_body'].get_waterdistrictcode_for_water_body(wb_id)
# new_list.append(wd_id)
# self.all_data['WATER_DISTRICT_CODE'] = new_list
# if 'WATER_DISTRICT_NAME' not in self.all_data:
# new_list = []
# for wb_id in wb_id_list:
# wd_name = self.mapping_objects['water_body'].get_waterdistrictname_for_water_body(wb_id)
# new_list.append(wd_name)
# self.all_data['WATER_DISTRICT_NAME'] = new_list
# if 'WATER_TYPE_AREA' not in self.all_data:
# new_list = []
# for wb_id in wb_id_list:
# type_name = self.mapping_objects['water_body'].get_type_area_name_for_water_body(wb_id)
# new_list.append(type_name)
# self.all_data['WATER_TYPE_AREA'] = new_list
# if 'WATER_BODY_NAME' not in self.all_data:
# new_list = []
# for wb_id in wb_id_list:
# wb_name = self.mapping_objects['water_body'].get_name_for_water_body(wb_id)
# new_list.append(wb_name)
# self.all_data['WATER_BODY_NAME'] = new_list
#===========================================================================
def get_exclude_index_array(self, df):
"""
Created: 20180423 by Magnus Wenzer
Last modified: 20180423 by Magnus Wenzer
"""
exclude_list = []
for col in df.columns:
if 'Q_' in col:
exclude_list.append(col[2:])
exclude_list.append(col)
elif 'source' in col:
exclude_list.append(col)
elif 'DIN' in col:
exclude_list.append(col)
elif 'DEPH' in col:
exclude_list.append(col)
exclude_index_list = [True if par in exclude_list else False for par in df.columns]
return np.array(exclude_index_list)
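# Illustrative example (hypothetical columns): for df.columns =
# ['TEMP', 'Q_TEMP', 'source_TEMP', 'SALT'] the method returns
# np.array([True, True, True, False]): 'TEMP' is excluded because a matching
# 'Q_TEMP' column exists, 'Q_TEMP' and 'source_TEMP' are excluded by name,
# and 'SALT' is kept.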
#===========================================================================
def _add_integrated_calc(self,
use_par=None,
new_par=None,
depth_interval=[0, 10],
exclude_qf=[u'?',u'B',u'S'],
min_nr_values=2):
"""
Created: 20180423 by Magnus Wenzer
Last modified: 20180423 by Magnus Wenzer
"""
#----------------------------------------------------------------------
def calculate(df):
if len(df) < min_nr_values:
#print(len(df))
return False
# Extract data lists
depth_list = list(df['DEPH'].values)
value_list = list(df[use_par].values)
t_calc_integ = time.time()
mean_value = utils.get_integrated_mean(depth_list,
value_list,
depth_interval)
time_list_calc_integ.append(time.time() - t_calc_integ)
t_add_row = time.time()
# Add info to row
new_row_series = df.loc[df.index[0], :].copy(deep=True)
new_row_series[new_par] = mean_value
new_row_series[new_par_depths] = ';'.join(map(str, depth_list))
new_row_series[new_par_values] = ';'.join(map(str, value_list))
new_row_series['MNDEP'] = depth_interval[0]
new_row_series['MXDEP'] = depth_interval[1]
#print('df.columns', len(df.columns))
#print(df.columns)
new_row = np.array(new_row_series)
# sets the other (with Q_flag, DIN and DEPH) parameters to nan
new_row[exclude_index_array] = np.nan
new_list_to_append.append(list(new_row))
time_list_add_row.append(time.time() - t_add_row)
return True
#----------------------------------------------------------------------
new_par_depths = new_par + '_depths'
new_par_values = new_par + '_values'
new_list_to_append = [] # list of lists with the new rows to be added to all_data once all calculations are done
# new_df = pd.DataFrame(columns=all_data.columns)
time_list_group_data = []
time_list_calc_integ = []
time_list_add_row = []
t_tot = time.time()
t_preparations = time.time()
# Add result columns
self.all_data[new_par] = np.nan
self.all_data[new_par_depths] = np.nan
self.all_data[new_par_values] = np.nan
exclude_index_array = self.get_exclude_index_array(self.all_data)
# print(len(exclude_index_array))
# print(len(all_data.columns))
# Narrow the data to only include lines where par is present and depth is in range
use_par_boolean = ~self.all_data[use_par].isnull()
depth_boolean = (self.all_data['DEPH'] >= depth_interval[0]) & \
(self.all_data['DEPH'] <= depth_interval[1])
active_boolean = use_par_boolean & depth_boolean
time_preparations = time.time() - t_preparations
t_group_data = time.time()
grouped_data = self.all_data.loc[active_boolean, :].groupby('visit_id_str')
time_list_group_data.append(time.time() - t_group_data)
t_iterator = time.time()
calculations = (calculate(group) for visit_id, group in grouped_data)
time_iterator = time.time() - t_iterator
t_all_calculation = time.time()
result = list(calculations)
time_all_calculation = time.time() - t_all_calculation
# Add new rows to self.all_data
t_add_data = time.time()
add_lines_df = pd.DataFrame(new_list_to_append, columns=self.all_data.columns)
self.all_data = self.all_data.append(add_lines_df)
self.all_data.reset_index(drop=True, inplace=True)
time_add_data = time.time() - t_add_data
time_total = time.time() - t_tot
print('-'*50)
print('Total time:', time_total)
print('time_preparations'.ljust(30), time_preparations)
print('time_list_group_data:'.ljust(30), sum(time_list_group_data))
print('time_list_calc_integ:'.ljust(30), sum(time_list_calc_integ))
print('time_list_add_row:'.ljust(30), sum(time_list_add_row))
print('time_all_calculations:'.ljust(30), time_all_calculation)
print('time_iterator:'.ljust(30), time_iterator)
print('time_add_data:'.ljust(30), time_add_data)
print('Done adding integrated_calc "{}" using parameter "{}"'.format(new_par, use_par))
print('time for integrated_calc "{}" using parameter "{} is: {}'.format(new_par, use_par, time_total))
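# Minimal sketch of the integrated mean used above (the real work is done by
# utils.get_integrated_mean, whose behaviour is assumed here to match the
# commented-out trapezoid code further down in this module):
#   depths = [0, 5, 10], values = [2.0, 4.0, 6.0], depth_interval = [0, 10]
#   segments: 0.5*(2+4)*5 = 15 and 0.5*(4+6)*5 = 25  ->  mean = (15+25)/10 = 4.0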
#===========================================================================
def old_add_integrated_calc(self,
par,
new_par_name,
depth_interval=[0, 10],
exclude_qf=[u'?',u'B',u'S'],
min_nr_values=2):
"""
Created: 20180420 by Magnus Wenzer
Last modified: 20180420 by Magnus Wenzer
"""
def calculate(current_visit_id):
# print(current_visit_id)
visit_boolean = self.all_data['visit_id_str'] == current_visit_id
index = par_boolean & visit_boolean
# Extract data lists
depth_list = list(self.all_data.loc[index, 'DEPH'].values)
value_list = list(self.all_data.loc[index, par].values)
# Continue if not enough data to calculate
# if len(depth_list) < min_nr_values:
# return False
mean_value = utils.get_integrated_mean(depth_list,
value_list,
depth_interval)
new_row = []
for parameter, value in zip(self.all_data.columns, self.all_data.loc[visit_boolean,:].values[0]):
if parameter == 'MNDEP':
new_row.append(depth_interval[0])
elif parameter == 'MXDEP':
new_row.append(depth_interval[1])
elif parameter == new_par_name:
new_row.append(mean_value)
elif parameter == new_par_name_depth:
new_row.append(';'.join(map(str, depth_list)))
elif parameter == new_par_name_values:
new_row.append(';'.join(map(str, value_list)))
elif parameter in exclude_list:
new_row.append(np.nan)
else:
new_row.append(value)
# print(len(self.all_data)+1)
self.all_data.loc[max(self.all_data.index)+1, :] = new_row
return True
new_par_name_depth = new_par_name + '_depths'
new_par_name_values = new_par_name + '_values'
# Add new columns to dataframe
self.all_data[new_par_name] = np.nan
self.all_data[new_par_name_depth] = ''
self.all_data[new_par_name_values] = ''
# Check columns to exclude in row
exclude_list = []
for item in self.all_data.columns:
if item.startswith('Q_'):
exclude_list.append(item[2:])
exclude_list.append(item)
elif item.startswith('source_'):
exclude_list.append(item)
# Create boolen where par has values
par_boolean = ~self.all_data[par].isnull()
#----------------------------------------------------------------------
# Depth boolean to reduce nr of unique visits.
# This has to be removed/changed if halocline depth should be used
# instead of fixed depth interval.
# OBS! also used below!
depth_boolean = (self.all_data['DEPH'] >= depth_interval[0]) & \
(self.all_data['DEPH'] <= depth_interval[1])
par_boolean = par_boolean & depth_boolean
#----------------------------------------------------------------------
# Get list of unique visits
unique_visit_id_list = list(set(self.all_data.loc[par_boolean, 'visit_id_str']))
temp = list(map(calculate, unique_visit_id_list))
# return
## # Get next index in self.all_data . Increment this after adding new line to save time
## next_index = max(self.all_data.index) + 1
##
##
## #----------------------------------------------------------------------
## input_dict = {'current_visit_id': current_visit_id,
## }
##
##
## df_list = [by_year_pos.loc[by_year_pos.YEAR == year]['position_mean']]*n
## def bootstrap(df):
## return df.sample(frac = 1, replace = True).mean()
##
## BQIsim = map(bootstrap, df_list)
# #----------------------------------------------------------------------
#
#
#
#
# # Loop unique visits
# for k, current_visit_id in enumerate(unique_visit_id_list):
# if not k%100:
# print(k, current_visit_id)
## # Create boolen where par has values
## par_boolean = ~self.all_data[par].isnull()
##
## #----------------------------------------------------------------------
## # Depth boolean to reduce nr of unique visits.
## # This has to be removed/changed if halocline depth should be used
## # instead of fixed depth interval.
## # OBS! also used below!
## depth_boolean = (self.all_data['DEPH'] >= depth_interval[0]) & \
## (self.all_data['DEPH'] <= depth_interval[1])
## par_boolean = par_boolean & depth_boolean
## #----------------------------------------------------------------------
#
#
# visit_boolean = self.all_data['visit_id_str'] == current_visit_id
# index = par_boolean & visit_boolean
#
# # Extrac data lists
# depth_list = list(self.all_data.loc[index, 'DEPH'].values)
# value_list = list(self.all_data.loc[index, par].values)
#
# # Continue if not enough data to calculate
# if len(depth_list) < min_nr_values:
# continue
#
## #--------------------------------------------------------------
## par_boolean = ~self.all_data['CPHL_BTL'].isnull()
##
## depth_boolean = (self.all_data['DEPH'] >= depth_interval[0]) & \
## (self.all_data['DEPH'] <= depth_interval[1])
## par_boolean = par_boolean & depth_boolean
##
## visit_boolean = self.all_data['visit_id_str'] == '58.9113311.187502017-08-0111:40'
##
## print('='*50)
## print('1')
## print('='*50)
## print(self.all_data.loc[visit_boolean & par_boolean, ['index_column', 'DEPH', 'CPHL_BTL', 'Q_CPHL_BTL']])
## print('-'*50)
## #--------------------------------------------------------------
# #--------------------------------------------------------------
#
## print('='*50)
## print('2')
## print('='*50)
## print(self.all_data.loc[index, ['index_column', 'DEPH', 'CPHL_BTL', 'Q_CPHL_BTL']])
## print('-'*50)
## #--------------------------------------------------------------
##
##
## print('-'*50)
## print(current_visit_id)
## print(par)
## print(np.where(visit_boolean))
## print(np.where(visit_boolean))
## print(depth_list)
## print(value_list)
## print(depth_interval)
## print(len(self.all_data) )
## print(len(par_boolean))
#
# mean_value = utils.get_integrated_mean(depth_list,
# value_list,
# depth_interval)
#
# new_row = []
# for parameter, value in zip(self.all_data.columns, self.all_data.loc[visit_boolean,:].values[0]):
# if parameter == 'MNDEP':
# new_row.append(depth_interval[0])
# elif parameter == 'MXDEP':
# new_row.append(depth_interval[1])
# elif parameter == new_par_name:
# new_row.append(mean_value)
# elif parameter == new_par_name_depth:
# new_row.append(';'.join(map(str, depth_list)))
# elif parameter == new_par_name_values:
# new_row.append(';'.join(map(str, value_list)))
# elif parameter in exclude_list:
# new_row.append(np.nan)
# else:
# new_row.append(value)
#
# self.all_data.loc[next_index, :] = new_row
#
# next_index += 1
# class Calculations():
# def __init__(self):
# pass
#
# based_on_par_boolean = ~self.all_data[based_on_par].isnull()
#
#
#
#
# depths = self.get_float_array(u'DEPH', ignore_qf=exclude_qf)
# index = np.where((depths >= depth_interval[0]) & (depths <= depth_interval[-1]))[0]
# depths = depths[index]
# values = self.get_float_array(par, ignore_qf=exclude_qf)[index]
#
#
# # First remove empty values and nan
# missing_data_at_depth = []
# depth_list = []
# value_list = []
# for d, v in zip(depths, values):
# if not np.isnan(d) and not np.isnan(v):
# depth_list.append(d)
# value_list.append(v)
# else:
# missing_data_at_depth.append(d)
#
# sum_list = []
# if len(depth_list) >= min_nr_values:
# # Make sure to integrate the whole surface lager if selected
# if depth_list[0] != depth_interval[0]:
# depth_list.insert(0, depth_interval[0])
# value_list.insert(0, value_list[0])
# if depth_list[-1] != depth_interval[-1]:
# depth_list.append(depth_interval[-1])
# value_list.append(value_list[-1])
#
# for z0, z1, v0, v1 in zip(depth_list[:-1], depth_list[1:],
# value_list[:-1], value_list[1:]):
#
# part_sum = 0.5*(v1+v0)*(z1-z0)
#
# sum_list.append(part_sum)
#
# mean_value = sum(sum_list)/(depth_list[-1]-depth_list[0])
# else:
# if missing_value != None:
# mean_value = missing_value
# else:
# mean_value = np.nan
#
# calculations = Calculations()
# calculations.exclude_qf = exclude_qf
# calculations.min_nr_values = min_nr_values
# calculations.depth_interval = depth_interval
# calculations.used_values = [round(v, 2) for v in value_list]
# calculations.used_depths = depth_list
# calculations.nr_values_used = len(calculations.used_values)
# calculations.segments = sum_list
# calculations.missing_data_at_depth = missing_data_at_depth
# calculations.value = mean_value
#
# return calculations
#==========================================================================
def load_data(self, directory):
try:
column_file_path = directory + '/column_data.txt'
self.column_data = pd.read_csv(column_file_path, sep='\t', encoding='cp1252')
except:
pass
try:
row_file_path = directory + '/row_data.txt'
self.row_data = pd.read_csv(row_file_path, sep='\t', encoding='cp1252')
except:
pass
if __name__ == '__main__':
print('='*50)
print('Running module "data_handler.py"')
print('-'*50)
print('')
#
# raw_data_file_path = 'D:/Utveckling/g_EKOSTAT_tool/test_data/raw_data/data_BAS_2000-2009.txt'
# first_filter_directory = 'D:/Utveckling/g_EKOSTAT_tool/test_data/filtered_data'
# Handler
# raw_data = core.DataHandler('raw')
# raw_data.add_txt_file(raw_data_file_path, data_type='column')
#
print('-'*50)
print('done')
print('-'*50)
|
ekostat/ekostat_calculator
|
core/data_handlers.py
|
Python
|
mit
| 96,099
|
[
"NetCDF",
"VisIt"
] |
0e6fc72b41e21f56c261e2a006d620ad16de87786b6a8485d952eb46d26126b7
|
# Copyright (C) 2015 Philipp Baumgaertel
# All rights reserved.
#
# This software may be modified and distributed under the terms
# of the BSD license. See the LICENSE.txt file for details.
from itertools import combinations
import numpy as np
from scipy.misc import derivative
from skgpuppy.FFNI import PropagateMoments
#from hashlib import sha1
#tuple seems the fastes way to create hashables from small numpy arrays
hashable = tuple
#hashable = lambda thing: sha1(thing).hexdigest()
# class hashable:
# def __init__(self,thing):
# self._hash = hash(sha1(thing).hexdigest())
# self._thing = thing
#
# def __hash__(self):
# return self._hash
# def __eq__(self, other):
# return np.all(self._thing == other._thing)
def _setpartition(iterable, n=2):
"""
Gets the set partitions into pairs for Isserlis' theorem
:param iterable: Iterable
:param n: number of elements in each set
:return:
"""
iterable = list(iterable)
partitions = combinations(combinations(iterable, r=n), r=len(iterable) // n)
for partition in partitions:
seen = set()
for group in partition:
if seen.intersection(group):
break
seen.update(group)
else:
yield partition
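# Illustrative example (not in the original source): the pair partitions used
# by Isserlis' theorem for four variables are
#   list(_setpartition([1, 2, 3, 4]))
#   -> [((1, 2), (3, 4)), ((1, 3), (2, 4)), ((1, 4), (2, 3))]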
def _fast_isserli(powerlist,Sigma_x):
"""
Get higher order mixed centralized moments of a multivariate Gaussian using Isserlis' theorem
http://en.wikipedia.org/wiki/Isserlis%27_theorem
This is optimized by assuming the x_i to be independent:
:math:`E[(x-\mu_x)^{k_x} (y-\mu_y)^{k_y}] = E[(x-\mu_x)^{k_x}] \cdot E[(y-\mu_y)^{k_y}]` => no need for the full Isserlis sum
:param powerlist: list of powers of the random variables of the multivariate normal
:param Sigma_x: The covariance matrix
:return:
"""
from scipy.misc import factorial2
if powerlist.sum() % 2 != 0:
#Odd order
return 0
for power in powerlist:
if power % 2 != 0:
return 0
part = 1.0
for i,power in enumerate(powerlist):
part *= Sigma_x[i][i]**(power/2)
part *= factorial2(power-1, exact=True)
return part
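# Illustrative example (not in the original source): for independent x and y with
# powerlist = np.array([2, 2]) and Sigma_x = [[s2, 0], [0, t2]] the loop above gives
# s2 * factorial2(1) * t2 * factorial2(1) = s2 * t2, matching
# E[(x-mu_x)^2 (y-mu_y)^2] = Var(x) * Var(y) for independent variables.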
def _Isserli(powerlist, Sigma_x, diagonal=True):
"""
Get higher order mixed centralized moments of a multivariate Gaussian using Isserlis' theorem
http://en.wikipedia.org/wiki/Isserlis%27_theorem
:param powerlist: list of powers of the random variables of the multivariate normal
:param Sigma_x: The covariance matrix
:return:
"""
v = list(range(powerlist.sum()))
if len(v) % 2 != 0:
#Odd order
return 0
v1 = []
for i,power in enumerate(powerlist):
for j in range(power):
v1.append(i)
if diagonal:
for power in powerlist:
if power%2 != 0:
return 0
result = 0
count = 0
for s in _setpartition(v):
part = 1
# groups = []
for group in s:
# groups.append((v1[group[0]],v1[group[1]])) # Just for output
part *= Sigma_x[v1[group[0]]][v1[group[1]]]
if part == 0:
break
# if part != 0:
# count += 1
# print groups
result += part
# print count
return result
class _ndderivative(object):
"""
Class to calculate multidimensional derivatives of arbitrary mixed order.
Function calls are being cached for expensive functions.
"""
def __init__(self,func):
class f_class(object):
"""
Wrapper to cache the function values
"""
def __init__(self,func):
self.calls = 0
self.cache = {} # => cut the number of function calls from 284 down to 37 (even better?)
self.func = func
def __call__(self,x):
tx = tuple(x)
if tx not in self.cache:
self.calls += 1
self.cache[tx] = self.func(x)
return self.cache[tx]
self.func = f_class(func)
def ndderivative(self,mean,powerlist,dx=1e-2):
"""
:param mean: The mean
:param powerlist: list of powers for the differentiation of each variable (diff is order agnostic)
:param dx: the distance
:return:
"""
def derive_1d(func, i,n):
"""
:param func: The function to derive (must accept arbitrary arguments)
:param i: number of the dimension to derive
:param n: order of derivation
:return: derived function
"""
def derived_func(mean):
def f(x,mean,i):
m = mean[:]
m[i] = x
return func(m)
if n%2 == 0:
order = n+1
else:
order = n+2
return derivative(f,mean[i], dx=dx, args=(mean,i),n=n,order=order)
return derived_func
f = self.func
for i,power in enumerate(powerlist):
if power != 0:
f = derive_1d(f, i, n=power)
return f(mean)
def _get_powerlists(order,dims,leq=False,powerlist = None):
"""
:param order: Get the powerlists of a Taylor series for that specific order
:param dims: Number of dimensions
:param leq: return the powerlists for all orders <= order, or just for that specific order
:param powerlist: Just for recursion
:return:
"""
results = []
if powerlist is None:
powerlist = []
sum_so_far = sum(powerlist)
for i in range(order+1-sum_so_far):
p = powerlist[:]
p.append(i)
if len(p) == dims:
if sum(p) == order or leq:
#if leq is True, we generate all powerlists with leq order => required for the Taylor series
# => for the Taylor series, we just have to sum up the results for all powerlists
results.append(np.array(p))
else:
powerlists = _get_powerlists(order,dims,powerlist=p,leq=leq)
results.extend(powerlists)
return results
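# Illustrative example (not in the original source):
#   _get_powerlists(2, 2)           -> [array([0, 2]), array([1, 1]), array([2, 0])]
#   _get_powerlists(2, 2, leq=True) -> additionally includes the order-0 and
#                                      order-1 lists [0, 0], [0, 1] and [1, 0]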
class TaylorPropagation(PropagateMoments):
"""
Class to perform error propagation using Taylor Series
"""
def __init__(self, func, mean, order, dx=1e-3):
"""
:param func: (n-d) function to approximate
:param mean: approximate around this mean vector
:param order: order of the taylor series
:param dx: step size for the derivatives
"""
PropagateMoments.__init__(self, func, mean)
self.dims = len(mean)
self.order = order
self.dx = dx
self.powerlists = _get_powerlists(self.order, self.dims, leq=True)
self.derivatives = []
nddev = _ndderivative(func)
for powerlist in self.powerlists:
self.derivatives.append(nddev.ndderivative(self.mean, powerlist, dx=self.dx))
self.termlist = []
for i, powerlist in enumerate(self.powerlists):
term = self.derivatives[i] # _ndderivative(self.func,self.mean,powerlist,dx=self.dx)
term /= self._factorials(powerlist)
self.termlist.append(term)
print("Function calls: ", nddev.func.calls)
# import matplotlib.pyplot as plt
# xs = np.array(nddev.func.cache.keys())
#
# plt.scatter(xs.T[0],xs.T[1])
# plt.title('Output PDF')
# plt.show()
def __call__(self,x):
return self.estimate(x)
def estimate_many(self,x_list):
"""
Estimate the value of the approximated function at several x
:param x_list:
:return: Approximated value of func at the x values
"""
results = []
for x in x_list:
results.append(self.estimate(x))
return results
def estimate(self,x):
"""
Estimate the value of the approximated function at x
:param x:
:return: Approximated value of func at x
"""
result = 0.0
for i,powerlist in enumerate(self.powerlists):
term = 1.0
for j,p in enumerate(powerlist):
term *= (x[j] - self.mean[j])**p
result += term*self.termlist[i]
return result
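# Usage sketch (illustrative, not part of the original module):
#   tp = TaylorPropagation(lambda x: x[0]**2, mean=[1.0], order=2)
#   tp.estimate([1.1])   # second-order expansion of x^2 around 1.0: ~1.21 (up to numerical-derivative noise)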
def _factorials(self,powerlist):
from scipy.misc import factorial
n = 1.0
for p in powerlist:
n *= factorial(p,exact=True)
return n
def _exn(self,n,Sigma_x):
"""
Generates the n-th moment (not centralized!) of the output distribution
:param n: order of the moment
:param Sigma_x: Covariance matrix
:return: That moment
"""
#@profile
def _exn_rec(n,term,product_powerlist,isserlimap):
"""
Helper function for recursion
:param n:
:param term:
:param product_powerlist:
:param isserlimap:
:return:
"""
ex4 = 0
if n > 0:
for i, powerlist in enumerate(self.powerlists):
ex4 += _exn_rec(n-1,term*self.termlist[i],product_powerlist+powerlist,isserlimap)
else:
ex4 = term
hashable_pp_list = hashable(product_powerlist)
if hashable_pp_list not in isserlimap:
isserlimap[hashable_pp_list] = _fast_isserli(product_powerlist,Sigma_x)
ex4 *= isserlimap[hashable_pp_list]
return ex4
isserlimap = {}
return _exn_rec(n,1,0,isserlimap)
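# Note (illustrative): for a 1-D series f(x) ~ a0 + a1*(x - mu), _exn(2, Sigma_x)
# corresponds to E[f(x)^2] = a0^2 + 2*a0*a1*E[x - mu] + a1^2*E[(x - mu)^2]
# = a0^2 + a1^2*sigma^2, since the odd central moment vanishes.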
|
snphbaum/scikit-gpuppy
|
skgpuppy/TaylorPropagation.py
|
Python
|
bsd-3-clause
| 8,018
|
[
"Gaussian"
] |
a8893b444df57cc32119de9387cb7e4020a47c7b184f2a5240e4aecf780d8467
|
import json
import os
import urllib2
filename = "../../AP_ITEM_DATASET/5.11/NORMAL_5X5/NA.json"
base_match_url = "https://na.api.pvp.net/api/lol/na/v2.2/match/"
api_key = "?api_key=72ed6f93-1e5d-47b3-ae92-8c4657887887"
class Item:
def __init__(self, item_name, id, pre_work):
self.__item_name = item_name
self.__id = id
self.__pre_rework = pre_work
self.__count = 0
self.__average_per_game = 0.0
def getName(self):
return self.__item_name
def Add(self):
self.__count += 1
def getCount(self):
return self.__count
def getId(self):
return self.__id
def calculateAverage(self, num_games):
self.__average_per_game = float(self.__count) / num_games  # avoid Python 2 integer division
def getAverage(self):
return self.__average_per_game
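# Usage sketch (illustrative): after tallying an item, call calculateAverage() with the
# number of games processed, e.g.
#   rylais = Item("Rylai's Crystal Scepter", 3116, True)
#   rylais.Add(); rylais.calculateAverage(2)   # average of 0.5 per game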
def printAllItemCount(items):
for id in sorted(items):
print("{0} has a count of {1}".format(items[id].getName(), items[id].getCount()))
print("\n")
def main():
# initialize data
items = { 3116: Item("Rylai's Crystal Scepter", 3116, True),
3089: Item("Rabadon's Deathcap", 3089, True),
3135: Item("Void Staff", 3135, True),
3151: Item("Liandry's Torment", 3151, True),
3157: Item("Zhonya's Hourglass", 3157, True),
3165: Item("Morellonomicon", 3165, True),
3174: Item("Athene's Unholy Grail", 3174, True),
3285: Item("Luden's Echo", 3285, True),
3003: Item("Archangel's Staff", 3003, True),
3048: Item("Seraph's Embrace", 3048, True),
3027: Item("Rod of Ages", 3027, True),
3115: Item("Nashor's Tooth", 3115, True),
3152: Item("Will of the Ancients", 3152, True),
}
# while (True):
# match_id = raw_input("Please give a match id or type 'exit' to exit.")
# if (match_id == "exit"):
# break
# print("match_id number is {0}".format(match_id))
with open(filename, "r") as jsonfile:
for match_id in json.load(jsonfile):
# FORM URL
full_url = base_match_url + str(match_id) + api_key
#print("URL: {0}".format(full_url))
# CALL API WITH FORMED URL
response = ""
try:
response = urllib2.urlopen(full_url)
except urllib2.URLError as e:
print "{0} is an invalid match id".format(match_id)
continue
# return
data = json.load(response)
# get player stats
for player in data["participants"]:
stats = player["stats"]
for item in range(7):
id = int(stats["item{0}".format(item)])
if id in items:
items[id].Add()
# print aggregated data
printAllItemCount(items)
print("Goodbye.")
if __name__=='__main__':
main()
|
drood1/Riot_API_Challenge_2
|
API_Challenge_2/Program.py
|
Python
|
mit
| 3,120
|
[
"CRYSTAL"
] |
fea44355b18b07df8ba159fafa8658860e973d98cd039e699e9f3ea80575543d
|
# Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
import espressopp
def info(system, integrator, per_atom=False):
NPart = espressopp.analysis.NPart(system).compute()
T = espressopp.analysis.Temperature(system).compute()
P = espressopp.analysis.Pressure(system).compute()
Pij = espressopp.analysis.PressureTensor(system).compute()
step = integrator.step
Ek = (3.0/2.0) * NPart * T
Epot = []
Etotal = 0.0
if per_atom:
tot = '%5d %10.4f %10.6f %10.6f %12.8f' % (step, T, P, Pij[3], Ek/NPart)
else:
tot = '%5d %10.4f %10.6f %10.6f %12.3f' % (step, T, P, Pij[3], Ek)
tt = ''
for k in range(system.getNumberOfInteractions()):
e = system.getInteraction(k).computeEnergy()
Etotal += e
if per_atom:
tot += ' %12.8f' % (e/NPart)
tt += ' e%i/N ' % k
else:
tot += ' %12.3f' % e
tt += ' e%i ' % k
if per_atom:
tot += ' %12.8f' % (Etotal/NPart + Ek/NPart)
tt += ' etotal/N '
else:
tot += ' %12.3f' % (Etotal + Ek)
tt += ' etotal '
tot += ' %12.8f\n' % system.bc.boxL[0]
tt += ' boxL \n'
if step == 0:
if per_atom:
sys.stdout.write(' step T P Pxy ekin/N ' + tt)
else:
sys.stdout.write(' step T P Pxy ekin ' + tt)
sys.stdout.write(tot)
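# Typical usage sketch (illustrative; assumes an espressopp system, integrator and
# Verlet list have already been set up elsewhere):
#   for block in range(10):
#       integrator.run(100)
#       info(system, integrator, per_atom=True)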
def final_info(system, integrator, vl, start_time, end_time):
NPart = espressopp.analysis.NPart(system).compute()
espressopp.tools.timers.show(integrator.getTimers(), precision=3)
sys.stdout.write('Total # of neighbors = %d\n' % vl.totalSize())
sys.stdout.write('Ave neighs/atom = %.1f\n' % (vl.totalSize() / float(NPart)))
sys.stdout.write('Neighbor list builds = %d\n' % vl.builds)
sys.stdout.write('Integration steps = %d\n' % integrator.step)
sys.stdout.write('CPUs = %i CPU time per CPU = %.5f\n' % (espressopp.MPI.COMM_WORLD.size, end_time - start_time))
|
capoe/espressopp.soap
|
src/tools/analyse.py
|
Python
|
gpl-3.0
| 2,814
|
[
"ESPResSo"
] |
e5b3b6a103dfa484079f141e9e1e9a112117c855cf5685a497e24c64434ed078
|
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SECRET_KEY = os.environ.get('OH_SECRET', 'DEFAULT COOKIE SECRET FOR DEVELOPING')
# Only explicit truthy values enable DEBUG; bool() on any non-empty string is always True
DEBUG = os.environ.get('OH_DEBUG', '').lower() in ('1', 'true', 'yes')
ALLOWED_HOSTS = ['*']
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'avatar',
'captcha',
'crispy_forms',
'mptt',
'oh_pages',
'oh_users',
'oh_discussion',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
APPEND_SLASH = True
ROOT_URLCONF = 'ooi2h.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')]
,
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'ooi2h.wsgi.application'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'HOST': os.environ.get('OH_DB_HOST', '127.0.0.1'),
'PORT': os.environ.get('OH_DB_PORT', '3306'),
'NAME': os.environ.get('OH_DB_NAME', 'ooihack'),
'USER': os.environ.get('OH_DB_USER', 'ooihack'),
'PASSWORD': os.environ.get('OH_DB_PASSWORD', 'ooihack'),
'OPTIONS': {
'autocommit': True,
},
},
}
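# Example (illustrative): the database connection is driven entirely by environment
# variables, e.g. in the shell:
#   export OH_DB_HOST=db.example.com OH_DB_NAME=ooihack OH_DB_USER=ooihack
# Unset variables fall back to the local-development defaults above.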
LANGUAGE_CODE = 'zh-cn'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = True
STATIC_URL = '/s/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
MEDIA_URL = '/m/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
CRISPY_TEMPLATE_PACK = 'bootstrap3'
AUTH_USER_MODEL = 'oh_users.OUser'
LOGIN_URL = '/user/login/'
LOGIN_REDIRECT_URL = '/home/'
AVATAR_GRAVATAR_BACKUP = False
AVATAR_DEFAULT_URL = 'img/avatar.png'
EMAIL_BACKEND = 'django_smtp_ssl.SSLEmailBackend'
EMAIL_HOST = os.environ.get('OH_MAIL_HOST')
EMAIL_PORT = int(os.environ.get('OH_MAIL_PORT', 465))
EMAIL_HOST_USER = os.environ.get('OH_MAIL_USER', 'webmaster@ooi.moe')
EMAIL_HOST_PASSWORD = os.environ.get('OH_MAIL_PASSWORD')
|
acgx/ooi-hack
|
ooi2h/settings.py
|
Python
|
gpl-2.0
| 2,812
|
[
"MOE"
] |
a6c48221788630f621a09059dd6b23bf89b6c7634382a7a4953dabeb229a142e
|
#!/usr/bin/env python
import os
from ase.io import read
from ase.neb import NEB
from ase.calculators.turbomole import Turbomole
from ase.optimize import QuasiNewton
initial = read('initial.coord')
final = read('final.coord')
os.system('rm -f coord; cp initial.coord coord')
# Make a band consisting of 5 configs:
configs = [initial]
configs += [initial.copy() for i in range(3)]
configs += [final]
band = NEB(configs, climb=True)
# Interpolate linearly the positions of the not-endpoint-configs:
band.interpolate()
#Set calculators
for config in configs:
config.set_calculator(Turbomole())
# Optimize the Path:
relax = QuasiNewton(band, trajectory='neb.traj')
relax.run(fmax=0.05)
|
slabanja/ase
|
doc/ase/calculators/turbomole_ex2_diffuse_usingNEB.py
|
Python
|
gpl-2.0
| 694
|
[
"ASE",
"TURBOMOLE"
] |
9b6e1efafdfd59e2f7e1266073e05e3e402a9eb241a38a38d06f89f8b5551959
|
#
# Copyright (C) 2001 greg Landrum
#
# unit testing code for the composite model COM server
from rdkit import RDConfig
import unittest
from rdkit.ML.Composite import Composite
from win32com.client import Dispatch
from Numeric import *
class TestCase(unittest.TestCase):
def setUp(self):
print '\n%s: '%self.shortDescription(),
def testConnect(self):
" connecting to COM server "
ok = 1
try:
c = Dispatch('RD.Composite')
except:
ok = 0
assert ok and c is not None, 'connection to COM server failed'
def testLoad(self):
" loading a composite "
c = Dispatch('RD.Composite')
ok = 1
try:
c.LoadComposite(RDConfig.RDCodeDir+'/ml/composite/test_data/composite_base.pkl')
except:
ok = 0
assert ok, 'LoadComposite failed'
def testNames(self):
" testing descriptor names "
c = Dispatch('RD.Composite')
c.LoadComposite(RDConfig.RDCodeDir+'/ml/composite/test_data/composite_base.pkl')
names = c.GetDescriptorNames()
expectedNames = ('composition', 'max_atomic', 'has3d', 'has4d', 'has5d',
'elconc', 'atvol', 'isferro')
assert names==expectedNames, 'GetDescriptorNames failed'
def testInputOrder(self):
" testing input order "
c = Dispatch('RD.Composite')
c.LoadComposite(RDConfig.RDCodeDir+'/ml/composite/test_data/composite_base.pkl')
names = c.GetDescriptorNames()
ok = 1
try:
c.SetInputOrder(names)
except:
ok = 0
assert ok,'SetInputOrder failed'
def testClassify(self):
" testing classification "
argV = ['CrPt3','fcc','AuCu3',58.09549962,36,4,0.228898,2.219,1,3.67481803894, 1, 0, 1, 0.619669341609, 14.523874905]
nameV = ['composition','Structure','Structure_Type','Volume',
'Electrons_Per_Unit','Atoms_Per_Unit','Hardness','DOS_Ef',
'isferro','max_atomic', 'has3d', 'has4d', 'has5d',
'elconc', 'atvol']
c = Dispatch('RD.Composite')
c.LoadComposite(RDConfig.RDCodeDir+'/ml/composite/test_data/composite_base.pkl')
c.SetInputOrder(nameV)
res = c.ClassifyExample(argV)
expected = [1,1.0]
assert res[0] == expected[0],'bad prediction'
assert res[1] == expected[1],'bad confidence'
def TestSuite():
suite = unittest.TestSuite()
suite.addTest(TestCase('testConnect'))
suite.addTest(TestCase('testLoad'))
suite.addTest(TestCase('testNames'))
suite.addTest(TestCase('testInputOrder'))
suite.addTest(TestCase('testClassify'))
return suite
if __name__ == '__main__':
suite = TestSuite()
unittest.TextTestRunner().run(suite)
|
rdkit/rdkit-orig
|
rdkit/ML/Composite/UnitTestCOMServer.py
|
Python
|
bsd-3-clause
| 2,592
|
[
"RDKit"
] |
0fd79f6de4be656fdda1f227ecf37b8af1ea35f5af4c8756a0444e28ff8afaa0
|
#!/usr/bin/env python
""" Generate The Corpus Cloud with Page Elements, to be Styled """
import jinja2
import arrow
corpus = {
"5Cars": "http://5cars.world",
"Amethyst Grills": "http://amethystgrills.com/",
"Ascension Symptoms": "http://ascension.fyi",
"Astral Seed": "http://trinitysoulstars.com",
"Bubblin": "http://bubblin.life",
"Clouds": "http://clouds.zone",
"Crystal God": "http://thecrystalgod.com/",
"decause": "http://twitter.com/remy_d",
"Five Cars": "http://5cars.world",
"Guarav": "http://trinitysoulstars.com",
"Higher Self": "http://highself.solutions",
"Juice Brew": "http://juicebrew.life",
"LightBody": "http://lightbodytherapy.life",
"Manifest": "http://trinitysoulstars.com",
"Mt Meru": "http://mtmeru.life",
"Realms": "http://trinitysoulstars.com",
"Starseed": "http://trinitysoulstars.com",
"Soulstar": "http://trinitysoulstars.com",
"Theosyn": "http://trinitysoulstars.com",
"TRS": "http://truthreignsupreme.club",
"Source": "http://github.com/trinitysoulstars",
}
terms = []
titles = ["Welcome to the Trinity Nodes - the most lit sector in the multiverse"]
metadesc = ["Welcome to the Trinity Nodes - the most lit sector in the multiverse"]
authors = ["Trinity Soulstars - https://github.com/trinitysoulstars"]
videos = ['<iframe width="100%" height="450" scrolling="no" frameborder="no" src="https://w.soundcloud.com/player/?url=https%3A//api.soundcloud.com/users/281665548&auto_play=false&hide_related=false&show_comments=true&show_user=true&show_reposts=false&visual=true"></iframe>']
boldwords = {
"Nino": "http://nino.movie",
}
# analytics = ['''
# ''']
for term, link in corpus.iteritems():
print term, link
terms.append(term)
print "terms = %s " % terms
print "titles = %s " % titles
print "metadesc = %s " % metadesc
print "authors = %s " % authors
print "videos = %s " % videos
#print "analytics = %s " % analytics
for term, link in boldwords.iteritems():
print term, link
template = jinja2.Template("""
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<meta name="viewport" content="width=device-width, initial-scale=1">
<meta name="description" content="">
<meta name="author" content="">
<title>
{%- for title in titles: -%}
{{title}}
{%- endfor -%}
</title>
<meta name="description" content="
{%- for desc in metadesc: -%}
{{desc}}
{%- endfor -%}"/>
<meta name="keywords" content="
{%- for term in terms: -%}
{{term}},
{%- endfor %}"/>
<meta name="author" content="
{%- for author in authors: -%}
{{author}}
{%- endfor -%}"/>
<link rel="stylesheet" type="text/css" href="style.css" media="screen"/>
<!-- Bootstrap Core CSS -->
<link href="vendor/bootstrap/css/bootstrap.min.css" rel="stylesheet">
<!-- Custom Fonts -->
<link href="vendor/font-awesome/css/font-awesome.min.css" rel="stylesheet" type="text/css">
<link href="https://fonts.googleapis.com/css?family=Lora:400,700,400italic,700italic" rel="stylesheet" type="text/css">
<link href="https://fonts.googleapis.com/css?family=Montserrat:400,700" rel="stylesheet" type="text/css">
<!-- Theme CSS -->
<link href="css/grayscale.min.css" rel="stylesheet">
<!-- Font for Stars Background -->
<link href='http://fonts.googleapis.com/css?family=Lato:300,400,700' rel='stylesheet' type='text/css'>
<!-- Custom CSS -->
<link href="css/custom.css" rel="stylesheet" type="text/css">
<!-- HTML5 Shim and Respond.js IE8 support of HTML5 elements and media queries -->
<!-- WARNING: Respond.js doesn't work if you view the page via file:// -->
<!--[if lt IE 9]>
<script src="https://oss.maxcdn.com/libs/html5shiv/3.7.0/html5shiv.js"></script>
<script src="https://oss.maxcdn.com/libs/respond.js/1.4.2/respond.min.js"></script>
<![endif]-->
<!-- Piwik -->
<script type="text/javascript">
var _paq = _paq || [];
_paq.push(["setDomains", ["*.nino.movie","*.trinitysoulstars.github.io/nino.movie"]]);
_paq.push(['trackPageView']);
_paq.push(['enableLinkTracking']);
(function() {
var u="//piwik-decause.rhcloud.com/";
_paq.push(['setTrackerUrl', u+'piwik.php']);
_paq.push(['setSiteId', '17']);
var d=document, g=d.createElement('script'), s=d.getElementsByTagName('script')[0];
g.type='text/javascript'; g.async=true; g.defer=true; g.src=u+'piwik.js'; s.parentNode.insertBefore(g,s);
})();
</script>
<noscript><p><img src="//piwik-decause.rhcloud.com/piwik.php?idsite=17" style="border:0;" alt="" /></p></noscript>
<!-- End Piwik Code -->
</head>
<div id='stars'></div>
<div id='stars2'></div>
<div id='stars3'></div>
<body id="page-top" data-spy="scroll" data-target=".navbar-fixed-top">
<!-- Navigation -->
<nav class="navbar navbar-custom navbar-fixed-top" role="navigation">
<div class="container">
<div class="navbar-header">
<button type="button" class="navbar-toggle" data-toggle="collapse" data-target=".navbar-main-collapse">
Menu <i class="fa fa-bars"></i>
</button>
<a class="navbar-brand page-scroll" href="#page-top">
<i class="fa fa-codepen"></i> <span class="light">Trinity</span> NODE
</a>
</div>
<!-- Collect the nav links, forms, and other content for toggling -->
<div class="collapse navbar-collapse navbar-right navbar-main-collapse">
<ul class="nav navbar-nav">
<!-- Hidden li included to remove active class from about link when scrolled up past about section -->
<li class="hidden">
<a href="#page-top"></a>
</li>
<li>
<a target="_blank" class="page-scroll" href="https://soundcloud.com/trinitysoulstars"><i style="margin-right: 3px;" class="fa fa-soundcloud"></i> <span style="font-size:13px;">SoundCloud</span></a>
</li>
</ul>
</div>
<!-- /.navbar-collapse -->
</div>
<!-- /.container -->
</nav>
<!-- Body -->
<header class="intro" style="margin-top: 5%;">
<div class="intro-body">
<div class="container">
<div class="row">
<div class="col-md-8 col-md-offset-2">
<a class="logo" href ="http://trinitysoulstars.com/" target="_blank"><img style="width: 230px;" src="img/logo.png"/></a>
{% for video in videos: %}
{{video}},
{% endfor %}
<hr style="margin-top: 8px;margin-bottom: 13px;border: 0;border-top: 1px solid #eee;width: 500px;"/>
<p style="margin: 30px 0 40px;"><a style="margin-right:8px:" href="https://www.facebook.com/trinitysoulstars" class="btn btn-circle page-scroll" target="_blank">
<i class="fa fa-facebook"></i>
</a>
<a style="margin-left:4px;margin-right:4px;" href="https://twitter.com/trinitysoulstar" class="btn btn-circle page-scroll" target="_blank">
<i class="fa fa-twitter"></i>
</a>
<a href="https://www.instagram.com/trinitysoulstars/" class="btn btn-circle page-scroll" target="_blank">
<i class="fa fa-instagram"></i>
</a>
</p>
<!-- Tag Cloud -->
<p class='pcloud'>
{% for term, link in boldwords.iteritems(): -%}
<a class='boldwords btn-lg' target="_blank" href="{{link}}">{{term}}</a>
{% endfor -%}
{% for term, link in corpus.iteritems(): -%}
<a target="_blank" class="btn-lg" href="{{link}}">{{term}}</a>
{% endfor %}
</p>
</div>
</div>
</div>
</div>
</header>
<!-- jQuery -->
<script src="vendor/jquery/jquery.js"></script>
<!-- Bootstrap Core JavaScript -->
<script src="vendor/bootstrap/js/bootstrap.min.js"></script>
<!-- Plugin JavaScript -->
<script src="https://cdnjs.cloudflare.com/ajax/libs/jquery-easing/1.3/jquery.easing.min.js"></script>
<!-- Google Maps API Key - Use your own API key to enable the map feature. More information on the Google Maps API can be found at https://developers.google.com/maps/ -->
<script type="text/javascript" src="https://maps.googleapis.com/maps/api/js?key=AIzaSyCRngKslUGJTlibkQ3FkfTxj3Xss1UlZDA&sensor=false"></script>
<!-- Theme JavaScript -->
<script src="js/grayscale.min.js"></script>
</body>
</html>
""")
# When you add new elements to the template, you must define it outside the template, and then pass in the value below
output = template.render(corpus=corpus, terms=terms, titles=titles, metadesc=metadesc, authors=authors, videos=videos, boldwords=boldwords)
with open('{}.html'.format(arrow.now().format()[0:10]), "wb") as f:
f.write(output)
|
trinitysoulstars/nino.movie
|
ninogenerator.py
|
Python
|
apache-2.0
| 9,332
|
[
"CRYSTAL"
] |
67e802b32eae12a3f233baeecbd9fbc58feac0b7cb7222c3a04e5481beba9964
|
"""
CLI options class for comictagger app
"""
"""
Copyright 2012-2014 Anthony Beville
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import sys
import getopt
import platform
import os
import traceback
import ctversion
import utils
try:
import argparse
except:
pass
from genericmetadata import GenericMetadata
from comicarchive import MetaDataStyle
from versionchecker import VersionChecker
class Options:
help_text = """
Usage: {0} [OPTION]... [FILE LIST]
A utility for reading and writing metadata to comic archives.
If no options are given, {0} will run in windowed mode
-p, --print Print out tag info from file. Specify type
(via -t) to get only info of that tag type
--raw With -p, will print out the raw tag block(s)
from the file
-d, --delete Deletes the tag block of specified type (via -t)
-c, --copy=SOURCE Copy the specified source tag block to destination style
specified via -t (potentially lossy operation)
-s, --save Save out tags as specified type (via -t)
Must specify also at least -o, -p, or -m
--nooverwrite Don't modify tag block if it already exists (relevant for -s or -c)
-1, --assume-issue-one Assume issue number is 1 if not found (relevant for -s)
-n, --dryrun Don't actually modify file (only relevant for -d, -s, or -r)
-t, --type=TYPE Specify TYPE as either "CR", "CBL", or "COMET" (as either
ComicRack, ComicBookLover, or CoMet style tags, respectively)
-f, --parsefilename Parse the filename to get some info, specifically
series name, issue number, volume, and publication
year
-i, --interactive Interactively query the user when there are multiple matches for
an online search
--nosummary Suppress the default summary after a save operation
-o, --online Search online and attempt to identify file using
existing metadata and images in archive. May be used
in conjunction with -f and -m
--id=ID Use the issue ID when searching online. Overrides all other metadata
-m, --metadata=LIST Explicitly define, as a list, some tags to be used
e.g. "series=Plastic Man , publisher=Quality Comics"
"series=Kickers^, Inc., issue=1, year=1986"
Name-Value pairs are comma separated. Use a "^" to
escape an "=" or a ",", as shown in the example above
Some names that can be used:
series, issue, issueCount, year, publisher, title
-r, --rename Rename the file based on specified tag style.
--noabort Don't abort save operation when online match is of low confidence
-e, --export-to-zip Export RAR archive to Zip format
--delete-rar Delete original RAR archive after successful export to Zip
--abort-on-conflict Don't export to zip if intended new filename exists (Otherwise, creates
a new unique filename)
-S, --script=FILE Run an "add-on" python script that uses the comictagger library for custom
processing. Script arguments can follow the script name
-R, --recursive Recursively include files in sub-folders
--cv-api-key=KEY Use the given Comic Vine API Key (persisted in settings)
--only-set-cv-key Only set the Comic Vine API key and quit
-w, --wait-on-cv-rate-limit When encountering a Comic Vine rate limit error, wait and retry query
-v, --verbose Be noisy when doing what it does
--terse Don't say much (for print mode)
--version Display version
-h, --help Display this message
For more help visit the wiki at: http://code.google.com/p/comictagger/
"""
def __init__(self):
self.data_style = None
self.no_gui = False
self.filename = None
self.verbose = False
self.terse = False
self.metadata = None
self.print_tags = False
self.copy_tags = False
self.delete_tags = False
self.export_to_zip = False
self.abort_export_on_conflict = False
self.delete_rar_after_export = False
self.search_online = False
self.dryrun = False
self.abortOnLowConfidence = True
self.save_tags = False
self.parse_filename = False
self.show_save_summary = False
self.raw = False
self.cv_api_key = None
self.only_set_key = False
self.rename_file = False
self.no_overwrite = False
self.interactive = False
self.issue_id = None
self.recursive = False
self.run_script = False
self.script = None
self.wait_and_retry_on_rate_limit = False
self.assume_issue_is_one_if_not_set = False
self.file_list = []
def display_msg_and_quit( self, msg, code, show_help=False ):
appname = os.path.basename(sys.argv[0])
if msg is not None:
print( msg )
if show_help:
print self.help_text.format(appname)
else:
print "For more help, run with '--help'"
sys.exit(code)
def parseMetadataFromString( self, mdstr ):
# The metadata string is a comma separated list of name-value pairs
# The names match the attributes of the internal metadata struct (for now)
# The caret is the special "escape character", since it's not common in
# natural language text
# example = "series=Kickers^, Inc. ,issue=1, year=1986"
escaped_comma = "^,"
escaped_equals = "^="
replacement_token = "<_~_>"
md = GenericMetadata()
# First, replace escaped commas with a unique token (to be changed back later)
mdstr = mdstr.replace( escaped_comma, replacement_token)
tmp_list = mdstr.split(",")
md_list = []
for item in tmp_list:
item = item.replace( replacement_token, "," )
md_list.append(item)
# Now build a nice dict from the list
md_dict = dict()
for item in md_list:
# Make sure to fix any escaped equal signs
i = item.replace( escaped_equals, replacement_token)
key,value = i.split("=")
value = value.replace( replacement_token, "=" ).strip()
key = key.strip()
if key.lower() == "credit":
cred_attribs = value.split(":")
role = cred_attribs[0]
person = ( cred_attribs[1] if len( cred_attribs ) > 1 else "" )
primary = (cred_attribs[2] if len( cred_attribs ) > 2 else None )
md.addCredit( person.strip(), role.strip(), True if primary is not None else False )
else:
md_dict[key] = value
# Map the dict to the metadata object
for key in md_dict:
if not hasattr(md, key):
print "Warning: '{0}' is not a valid tag name".format(key)
else:
md.isEmpty = False
setattr( md, key, md_dict[key] )
#print md
return md
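# Usage sketch (illustrative, not part of the original file):
#   md = opts.parseMetadataFromString("series=Kickers^, Inc., issue=1, year=1986")
# yields md.series == "Kickers, Inc.", md.issue == "1" and md.year == "1986",
# assuming GenericMetadata exposes those attribute names.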
def launch_script(self, scriptfile):
# we were given a script. special case for the args:
# 1. ignore everything before the -S,
# 2. pass all the ones that follow (including script name) to the script
script_args = list()
for idx, arg in enumerate(sys.argv):
if arg in [ '-S', '--script']:
#found script!
script_args = sys.argv[idx+1:]
break
sys.argv = script_args
if not os.path.exists(scriptfile):
print "Can't find {0}".format( scriptfile )
else:
# I *think* this makes sense:
# assume the base name of the file is the module name
# add the folder of the given file to the python path
# import module
dirname = os.path.dirname(scriptfile)
module_name = os.path.splitext(os.path.basename(scriptfile))[0]
sys.path = [dirname] + sys.path
try:
script = __import__(module_name)
# Determine if the entry point exists before trying to run it
if "main" in dir(script):
script.main()
else:
print "Can't find entry point \"main()\" in module \"{0}\"".format( module_name )
except Exception as e:
print "Script raised an unhandled exception: ", e
print traceback.format_exc()
sys.exit(0)
def parseCmdLineArgs(self):
if platform.system() == "Darwin" and hasattr(sys, "frozen") and sys.frozen == 1:
# remove the PSN ("process serial number") argument from OS/X
input_args = [a for a in sys.argv[1:] if "-psn_0_" not in a ]
else:
input_args = sys.argv[1:]
# first check if we're launching a script:
for n in range(len(input_args)):
if ( input_args[n] in [ "-S", "--script" ] and
n+1 < len(input_args)):
# insert a "--" which will cause getopt to ignore the remaining args
# so they will be passed to the script
input_args.insert(n+2, "--")
break
# parse command line options
try:
opts, args = getopt.getopt( input_args,
"hpdt:fm:vownsrc:ieRS:1",
[ "help", "print", "delete", "type=", "copy=", "parsefilename", "metadata=", "verbose",
"online", "dryrun", "save", "rename" , "raw", "noabort", "terse", "nooverwrite",
"interactive", "nosummary", "version", "id=" , "recursive", "script=",
"export-to-zip", "delete-rar", "abort-on-conflict", "assume-issue-one",
"cv-api-key=", "only-set-cv-key", "wait-on-cv-rate-limit" ] )
except getopt.GetoptError as err:
self.display_msg_and_quit( str(err), 2 )
# process options
for o, a in opts:
if o in ("-h", "--help"):
self.display_msg_and_quit( None, 0, show_help=True )
if o in ("-v", "--verbose"):
self.verbose = True
if o in ("-S", "--script"):
self.run_script = True
self.script = a
if o in ("-R", "--recursive"):
self.recursive = True
if o in ("-p", "--print"):
self.print_tags = True
if o in ("-d", "--delete"):
self.delete_tags = True
if o in ("-i", "--interactive"):
self.interactive = True
if o in ("-c", "--copy"):
self.copy_tags = True
if a.lower() == "cr":
self.copy_source = MetaDataStyle.CIX
elif a.lower() == "cbl":
self.copy_source = MetaDataStyle.CBI
elif a.lower() == "comet":
self.copy_source = MetaDataStyle.COMET
else:
self.display_msg_and_quit( "Invalid copy tag source type", 1 )
if o in ("-o", "--online"):
self.search_online = True
if o in ("-n", "--dryrun"):
self.dryrun = True
if o in ("-m", "--metadata"):
self.metadata = self.parseMetadataFromString(a)
if o in ("-s", "--save"):
self.save_tags = True
if o in ("-r", "--rename"):
self.rename_file = True
if o in ("-e", "--export-to-zip"):
self.export_to_zip = True
if o == "--delete-rar":
self.delete_rar_after_export = True
if o == "--abort-on-conflict":
self.abort_export_on_conflict = True
if o in ("-f", "--parsefilename"):
self.parse_filename = True
if o in ("-w", "--wait-on-cv-rate-limit"):
self.wait_and_retry_on_rate_limit = True
if o == "--id":
self.issue_id = a
if o == "--raw":
self.raw = True
if o == "--noabort":
self.abortOnLowConfidence = False
if o == "--terse":
self.terse = True
if o == "--nosummary":
self.show_save_summary = False
if o in ("-1", "--assume-issue-one"):
self.assume_issue_is_one_if_not_set = True
if o == "--nooverwrite":
self.no_overwrite = True
if o == "--cv-api-key":
self.cv_api_key = a
if o == "--only-set-cv-key":
self.only_set_key = True
if o == "--version":
print "ComicTagger {0} [{1} / {2}]".format(ctversion.version, ctversion.fork, ctversion.fork_tag)
print "Modified version of ComicTagger (Copyright (c) 2012-2014 Anthony Beville)"
print "Distributed under Apache License 2.0 (http://www.apache.org/licenses/LICENSE-2.0)"
sys.exit(0)
if o in ("-t", "--type"):
if a.lower() == "cr":
self.data_style = MetaDataStyle.CIX
elif a.lower() == "cbl":
self.data_style = MetaDataStyle.CBI
elif a.lower() == "comet":
self.data_style = MetaDataStyle.COMET
else:
self.display_msg_and_quit( "Invalid tag type", 1 )
if self.print_tags or self.delete_tags or self.save_tags or self.copy_tags or self.rename_file or self.export_to_zip or self.only_set_key:
self.no_gui = True
count = 0
if self.run_script: count += 1
if self.print_tags: count += 1
if self.delete_tags: count += 1
if self.save_tags: count += 1
if self.copy_tags: count += 1
if self.rename_file: count += 1
if self.export_to_zip: count +=1
if self.only_set_key: count +=1
if count > 1:
self.display_msg_and_quit( "Must choose only one action of print, delete, save, copy, rename, export, set key, or run script", 1 )
if self.script is not None:
self.launch_script( self.script )
if len(args) > 0:
if platform.system() == "Windows":
# no globbing on windows shell, so do it for them
import glob
self.file_list = []
for item in args:
self.file_list.extend(glob.glob(item))
if len(self.file_list) > 0:
self.filename = self.file_list[0]
else:
self.filename = args[0]
self.file_list = args
if self.only_set_key and self.cv_api_key == None:
self.display_msg_and_quit( "Key not given!", 1 )
if (self.only_set_key == False) and self.no_gui and (self.filename is None):
self.display_msg_and_quit( "Command requires at least one filename!", 1 )
if self.delete_tags and self.data_style is None:
self.display_msg_and_quit( "Please specify the type to delete with -t", 1 )
if self.save_tags and self.data_style is None:
self.display_msg_and_quit( "Please specify the type to save with -t", 1 )
if self.copy_tags and self.data_style is None:
self.display_msg_and_quit( "Please specify the type to copy to with -t", 1 )
#if self.rename_file and self.data_style is None:
# self.display_msg_and_quit( "Please specify the type to use for renaming with -t", 1 )
if self.recursive:
self.file_list = utils.get_recursive_filelist( self.file_list )
|
2mny/mylar
|
lib/comictaggerlib/options.py
|
Python
|
gpl-3.0
| 14,932
|
[
"VisIt"
] |
8140efcb08848549444ffb1a2d32558b4bda4c681de0e8f3c645aca71dfe00f2
|
# GromacsWrapper: xpm.py
# Copyright (c) 2012 Oliver Beckstein <orbeckst@gmail.com>
# Released under the GNU Public License 3 (or higher, your choice)
# See the file COPYING for details.
"""
:mod:`gromacs.fileformats.convert` --- converting entries of tables
===================================================================
The :class:`Autoconverter` class was taken and slightly adapted from
RecSQL_, :mod:`recsql.converter`. It is mainly used by
:class:`gromacs.fileformats.xpm.XPM` to automagically generate useful
NumPy arrays from xpm files. Custom conversions beyond the default
ones in :class:`Autoconverter` can be provided with the constructor
keyword *mapping*.
.. _RecSQL: http://orbeckst.github.com/RecSQL/
.. autoclass:: Autoconverter
:members:
.. function:: convert(x)
Convert *x* (if in the active state)
.. attribute:: active
If set to ``True`` then conversion takes place; ``False`` just
returns :func:`besttype` applied to the value.
.. autofunction:: besttype
.. autofunction:: to_unicode
"""
import re
def to_unicode(obj, encoding='utf-8'):
"""Convert obj to unicode (if it can be converted)
from http://farmdev.com/talks/unicode/"""
if isinstance(obj, basestring):
if not isinstance(obj, unicode):
obj = unicode(obj, encoding)
return obj
class Autoconverter(object):
"""Automatically convert an input value to a special python object.
The :meth:`Autoconverter.convert` method turns the value into a special
python value and casts strings to the "best" type (see :func:`besttype`).
The defaults for the conversion of a input field value to a
special python value are:
=========== ===============
value python
=========== ===============
'``---``' ``None``
'' ``None``
'True' ``True``
'x' ``True``
'X' ``True``
'yes' ``True``
'Present' ``True``
'False' ``False``
'-' ``False``
'no' ``False``
'None' ``False``
'none' ``False``
=========== ===============
If the *sep* keyword is set to a string instead of ``False`` then
values are split into tuples. Probably the most convenient way to
use this is to set *sep* = ``True`` (or ``None``) because this
splits on all white space whereas *sep* = ' ' would split multiple
spaces.
**Example**
- With *sep* = ``True``: 'foo bar 22 boing ``---``' --> ('foo', 'bar', 22, 'boing', None)
- With *sep* = ',': 1,2,3,4 --> (1,2,3,4)
"""
def __init__(self, mode="fancy", mapping=None, active=True, sep=False, **kwargs):
"""Initialize the converter.
:Arguments:
*mode*
defines what the converter does
"simple"
convert entries with :func:`besttype`
"singlet"
convert entries with :func:`besttype` and apply
mappings
"fancy"
first splits fields into lists, tries mappings,
and does the stuff that "singlet" does
"unicode"
convert all entries with :func:`to_unicode`
*mapping*
any dict-like mapping that supports lookup. If ``None`` then the
hard-coded defaults are used
*active* or *autoconvert*
initial state of the :attr:`Autoconverter.active` toggle.
``False`` deactivates any conversion. [``True``]
*sep*
character to split on (produces lists); use ``True`` or ``None``
(!) to split on all white space.
*encoding*
encoding of the input data [utf-8]
"""
self._convertors = {'unicode': unicode,
'simple': besttype,
'singlet': self._convert_singlet,
'fancy': self._convert_fancy,
}
self.convert = None # convertor function; set when self.active <-- True.
if mapping is None:
mapping = {'---': None, '':None,
'True':True, 'x': True, 'X':True, 'yes':True, 'Present':True, 'present':True,
'False':False, 'no': False, '-':False, 'None':False, 'none':False, }
self.mapping = mapping
self.encoding = kwargs.pop('encoding', "utf-8")
self.mode = mode
self.__active = None
self.active = kwargs.pop('autoconvert', active) # 'autoconvert' is a "strong" alias of 'active'
if sep is True:
sep = None # split on *all* white space, sep=' ' splits single spaces!
self.sep = sep
def active():
doc = """Toggle the state of the Autoconverter. ``True`` uses the mode, ``False`` does nothing"""
def fget(self):
return self.__active
def fset(self, x):
self.__active = x
if self.__active:
self.convert = self._convertors[self.mode]
else:
self.convert = lambda x: x # do nothing
return locals()
active = property(**active())
def _convert_singlet(self, s):
x = besttype(s, self.encoding)
try:
return self.mapping[x]
except KeyError:
return x
def _convert_fancy(self, field):
"""Convert to a list (sep != None) and convert list elements."""
if self.sep is False:
x = self._convert_singlet(field)
else:
x = tuple([self._convert_singlet(s) for s in field.split(self.sep)])
if len(x) == 0:
x = ''
elif len(x) == 1:
x = x[0]
#print "%r --> %r" % (field, x)
return x
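# Usage sketch (illustrative, not part of the original module):
#   a = Autoconverter(mode="fancy", sep=True)
#   a.convert("foo bar 22 ---")   # -> (u'foo', u'bar', 22, None)
#   a.active = False
#   a.convert("foo bar 22 ---")   # -> input returned unchanged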
def besttype(x, encoding="utf-8"):
"""Convert string x to the most useful type, i.e. int, float or unicode string.
If x is a quoted string (single or double quotes) then the quotes
are stripped and the enclosed string returned.
.. Note::
Strings will be returned as Unicode strings (using :func:`unicode`),
based on the *encoding* argument, which is "utf-8" by default.
"""
def unicodify(x):
return to_unicode(x, encoding)
x = unicodify(x) # make unicode as soon as possible
try:
x = x.strip()
except AttributeError:
pass
m = re.match(r"""['"](?P<value>.*)["']$""", x)
if m is None:
# not a quoted string, try different types
for converter in int, float, unicodify: # try them in increasing order of lenience
try:
return converter(x)
except ValueError:
pass
else:
# quoted string
x = unicodify(m.group('value'))
return x
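# Examples (illustrative): besttype("42") -> 42, besttype("3.5") -> 3.5,
# besttype("'quoted'") -> u'quoted', besttype("word") -> u'word'.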
def to_int64(a):
"""Return view of the recarray with all int32 cast to int64."""
# build new dtype and replace i4 --> i8
def promote_i4(typestr):
if typestr[1:] == 'i4':
typestr = typestr[0]+'i8'
return typestr
dtype = [(name, promote_i4(typestr)) for name,typestr in a.dtype.descr]
return a.astype(dtype)
def pyify(typestr):
if typestr[1] in 'iu':
return int
elif typestr[1] == 'f':
return float
elif typestr[1] == 'S':
return str
return lambda x: x
def to_pytypes(a):
dtype = [(name, pyify(typestr)) for name,typestr in a.dtype.descr]
return a.astype(dtype)
def irecarray_to_py(a):
"""Slow conversion of a recarray into a list of records with python types.
Get the field names from :attr:`a.dtype.names`.
:Returns: iterator so that one can handle big input arrays
"""
pytypes = [pyify(typestr) for name,typestr in a.dtype.descr]
def convert_record(r):
return tuple([converter(value) for converter, value in zip(pytypes,r)])
return (convert_record(r) for r in a)
|
pslacerda/GromacsWrapper
|
gromacs/fileformats/convert.py
|
Python
|
gpl-3.0
| 7,997
|
[
"Gromacs"
] |
bcd13cdc0fe07c6479b7133fefc8a9fb98caeba2bd89cfddc95014576415ba71
|
# uk.po
val = {"" : "Project-Id-Version: sheltermanager\nReport-Msgid-Bugs-To: FULL NAME <EMAIL@ADDRESS>\nPOT-Creation-Date: 2013-01-24 10:55+0000\nPO-Revision-Date: 2011-10-10 12:35+0000\nLast-Translator: Asalle Kim <Asaly12@ukr.net>\nLanguage-Team: Ukrainian <uk@li.org>\nMIME-Version: 1.0\nContent-Type: text/plain; charset=UTF-8\nContent-Transfer-Encoding: 8bit\nX-Launchpad-Export-Date: 2014-01-23 05:37+0000\nX-Generator: Launchpad (build 16901)\n",
"{plural3} people with active reservations have not been homechecked" : "",
"Donation Type" : "",
"Use animal comments if photo notes are blank" : "",
"Half-Yearly" : "",
"Select recommended" : "",
"At least the last name should be completed." : "",
"Chinese Crested Dog" : "",
"New template" : "",
"Include incomplete medical and vaccination records when generating document templates" : "",
"Due today" : "",
"You can prefix your term in the search box with a: to search only animals, p: to search only people, wl: to search waiting list entries, la: to search lost animals and fa: to search found animals." : "",
"Warnings" : "",
"Corded" : "",
"Edit diet" : "",
"Stolen {0}" : "",
"Domestic Long Hair" : "",
"{plural2} year" : "",
"Export this database in various formats" : "",
"Person - Name and Address" : "",
"Not For Adoption" : "",
"The date the animal was tattooed" : "",
"Entered From" : "",
"Base Color" : "",
"TT = first and second letter of animal type" : "",
"Reconcile" : "",
"Default Species" : "",
"View Manual" : "",
"Search Results for '{0}'" : "",
"Lost Animal - Details" : "",
"Remove the heartworm test fields from animal health details" : "",
"Income::Adoption" : "",
"{plural0} animal was euthanized" : "",
"If this person is a member, their membership number." : "",
"The size of this animal" : "",
"Use Automatic Insurance Numbers" : "",
"Cane Corso Mastiff" : "",
"Negative" : "",
"View Found Animal" : "",
"More diary notes" : "",
"Show animal thumbnails in movement and medical books" : "",
"Install the selected reports to your database" : "",
"Reservation date cannot be after cancellation date." : "",
"American Staffordshire Terrier" : "",
"Publish to folder" : "",
"New Diary" : "",
"This person has been banned from adopting animals." : "",
"Amazon" : "",
"Email person" : "",
"Default destination account for donations" : "",
"Affenpinscher" : "",
"Homecheck History" : "",
"Dosage" : "",
"Urgency" : "",
"Bank::Savings" : "",
"Last Name" : "",
"Tuesday" : "",
"Ginger" : "",
"New Regimen" : "",
"Australian Cattle Dog/Blue Heeler" : "",
"Boarding cost type" : "",
"Owner" : "",
"Medical Book" : "",
"Date lost cannot be blank." : "",
"Irish Terrier" : "",
"Mark selected donations received" : "",
"Found Animal: {0}" : "",
"Edit cost" : "",
"Jump to donations" : "",
"Successfully posted to Facebook" : "",
"Adoption Number" : "",
"McNab" : "",
"Munsterlander" : "",
"Recently deceased shelter animals (last 30 days)." : "",
"Staff record" : "",
"Add a log entry" : "",
"Generate document from this donation" : "",
"Create waiting list records from the selected forms" : "",
"ASM will remove this animal from the waiting list after a set number of weeks since the last owner contact date." : "",
"June" : "",
"The secondary breed of this animal" : "",
"Stay" : "",
"Lost to" : "",
"Removed" : "",
"Reservation Book" : "",
"Entering 'fosterers', 'homecheckers', 'staff', 'volunteers', 'aco' or 'members' in the search box will show you those groups of people." : "",
"Points for matching features" : "",
"Credit Card" : "",
"Cockatoo" : "",
"Perform Homecheck" : "",
"Person" : "",
"Debit Card" : "",
"View Report" : "",
"Generate a new animal code" : "",
"Oriental Tabby" : "",
"Address Contains" : "",
"Financial" : "",
"Appaloosa" : "",
"Text" : "",
"Test book" : "",
"Header" : "",
"Heartworm Test Date" : "",
"English Coonhound" : "",
"Owner Vet" : "",
"Add movement" : "",
"Tibetan Spaniel" : "",
"Some browsers allow shortcut keys, press SHIFT+ALT+A in Chrome or Firefox to jump to the animal adoption screen." : "",
"Exclude this image when publishing" : "",
"Chocolate Labrador Retriever" : "",
"This animal has been FIV/L tested" : "",
"Don't scale" : "",
"Allergies" : "",
"Chart (Bar)" : "",
"Keep table headers visible when scrolling" : "",
"Tooltip" : "",
"Animal food costs" : "",
"{plural2} urgent entries on the waiting list" : "",
"U (Unwanted Cat)" : "",
"MeetAPet Publisher" : "",
"Add a medical regimen" : "",
"Alaskan Malamute" : "",
"Wheaten Terrier" : "",
"Glen of Imaal Terrier" : "",
"Irish Water Spaniel" : "",
"{plural3} shelter animals have people looking for them" : "",
"Mountain Dog" : "",
"Silky Terrier" : "",
"Peacock/Pea fowl" : "",
"White German Shepherd" : "",
"Create a new animal from this waiting list entry" : "",
"To continue using ASM, please renew {0}" : "",
"Please select a PDF, HTML or JPG image file to attach" : "",
"Patterdale Terrier (Fell Terrier)" : "",
"Old Password" : "",
"Pixie-Bob" : "",
"Great Dane" : "",
"Executing..." : "",
"New Log" : "",
"Added by {0} on {1}" : "",
"Sloughi" : "",
"Expenses::Electricity" : "",
"Species to use when publishing to third party services and adoption sites" : "",
"Add found animal" : "",
"Show codes on the shelter view screen" : "",
"Rotate image 90 degrees anticlockwise" : "",
"FTP username" : "",
"Make this the default video link when publishing to the web" : "",
"Test marked as performed for {0} - {1}" : "",
"Rough" : "",
"Use a single breed field" : "",
"Blue" : "",
"{0} treatments every {1} months" : "",
"Flemish Giant" : "",
"Edit my diary notes" : "",
"Removal Reason" : "",
"If the shelter provides initial insurance cover to new adopters, the policy number" : "",
"Add {0}" : "",
"Scottish Terrier Scottie" : "",
"Found animals reported in the last 30 days." : "",
"Create Log" : "",
"This animal is a crossbreed" : "",
"On Foster (in figures)" : "",
"{plural0} shelter animal has people looking for them" : "",
"Quarterhorse" : "",
"Housetrained" : "",
"Name and Address" : "",
"Remove the good with fields from animal notes" : "",
"Donation" : "",
"(none)" : "",
"Path" : "",
"weeks" : "",
"Flat-coated Retriever" : "",
"Mobile" : "",
"Address" : "",
"{plural3} unaltered animals have been adopted in the last month" : "",
"Positive/Negative" : "",
"These are the default values for these fields when creating new records." : "",
"{plural0} test needs to be performed today" : "",
"Black and White" : "",
"Fawn" : "",
"Reference" : "",
"Lancashire Heeler" : "",
"Ocicat" : "",
"Goose" : "",
"Default image for this record and the web" : "",
"{plural1} weeks" : "",
"Mouse" : "",
"The date this animal was reserved" : "",
"Change Investigation" : "",
"Default daily boarding cost" : "",
"Enable accounts functionality" : "",
"Lost Animal Contact" : "",
"Diary note cannot be blank" : "",
"Accountant" : "",
"Investigation" : "",
"Animal Name" : "",
"Day Pivot" : "",
"Type" : "Тип",
"Area where the animal was lost" : "",
"Message successfully sent" : "",
"Username" : "",
"Vaccinations: {0}, Tests: {1}, Medical Treatments: {2}, Costs: {3}, Total Costs: {4} Total Donations: {5}, Balance: {6}" : "",
"Add Found Animal" : "",
"You will need to upgrade to iOS 6 or higher to upload files." : "",
"Owners Vet" : "",
"Heartworm Tested" : "",
"Rabbit" : "",
"Manchester Terrier" : "",
"Hold" : "",
"{plural0} medical treatment needs to be administered today" : "",
"Health Problems" : "Проблеми зі здоров'ям",
"This person has been banned from adopting animals" : "",
"Bank::Deposit" : "",
"Adopt" : "",
"{plural3} animals died" : "",
"{plural0} day." : "",
"{plural3} animals were transferred to other shelters" : "",
"Found Animal {0}" : "",
"Enable sharing animals via Facebook" : "",
"Enable FTP uploading" : "",
"Add report" : "",
"New password and confirmation password don't match." : "",
"Add Diets" : "",
"Email users their diary notes each day" : "",
"September" : "",
"When posting an animal to Facebook, make a note of it in the log with this type" : "",
"Investigations" : "",
"Not eligible for gift aid" : "",
"days" : "",
"Urgent" : "",
"Litter" : "",
"Bank current account" : "",
"The date the animal was altered" : "",
"Include CSV header line" : "",
"Found Animal - Details" : "",
"Longest On Shelter" : "",
"Update system options" : "",
"Liver and White" : "",
"UUUUUUUUUU or UUUU = unique number" : "",
"Lookup Values" : "",
"If you assign view or edit roles, only users within those roles will be able to view and edit this account." : "",
"Create diary notes from a task" : "",
"Due" : "",
"Syntax check this SQL" : "",
"Default Vaccination Type" : "",
"Additional date field '{0}' contains an invalid date." : "",
"Electricity Bills" : "",
"Quarterly" : "",
"Set this to 0 to never automatically remove." : "",
"Tests" : "",
"Points for matching species" : "",
"Voucher Types" : "",
"Welcome!" : "",
"Liability" : "",
"Message from {0}" : "",
"Publishing Logs" : "",
"Search" : "",
"Email a copy of the selected documents" : "",
"Remove the investigation tab from person records" : "",
"Contact Contains" : "",
"Find a lost animal" : "",
"{plural2} medical treatments need to be administered today" : "",
"Creme DArgent" : "",
"Neapolitan Mastiff" : "",
"Removal reason" : "",
"Code" : "Код",
"Features" : "Можливості",
"Sep" : "",
"Dove" : "",
"The microchip number" : "",
"Sex" : "стать",
"Akita" : "",
"View Donation" : "",
"Frequency" : "",
"Generated document '{0}'" : "",
"Softbill (Other)" : "",
"Trial adoption" : "",
"Movements" : "",
"Date lost cannot be blank" : "",
"Code format tokens:" : "",
"Generate image thumbnails as tn_$$IMAGE$$" : "",
"Daily Boarding Cost" : "",
"Waiting list urgency update period in days" : "",
"Add donation" : "",
"Creating..." : "",
"The litter this animal belongs to" : "",
"Additional fields need a name, label and type." : "",
"Delete Cost" : "",
"Clone" : "",
"Retailer book" : "",
"Shelter animal {0} '{1}'" : "",
"White and Black" : "",
"Rabies Tag" : "",
"Bank" : "",
"Find a found animal" : "",
"Bulk Complete Diary" : "",
"Bull Terrier" : "",
"Reports" : "",
"Sorry. ASM will not work without Javascript." : "",
"Login" : "",
"Vaccination marked as given for {0} - {1}" : "",
"AdoptAPet Publisher" : "",
"Location and Species" : "",
"Date reported cannot be blank." : "",
"{plural0} person with an active reservation has not been homechecked" : "",
"When a message is created, email it to each matching user" : "",
"Allow overriding of the movement number on the Move menu screens" : "",
"FoundLost animal entry {0} successfully created." : "",
"Start Of Day" : "",
"Prefill new media notes for animal images with animal comments if left blank" : "",
"Shelter Details" : "",
"HelpingLostPets Publisher" : "",
"Date put on cannot be blank" : "",
"Curly" : "",
"Tabby and White" : "",
"Template" : "",
"Mark an animal deceased" : "",
"New Owner" : "",
"Start date must be a valid date" : "",
"SQL Interface" : "",
"Time On List" : "",
"Norwegian Lundehund" : "",
"Shelter stats (this year)" : "",
"Vaccinate Animal" : "",
"Cocker Spaniel" : "",
"View Lost Animal" : "",
"Returned to Owner {0}" : "",
"Edit diary notes" : "",
"FTP password" : "",
"Waiting list entries matching '{0}'." : "",
"Account code '{0}' is not valid." : "",
"Mark treatments given today" : "",
"Any markings or distinguishing features the animal has" : "",
"Account" : "",
"Havana" : "",
"Black and Tan" : "",
"You can upload images called logo.jpg and splash.jpg to the Settings- Reports-Extra Images screen to override the login splash screen and logo in the upper left corner of the application." : "",
"Find animal columns" : "",
"Belgian Hare" : "",
"Accounts need a code." : "",
"Death Reasons" : "",
"Add litter" : "",
"Add Person" : "",
"Leave" : "",
"Sorrel Tortoiseshell" : "",
"Default Cost" : "",
"Organization" : "",
"Reason for entry" : "",
"Belgian Shepherd Malinois" : "",
"Peruvian Paso" : "",
"Date of birth is not valid" : "",
"Expenses::Phone" : "",
"Tricolour" : "",
"Movement numbers must be unique." : "",
"Change Movement" : "",
"Quicklinks" : "",
"Norwich Terrier" : "",
"Find person" : "",
"Delete Found Animal" : "",
"Abyssinian" : "",
"The date the animal was adopted" : "",
"Access System Menu" : "",
"Show the full diary (instead of just my notes) on the home page" : "",
"Include quarantined animals" : "",
"This person is not flagged as a retailer and cannot handle retailer movements." : "",
"Jack Russell Terrier" : "",
"Priority" : "",
"Foster" : "",
"Sick/Injured" : "",
"View Animals" : "",
"Save this record" : "",
"Animal code format" : "",
"Microchip" : "Мікрочіп",
"Dogs" : "",
"Bunny Rabbit" : "",
"Dwarf" : "",
"New Cost" : "",
"Blue Tortie" : "",
"Foster book" : "",
"Select person to merge" : "",
"Retailer movement successfully created." : "",
"Terrier" : "",
"Advanced" : "",
"Newfoundland Dog" : "",
"How urgent is it that we take this animal?" : "",
"Settings" : "",
"Warn when creating multiple reservations on the same animal" : "",
"These fields determine which columns are shown on the find animal and find person screens." : "",
"The date this animal was found" : "",
"Return an animal from transfer" : "",
"New Test" : "",
"{plural0} trial adoption has ended" : "",
"RabiesTag" : "",
"Illyrian Sheepdog" : "",
"Found Animal Contact" : "",
"Automatically cancel any outstanding reservations on an animal when it is adopted" : "",
"Remove the document repository functionality from menus" : "",
"Generate a document from this person" : "",
"These numbers are for shelters who have agreements with insurance companies and are given blocks of policy numbers to allocate." : "",
"Date found cannot be blank." : "",
"Transferred In" : "",
"A short version of the reference number" : "",
"Given" : "",
"Paso Fino" : "",
"Scottish Fold" : "",
"Log successfully added." : "",
"Add Users" : "",
"All animals who are flagged as not for adoption." : "",
"Cell Phone" : "",
"Columns" : "",
"Movement" : "",
"Visual Theme" : "",
"Attach Link" : "",
"The date the animal was microchipped" : "",
"New Password" : "",
"Boxer" : "",
"Cheque" : "",
"Eskimo Dog" : "",
"{0} treatments every {1} weeks" : "",
"A publish job is already running." : "",
"Black Labrador Retriever" : "",
"Tonkinese" : "",
"CC" : "",
"You can bookmark search results, animals, people and most data entry screens." : "",
"Publishing complete." : "",
"Create Animal" : "",
"Unit within the location, eg: pen or cage number" : "",
"Javanese" : "",
"Hidden comments about the animal" : "",
"Income::OpeningBalances" : "",
"{plural2} shelter animals have people looking for them" : "",
"Whippet" : "",
"Lop Eared" : "",
"{plural1} vaccinations need to be administered today" : "",
"Escaped" : "",
"Error contacting server." : "",
"ASM 3 is compatible with your iPad and other tablets." : "",
"Can't reserve an animal that has an active movement." : "",
"Health and Identification" : "",
"The date this animal was removed from the waiting list" : "",
"Current Vet" : "",
"Reservation" : "",
"Delete Movement" : "",
"There is not enough information in the form to create a lost animal record (need a description and area lost)." : "",
"Add vaccination" : "",
"Match lost and found animals" : "",
"Exclude animals who are aged under" : "",
"Shelter stats (this month)" : "",
"IP restriction is a space-separated list of IP netblocks in CIDR notation that this user is *only* permitted to login from (eg: 192.168.0.0/24 127.0.0.0/8). If left blank, the user can login from any address." : "",
"Donations of type" : "",
"Movements require an animal." : "",
"Create a cost record" : "",
"Ragdoll" : "",
"Selkirk Rex" : "",
"Toucan" : "",
"Border Terrier" : "",
"Update animals with SmartTag Pet ID" : "",
"Retriever" : "",
"Email Address" : "",
"Add cost" : "",
"Animal - Entry" : "",
"The SmartTag PETID number" : "",
"This animal was euthanized" : "",
"Edit litter" : "",
"Home" : "",
"{plural3} months" : "",
"Yellow Labrador Retriever" : "",
"Delete Medical Records" : "",
"The result of the heartworm test" : "",
"Income::Shop" : "",
"Settings, Options" : "",
"Find person columns" : "",
"City contains" : "",
"All staff on file." : "",
"Duration" : "",
"Table" : "",
"F (Stray Dog)" : "",
"Heartworm Test Result" : "",
"{plural1} animals were reclaimed by their owners" : "",
"Homechecked By" : "",
"Errors" : "",
"The person record to merge must be different from the original." : "",
"Guinea Pig" : "",
"Smooth Fox Terrier" : "",
"Unsaved Changes" : "",
"Default Location" : "",
"Arabian" : "",
"West Highland White Terrier Westie" : "",
"SubTotal" : "",
"Cancel unadopted reservations after this many days, or 0 to never cancel" : "",
"Add diet" : "",
"Looking for" : "",
"{plural0} animal was adopted" : "",
"In SubTotal" : "",
"View Shelter Animals" : "",
"Good with Children" : "",
"Silver Marten" : "",
"Donations require a received date" : "",
"Deceased Date" : "",
"Messages" : "",
"Insurance No" : "",
"Lookup data" : "",
"Edit form field" : "",
"Create a new animal by copying this one" : "",
"Add diary" : "",
"Add user" : "",
"From retailer is only valid on adoption movements." : "",
"Edit media notes" : "",
"Document templates" : "",
"Shih Tzu" : "",
"White and Brindle" : "",
"Produce a PDF of printable labels" : "",
"When adding animals" : "",
"You can override the search result sort by adding one of the following to the end of your search - sort:az, sort:za, sort:mr, sort:lr" : "",
"{plural2} weeks" : "",
"This animal is bonded with {0}. Adoption movement records will be created for all bonded animals." : "",
"Species" : "Виды",
"Norwegian Forest Cat" : "",
"Edit HTML publishing templates" : "",
"Change Accounts" : "",
"System" : "",
"SQL is syntactically correct." : "",
"Movements require an animal" : "",
"Hold until {0}" : "",
"Great Pyrenees" : "",
"Online form fields need a name and label." : "",
"Mastiff" : "",
"Change Donation" : "",
"View Movement" : "",
"Facebook" : "",
"Kerry Blue Terrier" : "",
"Only publish a set number of animals" : "",
"Lost Animal - Additional" : "",
"Staffordshire Bull Terrier" : "",
"Postage costs" : "",
"New online form" : "",
"Return an animal from adoption" : "",
"Log date must be a valid date" : "",
"Boarding Cost" : "",
"Deposit" : "",
"Found to" : "",
"Amber" : "",
"Subject" : "",
"Image" : "",
"months" : "",
"Entry Reason Category" : "",
"Role is in use and cannot be deleted." : "",
"Similar Person" : "",
"Animal - Health and Identification" : "",
"{plural3} animals were euthanized" : "",
">>" : "",
"Tattoo" : "Тату",
"Feb" : "",
"{plural2} days." : "",
"Required date must be a valid date" : "",
"Lovebird" : "",
"New Field" : "",
"Draft" : "",
"Animals matching '{0}'." : "",
"Add Movement" : "",
"Vizsla" : "",
"Pug" : "",
"Add voucher" : "",
"Marketer" : "",
"Aged From" : "",
"The default username is 'user' with the password 'letmein'" : "",
"Publisher Species" : "",
"Breed to use when publishing to third party services and adoption sites" : "",
"Link to an external web resource" : "",
"This will permanently remove this additional field and ALL DATA CURRENTLY HELD AGAINST IT. This action is irreversible, are you sure you want to do this?" : "",
"Spaniel" : "",
"The tattoo number" : "",
"Missouri Foxtrotter" : "",
"Use SQL Interface" : "",
"S (Stray Cat)" : "",
"Multiple Treatments" : "",
"Bouvier des Flanders" : "",
"These are the HTML headers and footers used when displaying online forms." : "",
"Paper Size" : "",
"Column" : "",
"January" : "",
"White and Torti" : "",
"Hedgehog" : "",
"Golden" : "",
"Document Link" : "",
"Vaccination Book" : "",
"The date the animal was born" : "",
"Transfer To" : "",
"Add Report" : "",
"Settings, Lookup data" : "",
"This animal is not on the shelter." : "",
"Generate Report" : "",
"Cream" : "",
"Points for matching breed" : "",
"Give and Reschedule" : "",
"Budgie/Budgerigar" : "",
"Kittens (under {0} months)" : "",
"Japanese Chin" : "",
"Reason the owner did not bring in the animal themselves" : "",
"{plural2} animals are not available for adoption" : "",
"Other Shelter" : "",
"This income account is the source for donations received of this type" : "",
"Mark this owner homechecked" : "",
"Description" : "",
"Good with Dogs" : "",
"Kennel Cough" : "",
"Korat" : "",
"Costs" : "",
"Title Initials Last" : "",
"Only PDF, HTML and JPG image files can be attached." : "",
"Percheron" : "",
"{0} {1} {2} aged {3}" : "",
"Color" : "Колір",
"Creating donations and donation types creates matching accounts and transactions" : "",
"Go the options screen and set your shelter's contact details and other settings." : "",
"Test Book" : "",
"Person looking for report" : "",
"Add a new log" : "",
"Mo" : "",
"Chicken" : "",
"Unreserved" : "",
"Send mass emails and perform mail merges" : "",
"Ragamuffin" : "",
"Title" : "",
"Returned" : "",
"Light Amber" : "",
"Delete this animal" : "",
"All donors on file." : "",
"Upload all available images for animals" : "",
"Adopted" : "",
"View media" : "",
"The date the retailer movement is effective from" : "",
"Diary notes for: {0}" : "",
"Change date given on selected treatments" : "",
"Canaan Dog" : "",
"Remove the size field from animal details" : "",
"Stationary costs" : "",
"Today" : "",
"Schipperke" : "",
"South Russian Ovcharka" : "",
"Premises" : "",
"Entered (oldest first)" : "",
"Donations require a person" : "",
"Scottish Deerhound" : "",
"Released To Wild" : "",
"Create this person" : "",
"Rhodesian Ridgeback" : "",
"FIV" : "",
"Roles" : "",
"Include animals who don't have a picture" : "",
"Diets need a start date." : "",
"Ruddy" : "",
"Delete Regimen" : "",
"Welsh Springer Spaniel" : "",
"{plural0} person has an overdue donation" : "",
"Telephone" : "",
"Animal - Notes" : "",
"Donation of {0} successfully received ({1})." : "",
"A description or other information about the animal" : "",
"There is not enough information in the form to create a waiting list record (need a description)." : "",
"Treat foster animals as part of the shelter inventory" : "",
"Rank" : "",
"View Investigations" : "",
"Accounts" : "",
"Presa Canario" : "",
"Tan" : "",
"From Other" : "",
"Rex" : "",
"Britannia Petite" : "",
"Add to log" : "",
"New Movement" : "",
"White and Tabby" : "",
"Date Found" : "",
"Import complete with {plural1} errors." : "",
"Internal Location" : "",
"Label" : "",
"Find lost animal returned {0} results." : "",
"Template names can include a path portion with /, eg: Vets/Rabies Certificate" : "",
"Cancel holds on animals this many days after the brought in date, or 0 to never cancel" : "",
"{plural0} week" : "",
"Default Breed" : "",
"French-Lop" : "",
"Post to Facebook as" : "",
"IP Restriction" : "",
"This will permanently remove the selected records, are you sure?" : "",
"(all)" : "",
"The shelter reference number" : "",
"Default Coat Type" : "",
"{plural1} urgent entries on the waiting list" : "",
"Remove the microchip fields from animal identification details" : "",
"Ferret" : "",
"Diary date is not valid" : "",
"Return a transferred animal" : "",
"Chocolate Tortie" : "",
"Doberman Pinscher" : "",
"Dalmatian" : "",
"Add a photo" : "",
"FTP hostname" : "",
"(unknown)" : "",
"This type of movement requires a date." : "",
"Age" : "Вік",
"Change System Options" : "",
"Show the breed fields" : "",
"{plural1} animals are not available for adoption" : "",
"This date of birth is an estimate" : "",
"Hamster" : "",
"{0}: Entered shelter {1}, Last changed on {2} by {3}. {4} {5} {6} aged {7}" : "",
"Days On Shelter" : "",
"Mustang" : "",
"Super user" : "",
"{plural3} trial adoptions have ended" : "",
"Labrador Retriever" : "",
"Automatically remove" : "",
"Brindle and Black" : "",
"Save this person" : "",
"Entering 'deceased' in the search box will show you recently deceased animals." : "",
"Transferred Out" : "",
"DOB" : "",
"Member" : "",
"Show short shelter codes on screens" : "",
"Good With Dogs" : "",
"Found from" : "",
"Donation?" : "",
"Import" : "",
"Not Reconciled" : "",
"Karelian Bear Dog" : "",
"Complaint" : "",
"August" : "",
"Due in next week" : "",
"Animal Types" : "",
"SQL dump (ASM2 HSQLDB Format)" : "",
"Diet" : "",
"Create lost animal records from the selected forms" : "",
"Income::Sponsorship" : "",
"Show transactions from" : "",
"Positive" : "",
"Remove the bonded with fields from animal entry details" : "",
"Died" : "Вимерло",
"Delete Log" : "",
"Diary Task" : "",
"October" : "",
"Up for adoption" : "",
"Document Templates" : "",
"Required" : "",
"Remove retailer functionality from the movement screens and menus" : "",
"Edit template" : "",
"Columns displayed" : "",
"Donations" : "",
"The animal sex" : "",
"Time" : "",
"Biting" : "",
"Transferred In {0}" : "",
"All animals who are currently quarantined." : "",
"Transactions need a date and description." : "",
"Good With Cats" : "",
"Children" : "",
"Out" : "",
"Media" : "",
"Gaited" : "",
"American Water Spaniel" : "",
"Time On Shelter" : "",
"Dutch" : "",
"Warn when adopting to a person who has been banned from adopting animals" : "",
"The primary breed of this animal" : "",
"Thoroughbred" : "",
"Add an animal to the waiting list" : "",
"Altered" : "",
"My diary notes" : "",
"Persian" : "",
"Medical Profiles" : "",
"No Locations" : "",
"New Profile" : "",
"Vaccinations need an animal and at least a required date." : "",
"Chartreux" : "",
"When entering dates, hold down CTRL and use the cursor keys to move around the calendar. Press t to go to today." : "",
"Reservation Cancelled" : "",
"Tabby" : "",
"Account code '{0}' has already been used." : "",
"Rhinelander" : "",
"ASM News" : "",
"Delete Incoming Forms" : "",
"Email this message to all matching users" : "",
"This code has already been used." : "",
"Document file" : "",
"View" : "",
"People matching '{0}'." : "",
"Reason Not From Owner" : "",
"Locale" : "",
"Mini-Lop" : "",
"Add Waiting List" : "",
"Parvovirus" : "",
"This animal is currently fostered, it will be automatically returned first." : "",
"Cinnamon Tortoiseshell" : "",
"Warn when adopting to a person who lives in the same area as the original owner" : "",
"This can take some time and generate a large file, are you sure?" : "",
"Add Medical Records" : "",
"Most browsers let you search in dropdowns by typing the first few letters of the item you want." : "",
"Sheep" : "",
"Entering 'activelost' or 'activefound' in the search box will show you lost and found animals reported in the last 30 days." : "",
"Template for Facebook posts" : "",
"Sponsorship donations" : "",
"Add template" : "",
"Removal" : "",
"Jan" : "",
"Waiting List Donation" : "",
"Create boarding cost record when animal is adopted" : "",
"Induct a new animal" : "",
"White" : "",
"Additional fields" : "",
"Brussels Griffon" : "",
"{plural2} unaltered animals have been adopted in the last month" : "",
"Use TLS" : "",
"Add investigation" : "",
"Transactions" : "",
"Stats show running figures for the selected period of animals entering and leaving the shelter on the home page." : "",
"Email media" : "",
"Change User Settings" : "",
"Updating..." : "",
"Edit transaction" : "",
"Browse sheltermanager.com" : "",
"Expenses::Stationary" : "",
"Auto removed due to lack of owner contact." : "",
"Animal (optional)" : "",
"Edit Roles" : "",
"Add Log to Animal" : "",
"System user accounts" : "",
"Attach a link to a web resource" : "",
"View Diary" : "",
"Profile name cannot be blank" : "",
"Person - Type" : "",
"Initials" : "",
"Simple" : "",
"N (Non Shelter Animal)" : "",
"Animal - Details" : "",
"System Admin" : "",
"Lost animal entry {0} successfully created." : "",
"Include deceased" : "",
"New Litter" : "",
"Cattle Dog" : "",
"Samoyed" : "",
"Organisation" : "",
"Generate a javascript database for the search page" : "",
"Change Log" : "",
"Chinchilla" : "",
"Start date" : "",
"Roles need a name." : "",
"Appenzell Mountain Dog" : "",
"Shepherd" : "",
"Added" : "",
"Boston Terrier" : "",
"Change Found Animal" : "",
"Add Message" : "",
"All animal shelters on file." : "",
"A unique number to identify this movement" : "",
"Booster" : "",
"Found Animal" : "",
"Transfers must have a valid transfer date." : "",
"Time on list" : "",
"Siamese" : "",
"If this person is a member, the date that membership expires." : "",
"Cockapoo" : "",
"treatments" : "",
"Black and Tan Coonhound" : "",
"All fields should be completed." : "",
"This year" : "",
"Incoming forms are online forms that have been completed and submitted by people on the web." : "",
"Options" : "",
"Incoming donations (misc)" : "",
"Aged To" : "",
"Only show special needs" : "",
"Apr" : "",
"Money" : "",
"Shar Pei" : "",
"Microchip Date" : "",
"Change Medical Records" : "",
"Transfer?" : "",
"Dachshund" : "",
"Sexes" : "",
"Return Category" : "",
"Next>" : "",
"View Accounts" : "",
"Fawn Tortoiseshell" : "",
"Black" : "",
"View Tests" : "",
"Edit diary tasks" : "",
"View the animals in this litter" : "",
"All homecheckers on file." : "",
"Yes/No" : "",
"Most relevant" : "",
"Change Transactions" : "",
"Remove previously published files before uploading" : "",
"Litter Reference" : "",
"Monday" : "",
"Find Lost Animal" : "",
"Split baby/adult age at" : "",
"Add Donation" : "",
"Aged Between" : "",
"Use fancy tooltips" : "",
"Delete Treatments" : "",
"Date" : "Дата",
"View Litter" : "",
"Data" : "",
"Find Animal" : "",
"Superuser" : "",
"All time" : "",
"All fosterers on file." : "",
"Enabled" : "",
"Find found animal returned {0} results." : "",
"Default to advanced find person screen" : "",
"Payment Types" : "",
"Cinnamon" : "",
"Install" : "",
"Corgi" : "",
"Omit header/footer" : "",
"ASM can track detailed monthly and annual figures for your shelter. Install the Monthly Figures and Annual Figures reports from Settings-Reports-Browse sheltermanager.com" : "",
"History" : "",
"Attach File" : "",
"(both)" : "",
"Publish now" : "",
"ASM comes with a dictionary of 4,000 animal names. Just click the generate random name button when adding an animal." : "",
"SmartTag PETID" : "",
"Due in next year" : "",
"Create Waiting List" : "",
"Title First Last" : "",
"Various" : "",
"Password is incorrect." : "",
"Welsh Corgi" : "",
"Dogue de Bordeaux" : "",
"{plural1} months" : "",
"Movement dates clash with an existing movement." : "",
"Hovawart" : "",
"Conure" : "",
"{plural1} days." : "",
"Mail" : "",
"Balinese" : "",
"Vaccination book" : "",
"Save this animal" : "",
"Important" : "",
"{plural3} animals were adopted" : "",
"Maltese" : "",
"New Waiting List Entry" : "",
"1 treatment" : "",
"Name" : "Ім’я",
"This animal was transferred from another shelter" : "",
"Healthy" : "",
"Seal" : "",
"Crossbreed" : "",
"treatments, every" : "",
"Publishing template" : "",
"{plural3} tests need to be performed today" : "",
"The result of the FIV test" : "",
"Invalid email address" : "",
"Upload splash.jpg and logo.jpg to override the login screen image and logo at the top left of ASM." : "",
"Jindo" : "",
"Work" : "",
"All animal care officers on file." : "",
"filters: a:animal, p:person, wl:waitinglist, la:lostanimal, fa:foundanimal keywords: onshelter/os, notforadoption, donors, deceased, vets, retailers, staff, fosterers, volunteers, homecheckers, members, activelost, activefound" : "",
"Name contains" : "",
"Add Cost" : "",
"after connecting, chdir to" : "",
"Is this a permanent foster?" : "",
"Wed" : "",
"Norwegian Buhund" : "",
"Comments" : "Коментарі",
"Turkish Angora" : "",
"Movement Date" : "",
"UK Giftaid" : "",
"Test" : "",
"PetLink Publisher" : "",
"We" : "",
"Age groups are assigned based on the age of an animal. The figure in the left column is the upper limit in years for that group." : "",
"American Bulldog" : "",
"Wk" : "",
"Account disabled." : "",
"Lost animals reported in the last 30 days." : "",
"Order published animals by" : "",
"Diary" : "",
"{0} incurred in costs" : "",
"The SmartTag type" : "",
"Get more reports from sheltermanager.com" : "",
"Default Donation Type" : "",
"Show the location unit field" : "",
"Jump to diary" : "",
"Show quick links on all pages" : "",
"Brotogeris" : "",
"Annual" : "",
"Welsh Terrier" : "",
"Large" : "",
"Add Accounts" : "",
"Recently Changed" : "",
"Points for matching lost/found area" : "",
"Template Name" : "",
"Animal shortcode format" : "",
"Diary note {0} rediarised for {1}" : "",
"Find this address on a map" : "",
"Grey and White" : "",
"Delete Media" : "",
"Shiba Inu" : "",
"Hound" : "",
"First Last" : "",
"Edit test" : "",
"Californian" : "",
"Add a found animal" : "",
"Vaccination Types" : "",
"Bite" : "",
"Income" : "",
"Zipcode" : "",
"Jump to media" : "",
"Facebook page" : "",
"Horizontal Pitch" : "",
"Location" : "Місцерозташування",
"Chinese Foo Dog" : "",
"View Person" : "",
"Carolina Dog" : "",
"Pig (Farm)" : "",
"Saint Bernard St. Bernard" : "",
"Dead On Arrival" : "",
"On shelter for {0} days, daily cost {1}, cost record total <b>{2}</b>" : "",
"Saddlebred" : "",
"This person has not passed a homecheck" : "",
"Diary subject cannot be blank" : "",
"Edit vaccination" : "",
"Bank::Current" : "",
"All homechecked owners on file." : "",
"weeks after last contact." : "",
"Paint/Pinto" : "",
"Burmese" : "",
"African Grey" : "",
"Himalayan" : "",
"Points for being found within 2 weeks of being lost" : "",
"User Roles" : "",
"This animal is quarantined" : "",
"Report Title" : "",
"Attach a file" : "",
"Give" : "",
"Standardbred" : "",
"Defaults formats for code and shortcode are TYYYYNNN and NNT" : "",
"Reset Password" : "",
"Users need a username, password and at least one role or the superuser flag setting." : "",
"Clydesdale" : "",
"Basset Hound" : "",
"Field Spaniel" : "",
"Last Month" : "",
"Vouchers" : "",
"Domestic Medium Hair" : "",
"Remove the FIV/L test fields from animal health details" : "",
"Default to advanced find animal screen" : "",
"Add log" : "",
"Log requires an animal." : "",
"{plural2} animals were transferred to other shelters" : "",
"Yes/No/Unknown" : "",
"Animal Shelter Manager Login" : "",
"Date brought in cannot be blank" : "",
"Publishing" : "",
"Create a new template by copying the selected template" : "",
"This animal was dead on arrival to the shelter" : "",
"Unaltered Adopted Animals" : "",
"Foster movements must have a valid foster date." : "",
"Logout" : "",
"You didn't specify any search criteria, so an on-shelter search was assumed." : "",
"Zipcode contains" : "",
"Receive a donation" : "",
"Remove unwanted functionality" : "",
"{plural1} animals were transferred to other shelters" : "",
"English Shepherd" : "",
"Available for adoption" : "",
"Australian Kelpie" : "",
"The date this person was homechecked." : "",
"Species A-Z" : "",
"Highlight" : "",
"Birman" : "",
"Any information about the animal" : "",
"Area Found" : "",
"Available sheltermanager.com reports" : "",
"Mandatory" : "",
"To" : "",
"Date Reported" : "",
"Animal Emblems" : "",
"{plural0} animal was transferred to another shelter" : "",
"{plural1} trial adoptions have ended" : "",
"Colors" : "",
"All animals matching current publishing options." : "",
"Lhasa Apso" : "",
"This animal has a SmartTag PETID" : "",
"Edit online form" : "",
"Bulk Complete Waiting List" : "",
"Size" : "Розмір",
"Additional" : "",
"Document Repository" : "",
"FIV Result" : "",
"Transfer" : "",
"Akbash" : "",
"Palomino" : "",
"Somali" : "",
"Find Found Animal" : "",
"Waiting List - Removal" : "",
"Profile" : "",
"The result of the FLV test" : "",
"Complete" : "",
"Litters" : "",
"Chart" : "",
"Lost animals must have a contact" : "",
"URL" : "",
"First Vaccination" : "",
"Silver" : "",
"When creating donations from the Move menu screens, mark them due instead of received" : "",
"Culling" : "",
"Comments Contain" : "",
"Tosa Inu" : "",
"When ASM should stop showing this message" : "",
"Clear" : "",
"{plural0} result found in {1} seconds. Order: {2}" : "",
"Create missing lookup values" : "",
"Donation book" : "",
"There is not enough information in the form to create a found animal record (need a description and area found)." : "",
"Brittany Spaniel" : "",
"Defaults" : "",
"Owl" : "",
"This screen allows you to add extra documents to your database, for staff training, reference materials, etc." : "",
"No view permission for this report" : "",
"Add Litter" : "",
"Message Board" : "",
"In" : "",
"Old English Sheepdog" : "",
"Date found cannot be blank" : "",
"Holland Lop" : "",
"{plural2} trial adoptions have ended" : "",
"Vaccination" : "",
"Pumi" : "",
"Points for matching age group" : "",
"Form URL" : "",
"Death" : "",
"Lookup" : "",
"Eclectus" : "",
"Microchip Number" : "",
"Peruvian Inca Orchid" : "",
"{plural2} people with active reservations have not been homechecked" : "",
"Completed" : "",
"View Diets" : "",
"Donation Types" : "",
"State contains" : "",
"When I generate a document, make a note of it in the log with this type" : "",
"Change" : "",
"Default Color" : "",
"To Retailer" : "",
"Adoptions {0}" : "",
"Adoption fee donations" : "",
"Manx" : "",
"Starts" : "",
"Boykin Spaniel" : "",
"Dandi Dinmont Terrier" : "",
"This type of movement requires a person." : "",
"Online forms can be linked to from your website and used to take information from visitors for applications, etc." : "",
"<Prev" : "",
"Create a new animal from this found animal record" : "",
"SQL dump (without media)" : "",
"Note" : "",
"Publishing Options" : "",
"Split pages with a species name prefix" : "",
"Show" : "",
"Greyhound" : "",
"9 months" : "",
"Pig" : "",
"Publish to PetFinder.com" : "",
"Brought In" : "внесений",
"Animal '{0}' successfully marked deceased." : "",
"When displaying person names in lists, use the format" : "",
"Payment Type" : "",
"Add message" : "",
"FLV" : "",
"SQL interface" : "",
"Animal Type" : "",
"New" : "",
"Default Test Type" : "",
"Publish to MeetAPet.com" : "",
"Online Forms" : "",
"Match this animal with the lost and found database" : "",
"Homechecked by" : "",
"Edit document" : "",
"Create found animal records from the selected forms" : "",
"{plural2} animals were euthanized" : "",
"Access them via the url 'image?mode=dbfs&id=/reports/NAME'" : "",
"{plural3} animals are not available for adoption" : "",
"Oriental Long Hair" : "",
"Completed notes upto today" : "",
"SmartTag Publisher" : "",
"Configuration" : "",
"Cash" : "",
"Reservation Date" : "",
"Husky" : "",
"Norfolk Terrier" : "",
"Description Contains" : "",
"Create a new template" : "",
"View Person Links" : "",
"on" : "",
"Polish Lowland Sheepdog" : "",
"New Task" : "",
"Most browsers will let you visit a record you have been to in this session by typing part of its name in the address bar." : "",
"System Options" : "",
"White and Tan" : "",
"Message" : "",
"or" : "",
"Include this information on animals shared via Facebook" : "",
"Young Adult" : "",
"No" : "",
"Can afford donation?" : "",
"Add" : "",
"Retailer movements must have a valid movement date." : "",
"{plural1} weeks." : "",
"Black and Brown" : "",
"Original Owner" : "",
"This animal is part of a cruelty case against an owner" : "",
"Active users: {0}" : "",
"More Medications" : "",
"Add Log" : "",
"Letter" : "",
"Italian Spinone" : "",
"Expiry date" : "",
"You can drag and drop animals in shelter view to change their locations." : "",
"Kishu" : "",
"If you don't select any locations, publishers will include animals in all locations." : "",
"Error" : "",
"Small" : "",
"Add Tests" : "",
"Irish Setter" : "",
"Skye Terrier" : "",
"Set the email content-type header to text/html" : "",
"Complete Tasks" : "",
"Surname" : "",
"Bulk Complete Vaccinations" : "",
"Remove the tattoo fields from animal identification details" : "",
"Add details of this email to the log after sending" : "",
"Close" : "",
"Entering 'os' in the search box will show you all shelter animals." : "",
"Change Date Required" : "",
"Gecko" : "",
"This person lives in the same area as the person who brought the animal to the shelter." : "",
"Alphabetically Z-A" : "",
"{plural1} months." : "",
"{plural3} medical treatments need to be administered today" : "",
"Plott Hound" : "",
"Thursday" : "",
"Not reconciled" : "",
"Waiting list entry successfully added." : "",
"Costs need a date and amount." : "",
"Change Report" : "",
"Shelter animals" : "",
"Hold until" : "",
"{plural3} animals were reclaimed by their owners" : "",
"Treatment marked as given for {0} - {1}" : "",
"Sizes" : "",
"Import a CSV file" : "",
"Criteria:" : "",
"NNN or NN = number unique for this type of animal for this year" : "",
"A movement must have a reservation date or type." : "",
"You have unsaved changes, are you sure you want to leave this page?" : "",
"Black and Brindle" : "",
"Vertical Pitch" : "",
"Password" : "",
"Transfer In" : "",
"Forenames" : "",
"(master user, not editable)" : "",
"Edit {0}" : "",
"Warn if the name of the new animal is similar to one entered recently" : "",
"Leukaemia" : "",
"Start at" : "",
"All vets on file." : "",
"Entry" : "",
"Once assigned, codes cannot be changed" : "",
"This will permanently remove this person, are you sure?" : "",
"View littermates" : "",
"Any animal types, species, breeds, colors, locations, etc. in the CSV file that aren't already in the database will be created during the import." : "",
"Breeds" : "",
"Forms need a name." : "",
"The date this animal was put on the waiting list" : "",
"First Names" : "",
"Import complete with {plural3} errors." : "",
"This animal has movements and cannot be removed." : "",
"Quicklinks are shown on the home page and allow quick access to areas of the system." : "",
"Find Animal/Person" : "",
"Donations need at least one date, an amount and a person." : "",
"Update publishing options" : "",
"Deceased" : "",
"Image file" : "",
"Only show cruelty cases" : "",
"Urgencies" : "",
"Default transaction view" : "",
"Fox Terrier" : "",
"Checkered Giant" : "",
"Unknown" : "",
"Breed" : "",
"{plural1} unaltered animals have been adopted in the last month" : "",
"Black Mouth Cur" : "",
"I've finished, Don't show me this popup again." : "",
"SM Account" : "",
"Points required to appear on match report" : "",
"Cymric" : "",
"Beauceron" : "",
"Top Margin" : "",
"Brown" : "",
"Fostered" : "",
"Adoption Fee" : "",
"Due in next month" : "",
"Your CSV file should have a header row with field names ASM recognises. Please see the manual for more information." : "",
"Not Available for Adoption" : "",
"to" : "",
"Fosterer" : "",
"Moving..." : "",
"Add extra images for use in reports and documents" : "",
"American Eskimo Dog" : "",
"<<" : "",
"Schnauzer" : "",
"Tattoo Number" : "",
"Animal Selection" : "",
"Parrot (Other)" : "",
"Refresh" : "",
"Lost and Found" : "",
"Amount" : "",
"Edit donation" : "",
"Other Organisation" : "",
"Edit All Diary Notes" : "",
"The species of this animal" : "",
"Homecheck areas" : "",
"Expenses::Postage" : "",
"Home Phone" : "",
"This will permanently remove this account and ALL TRANSACTIONS HELD AGAINST IT. This action is irreversible, are you sure you want to do this?" : "",
"The movement number '{0}' is not unique." : "",
"and" : "",
"Macaw" : "",
"Jump to movements" : "",
"Canary" : "",
"Temporary Vaccination" : "",
"Medical book" : "",
"Or upload a script" : "",
"[None]" : "",
"Lost animal entries matching '{0}'." : "",
"English Toy Spaniel" : "",
"Airedale Terrier" : "",
"All members on file." : "",
"Log" : "",
"HTML/FTP Publisher" : "",
"{plural1} shelter animals have people looking for them" : "",
"Date Of Birth" : "Дата народження",
"None" : "",
"Points for matching zipcode" : "",
"Publish HTML via FTP" : "",
"This person has been under investigation" : "",
"A list of areas this person will homecheck - eg: S60 S61" : "",
"Parrotlet" : "",
"Vaccinate" : "",
"All volunteers on file." : "",
"Vaccination Given" : "",
"Finnish Spitz" : "",
"Abuse" : "",
"Date Removed" : "",
"Vaccinations" : "",
"People with active reservations, but no homecheck has been done." : "",
"Cancel unadopted reservations after" : "",
"{plural2} results found in {1} seconds. Order: {2}" : "",
"Performed" : "",
"Default image for documents" : "",
"No matches found." : "",
"Log entries need a date and text." : "",
"Short" : "",
"Puli" : "",
"{0} received in donations" : "",
"Future notes" : "",
"You can middle click a link to open it in a new browser tab (push the wheel on most modern mice)." : "",
"Recently Adopted" : "",
"Pass Homecheck" : "",
"Contact" : "",
"Include cruelty case animals" : "",
"GiftAid" : "",
"No data to show on the report." : "",
"Go the system users screen and add user accounts for your staff." : "",
"Thai Ridgeback" : "",
"Shelter stats (all time)" : "",
"Add Media" : "",
"Prairie Dog" : "",
"Return date cannot be before the movement date." : "",
"FLV Result" : "",
"Tennessee Walker" : "",
"Date and notes are mandatory." : "",
"This screen allows you to add extra images to your database, for use in reports and documents." : "",
"Logged in Facebook user" : "",
"Pension" : "",
"Aug" : "",
"View Staff Person Records" : "",
"Generate a document from this animal" : "",
"Unspecified" : "",
"Edit voucher" : "",
"Animal" : "",
"Standard" : "",
"estimate" : "",
"Male" : "",
"Edit Online Forms" : "",
"Pot Bellied" : "",
"Cost Type" : "",
"Liver" : "",
"Display Index" : "",
"Died {0}" : "",
"Medical profiles" : "",
"more" : "",
"Change Vaccinations" : "",
"When matching lost animals, include shelter animals" : "",
"Multi-Lookup" : "",
"Add Animals" : "",
"Su" : "",
"Wire-haired Pointing Griffon" : "",
"American" : "",
"CSV of person data" : "",
"Found" : "",
"Sa" : "",
"Miniature Pinscher" : "",
"Jersey Wooly" : "",
"Produce a CSV File" : "",
"New Voucher" : "",
"If this is the web preferred image, web publishers will use these notes as the animal description" : "",
"Manually enter codes (do not generate)" : "",
"{plural2} weeks." : "",
"The date the donation was received" : "",
"Open reports in a new browser tab" : "",
"Include fostered animals" : "",
"Edit diary task" : "",
"Person - Additional" : "",
"Edit the current waiting list" : "",
"RescueGroups Publisher" : "",
"Area Lost" : "",
"Bulk Complete Medical Records" : "",
"Remove short shelter code box from the animal details screen" : "",
"Add this text to all animal descriptions" : "",
"Portuguese Water Dog" : "",
"{0} cannot be blank" : "",
"The sheltermanager.com admin account password cannot be changed here, please visit {0}" : "",
"Go the lookup data screen and add/remove breeds, species and animal types according to the animals your shelter deals with." : "",
"Incoming" : "",
"Recently Entered Shelter" : "",
"German Wirehaired Pointer" : "",
"Hidden" : "Приховано",
"All diary notes" : "",
"Animal - Death" : "",
"Shelter code {0} has already been allocated to another animal." : "",
"Expires" : "",
"English Bulldog" : "",
"New Diet" : "",
"Recently Fostered" : "",
"All animals on the shelter." : "",
"Dog" : "",
"Flags" : "",
"Withdrawal" : "",
"Dutch Shepherd" : "",
"Password for '{0}' has been reset to default of 'password'" : "",
"Brought In By" : "",
"Times should be in HH:MM format, eg: 09:00, 16:30" : "",
"Show the internal location field" : "",
"Warn when adopting to a person who has previously brought an animal to the shelter" : "",
"Edit Lookups" : "",
"Code contains" : "",
"Save this waiting list entry" : "",
"Entry Category" : "",
"File" : "",
"Mail Merge" : "",
"Change Diets" : "",
"View Document Repository" : "",
"Horse" : "",
"Kyi Leo" : "",
"{plural2} reservations have been active over a week without adoption" : "",
"Enable the waiting list functionality" : "",
"Diary for {0}" : "",
"Default urgency" : "",
"The date this animal was lost" : "",
"Oct" : "",
"Movement Number" : "",
"Thumbnail size" : "",
"Silver Fox" : "",
"Select a person to merge into this record. The selected person will be removed, and their movements, diary notes, log entries, etc. will be reattached to this record." : "",
"March" : "",
"Nova Scotia Duck-Tolling Retriever" : "",
"Priority Floor" : "",
"Wirehaired Terrier" : "",
"Estimate" : "",
"Treatment name cannot be blank" : "",
"All animals who are currently held in case of reclaim." : "",
"Edit report" : "",
"Last name" : "",
"Select a person to attach this form to." : "",
"Retailer Book" : "",
"Senior" : "",
"Include this image when publishing" : "",
"Mark new animals as not for adoption" : "",
"Cell" : "",
"Match Lost and Found" : "",
"Generate documentation" : "",
"This person is very similar to another person on file, carry on creating this record?" : "",
"Waiting List: {0}" : "",
"Generate Documents" : "",
"Terrapin" : "",
"Rat" : "",
"Chihuahua" : "",
"Diary task items need a pivot, subject and note." : "",
"Delete Animals" : "",
"Warn when adopting to a person who has not been homechecked" : "",
"Updated." : "",
"Facebook Sharing" : "",
"Delete Vaccinations" : "",
"Search sort order" : "",
"Rhea" : "",
"Donor" : "",
"Treatment Given" : "",
"Reservation book" : "",
"Expenses::Water" : "",
"Lowest" : "",
"Medium" : "",
"New Role" : "",
"{plural2} tests need to be performed today" : "",
"Default Entry Reason" : "",
"This animal should be held in case it is reclaimed" : "",
"Flag" : "",
"This person is linked to an animal and cannot be removed." : "",
"Create a new document" : "",
"Report" : "",
"Add diary task" : "",
"Move an animal to a retailer" : "",
"Weekly" : "",
"Open records in a new browser tab" : "",
"Lookups" : "",
"New Zealand" : "",
"Receive" : "",
"Your sheltermanager.com account is due to expire on {0}, please renew {1}" : "",
"{plural1} tests need to be performed today" : "",
"Sealyham Terrier" : "",
"Age Group 3" : "",
"Age Group 2" : "",
"Age Group 1" : "",
"Age Group 7" : "",
"Age Group 6" : "",
"Age Group 5" : "",
"Age Group 4" : "",
"Age Group 8" : "",
"Default Brought In By" : "",
"Neutered" : "",
"Delete" : "",
"You can set a default amount for different donation types in the Settings- Lookup Data screen. Very handy when creating adoptions." : "",
"Video Link" : "",
"Waiting list entry for {0} ({1})" : "",
"After the user presses submit and ASM has accepted the form, redirect the user to this URL" : "",
"Returned To Owner" : "",
"Generate a random name for this animal" : "",
"Clear tables before importing" : "",
"Turkish Van" : "",
"Upload Document" : "",
"Make this the default image when viewing this record and publishing to the web" : "",
"Diary task: {0}" : "",
"Leonberger" : "",
"{plural1} animals were euthanized" : "",
"{plural2} animals were adopted" : "",
"This Week" : "",
"Fri" : "",
"Siberian" : "",
"Saluki" : "",
"The selected file is not an image." : "",
"Bluetick Coonhound" : "",
"{plural1} medical treatments need to be administered today" : "",
"Entering 'notforadoption' in the search box will show you all shelter animals with the not for adoption flag set." : "",
"Advanced find animal screen defaults to on shelter" : "",
"Add Vouchers" : "",
"Foxhound" : "",
"Attach this form to an existing person" : "",
"Issue a new insurance number for this animal/adoption" : "",
"This animal has a tattoo" : "",
"Toy Fox Terrier" : "",
"Started" : "",
"{plural2} vaccinations need to be administered today" : "",
"Italian Greyhound" : "",
"View Animal Vet" : "",
"Reservations must have a valid reservation date." : "",
"Date put on" : "",
"Puppies (under {0} months)" : "",
"This animal has been heartworm tested" : "",
"Gift Aid" : "",
"Edit Header/Footer" : "",
"8 weeks" : "",
"Tattoo Date" : "",
"People Looking For" : "",
"The date the trial adoption is over" : "",
"Add medical regimen" : "",
"Create a new waiting list entry from this found animal record" : "",
"Did you know?" : "",
"Show tips on the home page" : "",
"(blank)" : "",
"Reason not from Owner" : "",
"Edit medical regimen" : "",
"British Shorthair" : "",
"Delete Litter" : "",
"Income::WaitingList" : "",
"Cancel" : "",
"Devon Rex" : "",
"Save and leave" : "",
"Loading..." : "",
"Reason for Entry" : "",
"Default Size" : "",
"Current" : "",
"Received in last year" : "",
"{0} {1} aged {2}" : "",
"The coat type of this animal" : "",
"Users" : "",
"Shelter code cannot be blank" : "",
"Parakeet (Other)" : "",
"Add a test" : "",
"Treeing Walker Coonhound" : "",
"{plural0} month." : "",
"Media notes contain" : "",
"Calico" : "",
"American Curl" : "",
"Thu" : "",
"Chart (Steps)" : "",
"Entry Reasons" : "",
"{plural1} years." : "",
"Change Tests" : "",
"Add Document to Repository" : "",
"New Vaccination" : "",
"{plural1} results found in {1} seconds. Order: {2}" : "",
"Bernese Mountain Dog" : "",
"Cost" : "",
"Publish to HelpingLostPets.com" : "",
"Edit additional field" : "",
"Address contains" : "",
"Clumber Spaniel" : "",
"The date the owner last contacted the shelter" : "",
"Total donations" : "",
"T = first letter of animal type" : "",
"Unsuitable Accomodation" : "",
"Show the color field" : "",
"Extra images" : "",
"Rediarised" : "",
"The date the foster is effective from" : "",
"Medical profiles need a profile name, treatment, dosage and frequencies." : "",
"Vietnamese Pot Bellied" : "",
"Medicate Animal" : "",
"Delete Donation" : "",
"Field names should not contain spaces." : "",
"Reconciled" : "",
"less" : "",
"A unique reference for this litter" : "",
"Link" : "",
"Generate" : "",
"Select date for diary task" : "",
"Add lost animal" : "",
"Messages successfully sent" : "",
"Rottweiler" : "",
"(everyone)" : "",
"Search returned {0} results." : "",
"MM = current month" : "",
"Found Animal - Additional" : "",
"Quaker Parakeet" : "",
"Display" : "",
"You can use incoming forms to create new records or attach them to existing people." : "",
"Chart (Point)" : "",
"(any)" : "",
"{0} is running ({1}% complete)." : "",
"Held" : "",
"The date the animal was heartworm tested" : "",
"Female" : "",
"Mini Rex" : "",
"Hairless" : "",
"Bombay" : "",
"Dogo Argentino" : "",
"Confirm Password" : "",
"Do not show" : "",
"Lost animal - {0} {1} [{2}]" : "",
"For" : "",
"Create" : "",
"First name(s)" : "",
"Work Phone" : "",
"CSV of animal/adopter data" : "",
"Kai Dog" : "",
"This animal has been altered" : "",
"Good With Children" : "",
"Execute the SQL in the box below" : "",
"Medical regimens need an animal, name, dosage, a start date and frequencies." : "",
"Belgian Shepherd Tervuren" : "",
"Change Date Given" : "",
"Phone" : "",
"Norwegian Elkhound" : "",
"Extra Images" : "",
"Select all" : "",
"Entered shelter" : "",
"Expenses::Food" : "",
"1 year" : "",
"Least recently changed" : "",
"The locale determines the language ASM will use when displaying text, dates and currencies." : "",
"Delete Report" : "",
"Manual" : "",
"{plural2} months." : "",
"Income::EntryDonation" : "",
"{plural3} animals entered the shelter" : "",
"Not for adoption" : "",
"{plural2} animals died" : "",
"Ostrich" : "",
"Maremma Sheepdog" : "",
"German Pinscher" : "",
"Brindle and White" : "",
"Publish Animals to the Internet" : "",
"Special Needs" : "Спеціальні потреби",
"Diary notes need a date and subject." : "",
"FIV/L Test Date" : "",
"Death Reason" : "",
"Date of birth cannot be blank" : "",
"Medicate" : "",
"Unit" : "",
"Stop Publishing" : "",
"Brindle" : "",
"Yes" : "",
"Setter" : "",
"Keeshond" : "",
"Edit notes" : "",
"Edit log" : "",
"No publishers are running." : "",
"Harlequin" : "",
"End at" : "",
"Florida White" : "",
"Diary tasks need a name." : "",
"6 months" : "",
"Delete Accounts" : "",
"This animal has active reservations, they will be cancelled." : "",
"This month" : "",
"Eligible for gift aid" : "",
"Homechecker" : "",
"Waiting list entries must have a contact" : "",
"On shelter for {0} days. Total cost: {1}" : "",
"Euthanized" : "",
"Homechecked" : "",
"Copy from animal comments" : "",
"Permanent Foster" : "",
"{plural3} weeks." : "",
"Donkey" : "",
"Location and Unit" : "",
"Emu" : "",
"Account code cannot be blank." : "",
"Cavalier King Charles Spaniel" : "",
"{plural1} animals entered the shelter" : "",
"Th" : "",
"Remove the declawed box from animal health details" : "",
"All Publishers" : "",
"Issued" : "",
"Delete Investigation" : "",
"YY or YYYY = current year" : "",
"German Shorthaired Pointer" : "",
"Very Large" : "",
"Done" : "",
"People with overdue donations." : "",
"Unaltered" : "",
"Non-Shelter" : "",
"This person has not passed a homecheck." : "",
"Applehead Siamese" : "",
"City" : "",
"Bullmastiff" : "",
"Name Contains" : "",
"Australian Shepherd" : "",
"{plural0} reservation has been active over a week without adoption" : "",
"Number of animal links to show" : "",
"Successfully attached to {0}" : "",
"Share this animal on Facebook" : "",
"Shetland Sheepdog Sheltie" : "",
"View Vaccinations" : "",
"Abandoned" : "",
"{plural1} people have overdue donations" : "",
"The date this animal was returned to its owner" : "",
"The period in days before waiting list urgency is increased" : "",
"Sorrel" : "",
"Add a new person" : "",
"{plural2} animals were reclaimed by their owners" : "",
"White and Brown" : "",
"Import complete with {plural0} error." : "",
"Last Location" : "",
"Extra-Toes Cat (Hemingway Polydactyl)" : "",
"Canadian Hairless" : "",
"Bearded Collie" : "",
"New Report" : "",
"{0} results." : "",
"Cow" : "",
"Investigator" : "",
"Expenses::Gas" : "",
"You can sort tables by clicking on the column headings." : "",
"Belgian Shepherd Laekenois" : "",
"Execute Script" : "",
"Include Removed" : "",
"Auto log users out after this many minutes of inactivity" : "",
"Bedlington Terrier" : "",
"Camel" : "",
"Cost record" : "",
"Telephone Bills" : "",
"Select a person" : "",
"Reupload animal images every time" : "",
"Publisher Logs" : "",
"Points for matching color" : "",
"Date Brought In" : "",
"Animals per page" : "",
"Vet" : "",
"Found animals must have a contact" : "",
"Password successfully changed." : "",
"Tortie" : "",
"Checked By" : "",
"Publish to AdoptAPet.com" : "",
"Bobtail" : "",
"The date the transfer is effective from" : "",
"New Fosterer" : "",
"New task detail" : "",
"(use system)" : "",
"Japanese Bobtail" : "",
"Delete Diets" : "",
"The date the animal was FIV/L tested" : "",
"Chocolate" : "",
"The name of the page you want to post to (eg: Your Humane Society). Leave blank to post to your wall." : "",
"Snake" : "",
"Both" : "",
"Membership Number" : "",
"Tortoise" : "",
"English Setter" : "",
"This animal has special needs" : "",
"Delete Diary" : "",
"Swedish Vallhund" : "",
"Yellow and Grey" : "",
"This database is locked and in read-only mode. You cannot add, change or delete records." : "",
"Movement Type" : "",
"Pheasant" : "",
"Links" : "",
"Points for matching sex" : "",
"Tortie and White" : "",
"Update the daily boarding cost for this animal" : "",
"{plural2} years." : "",
"Species Z-A" : "",
"Littermates" : "",
"Add online form" : "",
"Delete Waiting List" : "",
"{plural2} animals entered the shelter" : "",
"Lost Animal: {0}" : "",
"Pekingese" : "",
"When you use Move > Foster an animal, ASM will automatically return any open foster movement before moving the animal to its new home." : "",
"Confirm" : "",
"Single Treatment" : "",
"One-Off" : "",
"Gas Bills" : "",
"Entlebucher" : "",
"Stolen" : "",
"Spitz" : "",
"Email" : "",
"Crossbreed animal should have different breeds" : "",
"Age Groups" : "",
"{0} - {1} {2}" : "",
"English Lop" : "",
"Return an animal from another movement" : "",
"4 weeks" : "",
"Test Types" : "",
"You can change how ASM looks by choosing a new theme under Settings-Options- Shelter Details-Visual Theme." : "",
"Chart (Pie)" : "",
"Add a new animal" : "",
"Invalid username or password." : "",
"User Accounts" : "",
"Portugese Podengo" : "",
"PetFinder Publisher" : "",
"Status" : "",
"Released To Wild {0}" : "",
"Euthanized {0}" : "",
"Foster successfully created." : "",
"Show the size field" : "",
"Mail Merge - {0}" : "",
"View Log" : "",
"Ringneck/Psittacula" : "",
"Turtle" : "",
"Pony" : "",
"{plural3} people have overdue donations" : "",
"Inactive - do not include" : "",
"Forbidden" : "",
"Copy of {0}" : "",
"{plural0} animal entered the shelter" : "",
"Death Comments" : "",
"Normal user" : "",
"Briard" : "",
"Row" : "",
"Finnish Lapphund" : "",
"Siberian Husky" : "",
"Egyptian Mau" : "",
"Test Animal" : "",
"Show the litter ID field" : "",
"ACO" : "",
"Shares" : "",
"Quick Links" : "",
"This will permanently remove this waiting list entry, are you sure?" : "",
"Irish Wolfhound" : "",
"Tiger" : "",
"Dead on arrival" : "",
"Remember me on this computer" : "",
"Forgotten password?" : "",
"Add a diary note" : "",
"Invalid time, times should be in HH:MM format" : "",
"This person is not flagged as a fosterer and cannot foster animals." : "",
"{0} treatments every {1} days" : "",
"Send via email" : "",
"Homecheck Areas" : "",
"All notes upto today" : "",
"Animals" : "",
"{0} {1}: posted to Facebook page {2} by {3}" : "",
"inches" : "",
"Browse sheltermanager.com and install some reports, charts and mail merges into your new system." : "",
"Time on shelter" : "",
"The reason the owner wants to part with the animal" : "",
"Edit account" : "",
"{plural0} vaccination needs to be administered today" : "",
"Tests need an animal and at least a required date." : "",
"{plural0} unaltered animal has been adopted in the last month" : "",
"Lilac" : "",
"Loan" : "",
"Munchkin" : "",
"Books" : "",
"American Wirehair" : "",
"Type of animal links to show" : "",
"Log Types" : "",
"Iguana" : "",
"English Springer Spaniel" : "",
"Papillon" : "",
"Belgian Shepherd Dog Sheepdog" : "",
"Tu" : "",
"Attach" : "",
"Good with children" : "",
"Reserve an animal" : "",
"Adopt an animal" : "",
"{0}: {1} {2} - {3} {4}" : "",
"Find animal" : "",
"Map" : "",
"Mar" : "",
"May" : "",
"Default Return Reason" : "",
"{plural3} urgent entries on the waiting list" : "",
"Waiting List" : "",
"A4" : "",
"View Vouchers" : "",
"White and Liver" : "",
"Publisher Breed" : "",
"Diary date cannot be blank" : "",
"Quarantine" : "",
"Last Week" : "",
"The date reported to the shelter" : "",
"Skunk" : "",
"Dialog title" : "",
"The reason this animal was removed from the waiting list" : "",
"Is this a trial adoption?" : "",
"Tan and White" : "",
"Comments contain" : "",
"Set publishing options" : "",
"Reservation successfully created." : "",
"Satin" : "",
"Unable to Afford" : "",
"Ibizan Hound" : "",
"Edit litters" : "",
"{plural0} year" : "",
"{0} {1} ({2} treatments)" : "",
"Monthly" : "",
"Bengal" : "",
"Delete this record" : "",
"This animal has the same name as another animal recently added to the system." : "",
"Asset" : "",
"Last changed by {0} on {1}" : "",
"Homecheck Date" : "",
"Person Flags" : "",
"The shelter category for this animal" : "",
"Parent" : "",
"Returning" : "",
"{plural1} reservations have been active over a week without adoption" : "",
"SMTP server" : "",
"Remove the online form functionality from menus" : "",
"Goldfish" : "",
"Nov" : "",
"The date the reservation is effective from" : "",
"Include held animals" : "",
"Lakeland Terrier" : "",
"Diary and Messages" : "",
"Domestic Short Hair" : "",
"End Of Day" : "",
"Retailer" : "",
"Bank savings account" : "",
"Select" : "",
"Make this the default image when creating documents" : "",
"Turkey" : "",
"Index" : "",
"Diary note {0} marked completed" : "",
"Stats" : "",
"Weimaraner" : "",
"Long" : "",
"State" : "",
"American Fuzzy Lop" : "",
"The date the foster animal will be returned if known" : "",
"Animal '{0}' created with code {1}" : "",
"This animal already has an active reservation." : "",
"Passwords cannot be blank." : "",
"{plural2} people have overdue donations" : "",
"Email address" : "",
"Create this message" : "",
"Cornish Rex" : "",
"This week" : "",
"HTML" : "",
"Markings" : "",
"Internal Locations" : "",
"Remove holds after" : "",
"{plural1} year" : "",
"Remove the neutered fields from animal health details" : "",
"Only show account totals for the current period, which starts on " : "",
"Publisher" : "",
"Champagne DArgent" : "",
"Redbone Coonhound" : "",
"Borzoi" : "",
"Russian Blue" : "",
"Log Text" : "",
"Poicephalus/Senegal" : "",
"Edit medical profile" : "",
"Green" : "",
"More Vaccinations" : "",
"Default video for publishing" : "",
"Change Password" : "",
"Water Bills" : "",
"Back" : "",
"Date Put On" : "",
"Download" : "",
"Good with kids" : "",
"Prefill new media notes with the filename if left blank" : "",
"Staff" : "",
"Category" : "",
"Change Vouchers" : "",
"{plural0} year." : "",
"Lory/Lorikeet" : "",
"Opening balances" : "",
"Pigeon" : "",
"Bichon Frise" : "",
"Anatolian Shepherd" : "",
"Treatment" : "",
"American Shorthair" : "",
"Donations for animals entering the shelter" : "",
"New diary task" : "",
"Media Notes" : "",
"New Document" : "",
"Fr" : "",
"Pomeranian" : "",
"An animal cannot have multiple open movements." : "",
"Income::" : "",
"Add waiting list" : "",
"Change Litter" : "",
"Transferred Out {0}" : "",
"The animal name" : "",
"Date put on list" : "",
"3 months" : "",
"Shelter stats (this week)" : "",
"Wednesday" : "",
"Remove the insurance number field from the movement screens" : "",
"Merge" : "",
"View Medical Records" : "",
"Animal Codes" : "",
"Generate HTML from this SQL" : "",
"Show the date brought in field" : "",
"Change Cost" : "",
"Create and edit" : "",
"Litter Ref" : "",
"Animal Shelter Manager" : "",
"Tibetan Terrier" : "",
"Email signature" : "",
"{plural3} year" : "",
"Donkey/Mule" : "",
"Clone Animals" : "",
"Entry Donation" : "",
"Diets" : "",
"Split pages with a baby/adult prefix" : "",
"Not Available For Adoption" : "",
"Beagle" : "",
"Cats" : "",
"Details" : "",
"Edit movement" : "",
"Out Between" : "",
"Animal emblems are the little icons that appear next to animal names in shelter view, the home page and search results." : "",
"Mynah" : "",
"Delete Document from Repository" : "",
"Treat trial adoptions as part of the shelter inventory" : "",
"Add additional field" : "",
"Komondor" : "",
"Date brought in is not valid" : "",
"Email successfully sent to {0}" : "",
"Notes" : "",
"M (Miscellaneous)" : "",
"Received in last month" : "",
"Transferred" : "",
"Comments copied to web preferred media." : "",
"Lizard" : "",
"This movement cannot be from a retailer when the animal has no prior retailer movements." : "",
"Change Animals" : "",
"Australian Terrier" : "",
"Waiting List Contact" : "",
"Remove the Litter ID field from animal details" : "",
"Include non-shelter" : "",
"Pharaoh Hound" : "",
"Reserved" : "",
"Reclaimed" : "",
"New Account" : "",
"Results" : "",
"Brown and White" : "",
"Test Results" : "",
"Account Types" : "",
"FIV/L Tested" : "",
"Read the manual for more information about Animal Shelter Manager." : "",
"Non-Shelter Animal" : "",
"This animal should not be shown in figures and is not in the custody of the shelter" : "",
"Otterhound" : "",
"Edit Users" : "",
"{plural1} people with active reservations have not been homechecked" : "",
"Basenji" : "",
"You can't have a return without a movement." : "",
"To Other" : "",
"Bird" : "",
"Cockatiel" : "",
"Add Lost Animal" : "",
"Lowchen" : "",
"Here are some things you should do before you start adding animals and people to your database." : "",
"Add a lost animal" : "",
"The date the litter entered the shelter" : "",
"Found animal entries matching '{0}'." : "",
"Bank deposit account" : "",
"Transfer successfully created." : "",
"Havanese" : "",
"Volunteer" : "",
"Modify Lookups" : "",
"Execute" : "",
"Alerts" : "",
"Log requires a date." : "",
"Add Investigation" : "",
"{plural1} animals died" : "",
"Cat" : "",
"Edit role" : "",
"Mother" : "",
"{plural3} days." : "",
"Cost date must be a valid date" : "",
"Intakes {0}" : "",
"View Roles" : "",
"Create person records from the selected forms" : "",
"This item is referred to in the database ({0}) and cannot be deleted until it is no longer in use." : "",
"Delete Tests" : "",
"Add Diary" : "",
"Sat" : "",
"From" : "",
"Bonded With" : "",
"Poodle" : "",
"Send" : "",
"Next" : "",
"{plural3} reservations have been active over a week without adoption" : "",
"These options change the behaviour of the search box at the top of the page." : "",
"Entry Reason" : "",
"Send Emails" : "",
"Edit Reports" : "",
"Mark Deceased" : "",
"{plural2} months" : "",
"Delete Person" : "",
"Number in litter" : "",
"Select an animal" : "",
"Baby" : "",
"Reservation For" : "",
"Tue" : "",
"Vouchers need an issue and expiry date." : "",
"Coat Type" : "",
"Delete this waiting list entry" : "",
"{0} - {1} ({2} {3} aged {4})" : "",
"Copy animal comments to the notes field of the web preferred media for this animal" : "",
"Edit Diary Tasks" : "",
"Gordon Setter" : "",
"Lilac Tortie" : "",
"Expense" : "",
"Sphynx (hairless cat)" : "",
"Grade" : "",
"Edit system users" : "",
"Any health problems the animal has" : "",
"Online Form: {0}" : "",
"DD = current day" : "",
"Send emails" : "",
"Income::Interest" : "",
"This will permanently remove this record, are you sure?" : "",
"Only show transfers" : "",
"Add Vaccinations" : "",
"No results found." : "",
"Litters need at least a required date and number." : "",
"D (Dog)" : "",
"Foster an animal" : "",
"This animal is microchipped" : "",
"Page extension" : "",
"Remove the Rabies Tag field from animal health details" : "",
"Sheep Dog" : "",
"Adult" : "",
"Default view" : "",
"ASM" : "",
"Catahoula Leopard Dog" : "",
"Coat Types" : "",
"Entered To" : "",
"Database" : "",
"Edit investigation" : "",
"Edit report template HTML header/footer" : "",
"Settings, System user accounts" : "",
"R" : "",
"This database is locked." : "",
"Swan" : "",
"Caique" : "",
"Update animals with PetLink Microchips" : "",
"Enable visual effects" : "",
"Lost/Found" : "",
"Animal - Additional" : "",
"Moved to animal record {0}" : "",
"Escaped {0}" : "",
"Declawed" : "",
"Duck" : "",
"Information" : "",
"Rows" : "",
"This person has donations and cannot be removed." : "",
"Log Type" : "",
"SMTP username" : "",
"Edit my diary notes" : "",
"Add account" : "",
"1 week" : "",
"Real name" : "",
"Requested" : "",
"Your password is currently set to 'password'. This is highly insecure and we strongly suggest you choose a new password." : "",
"Stats period" : "",
"When publishing to third party services, add this extra text to the bottom of all animal descriptions" : "",
"Found animal - {0} {1} [{2}]" : "",
"Recently deceased" : "",
"Hidden Comments" : "",
"Return Date" : "",
"This animal is currently at a retailer, it will be automatically returned first." : "",
"Donation From" : "",
"Fila Brasileiro" : "",
"November" : "",
"{0} - {1} {2} ({3}), contact {4} ({5}) - lost in {6}, postcode {7}, on {8}" : "",
"SMTP password" : "",
"Our shelter does trial adoptions, allow us to mark these on movement screens" : "",
"Default Type" : "",
"Trial Adoption" : "",
"Reserve" : "",
"Don't create a cost record" : "",
"Adoption successfully created." : "",
"Animal Sponsorship" : "",
"Netherland Dwarf" : "",
"High" : "",
"Shelter" : "",
"Export" : "",
"SQL dump" : "",
"Home page" : "",
"Saturday" : "",
"Include reserved animals" : "",
"Allow entry of two donations on the Move menu screens" : "",
"Show alerts on the home page" : "",
"This will permanently remove this animal, are you sure?" : "",
"Dwarf Eared" : "",
"No data." : "",
"Microchipped" : "",
"Good with dogs" : "",
"Date reported cannot be blank" : "",
"Document" : "",
"Goat" : "",
"Waiting List - Details" : "",
"Dec" : "",
"Redirect to URL after POST" : "",
"New form field" : "",
"These are the HTML headers and footers used when generating reports." : "",
"Second Vaccination" : "",
"Add form field" : "",
"Trial adoption book" : "",
"Mon" : "",
"This Year" : "",
"Heartworm" : "",
"Rotate image 90 degrees clockwise" : "",
"Areas" : "",
"Age Group" : "",
"All people on file." : "",
"This person has movements and cannot be removed." : "",
"Coat" : "",
"Create note this many days from today, or 9999 to ask" : "",
"Add medical profile" : "",
"From Fostering" : "",
"View animals matching publishing options" : "",
"Diary Task: {0}" : "",
"The date the animal was brought into the shelter" : "",
"This will permanently remove the selected roles, are you sure?" : "",
"Date of last owner contact" : "",
"years" : "",
"Exotic Shorthair" : "",
"Test Performed" : "",
"Move" : "",
"Include animals in the following locations" : "",
"Edit online form HTML header/footer" : "",
"Cancelled Reservation" : "",
"Guinea fowl" : "",
"Good with cats" : "",
"Ginger and White" : "",
"Use the icon in the lower right of notes fields to view them in a separate window." : "",
"Area" : "",
"Cost Types" : "",
"Adoption" : "",
"Other Account" : "",
"Units" : "",
"Add a vaccination" : "",
"Coonhound" : "",
"Name cannot be blank" : "",
"Hotot" : "",
"Pit Bull Terrier" : "",
"Some info text" : "",
"Add person" : "",
"Scale published animal images to" : "",
"Change Person" : "",
"Match" : "",
"July" : "",
"Sugar Glider" : "",
"{plural0} animal is not available for adoption" : "",
"Body" : "",
"American Sable" : "",
"Lost Animal" : "",
"New Template" : "",
"Chesapeake Bay Retriever" : "",
"View Waiting List" : "",
"Border Collie" : "",
"Left shelter" : "",
"German Shepherd Dog" : "",
"Or move this diary on to" : "",
"Movement Types" : "",
"Oriental Short Hair" : "",
"Notes about the death of the animal" : "",
"Result" : "",
"Results for '{0}'." : "",
"Singapura" : "",
"Start Date" : "",
"Add role" : "",
"Delete Vouchers" : "",
"Warmblood" : "",
"Delete Lost Animal" : "",
"cm" : "",
"{0} rows affected." : "",
"Treatments" : "",
"Low" : "",
"Cairn Terrier" : "",
"Show PDF files inline instead of sending them as attachments" : "",
"Returns {0}" : "",
"New Guinea Singing Dog" : "",
"Where this animal is located within the shelter" : "",
"Jun" : "",
"{plural3} months." : "",
"Jul" : "",
"Animal cannot be deceased before it was brought to the shelter" : "",
"Llama" : "",
"Active" : "",
"Last, First" : "",
"Petit Basset Griffon Vendeen" : "",
"Bloodhound" : "",
"Feral" : "",
"Footer" : "",
"Change Waiting List" : "",
"2 weeks" : "",
"Income::Donation" : "",
"Waiting List - Additional" : "",
"Valid tokens for the subject and text" : "",
"An age in years, eg: 1, 0.5" : "",
"Number" : "",
"Add a person" : "",
"Fish" : "",
"Upload Photo" : "",
"{0} treatments every {1} years" : "",
"Received" : "",
"View Media" : "",
"Membership Expiry" : "",
"Afghan Hound" : "",
"Coton de Tulear" : "",
"View publishing logs" : "",
"Shelter view" : "",
"Maine Coon" : "",
"Brown and Black" : "",
"Match against other lost/found animals" : "",
"Diary Tasks" : "",
"Lost and found entries must have a contact" : "",
"Snowshoe" : "",
"Additional Fields" : "",
"This Month" : "",
"Neuter/Spay" : "",
"Medical" : "",
"Grey" : "",
"Email this person" : "",
"Out SubTotal" : "",
"Log requires a person." : "",
"No adjustment" : "",
"{0} record(s) match the mail merge." : "",
"Stray" : "",
"Altered Date" : "",
"A person is required for this movement type." : "",
"Burmilla" : "",
"When you use Move > Adopt an animal, ASM will automatically return any open foster or retailer movement before creating the adoption." : "",
"{plural3} results found in {1} seconds. Order: {2}" : "",
"{plural3} weeks" : "",
"Include retailer animals" : "",
"User roles" : "",
"On Shelter" : "",
"Similar Animal" : "",
"English Pointer" : "",
"This animal died outside the care of the shelter, and the death should be kept out of reports" : "",
"Collie" : "",
"Sunday" : "",
"Xoloitzcuintle/Mexican Hairless" : "",
"Good with Cats" : "",
"Remaining" : "",
"Settings, Reports" : "",
"Income from an on-site shop" : "",
"Angora Rabbit" : "",
"Add test" : "",
"Died off shelter" : "",
"Change Media" : "",
"Friday" : "",
"Pionus" : "",
"Ends after" : "",
"Expiry" : "",
"No results." : "",
"Reason" : "",
"Omit criteria" : "",
"Transfer an animal" : "",
"Separate waiting list rank by species" : "",
"By" : "",
"Unable to Cope" : "",
"Adoptable" : "",
"Merge another person into this one" : "",
"English Cocker Spaniel" : "",
"Change Lost Animal" : "",
"{plural0} urgent entry on the waiting list" : "",
"This animal should not be included when publishing animals for adoption" : "",
"Date Lost" : "",
"To Fostering" : "",
"Number of Tasks" : "",
"The date the animal died" : "",
"Import complete with {plural2} errors." : "",
"Hold the animal until this date or blank to hold indefinitely" : "",
"View PDF" : "",
"Edit roles" : "",
"Facebook page name" : "",
"You must supply a code." : "",
"SQL" : "",
"Polish" : "",
"Remove the coat type field from animal details" : "",
"Adoption movements must have a valid adoption date." : "",
"Waiting List {0}" : "",
"Adopted Transferred In {0}" : "",
"Gerbil" : "",
"Sun" : "",
"Greater Swiss Mountain Dog" : "",
"Date removed" : "",
"Photo successfully uploaded." : "",
"Finch" : "",
"Most recently changed" : "",
"Yorkshire Terrier Yorkie" : "",
"Preview" : "",
"The base color of this animal" : "",
"Date of Birth" : "",
"Tan and Black" : "",
"People" : "",
"Area Postcode" : "",
"Edit user" : "",
"Incomplete notes upto today" : "",
"Edit diary" : "",
"Receipt No" : "",
"{plural0} month" : "",
"Server clock adjustment" : "",
"This animal has been declawed" : "",
"Show a minimap of the address on person screens" : "",
"All existing animals, people, movements and donations in your database will be REMOVED before importing the CSV file. This removal cannot be reversed." : "",
"Mountain Cur" : "",
"Tibetan Mastiff" : "",
"Default Log Filter" : "",
"Rat Terrier" : "",
"Weight" : "",
"Set to 0 to never update urgencies." : "",
"December" : "",
"Edit My Diary Notes" : "",
"Generate letters" : "",
"All retailers on file." : "",
"Lifetime" : "",
"Shelter Animals" : "",
"Pointer" : "",
"Rosella" : "",
"Marriage/Relationship split" : "",
"Set to 0 for no limit." : "",
"Thank you for choosing Animal Shelter Manager for your shelter!" : "",
"Foster Book" : "",
"Kuvasz" : "",
"Find Person" : "",
"Trial ends on" : "",
"Bank account interest" : "",
"Shelter stats (today)" : "",
"Number of fields" : "",
"Waiting list donations" : "",
"Not available for adoption" : "",
"Remove the location unit field from animal details" : "",
"Golden Retriever" : "",
"Remove the city/state fields from person details" : "",
"HTML Publishing Templates" : "",
"Entry reason" : "",
"Will this owner give a donation?" : "",
"Chow Chow" : "",
"Publish to RescueGroups.org" : "",
"New User" : "",
"Receipt/Invoice" : "",
"Use HTML5 client side image scaling where available to speed up image uploads" : "",
"or estimated age in years" : "",
"Balance" : "",
"Alphabetically A-Z" : "",
"Lost from" : "",
"More Tests" : "",
"Show quick links on the home page" : "",
" days." : "",
"Delete this person" : "",
"{plural0} week." : "",
"Entered (newest first)" : "",
"English Spot" : "",
"Default Death Reason" : "",
"Insurance" : "",
"Chart (Line)" : "",
"Saving..." : "",
"Updated database to version {0}" : "",
"Attach link" : "",
"One Off" : "",
"Cruelty Case" : "",
"Received in last week" : "",
"The entry reason for this animal" : "",
"5 Year" : "",
"{plural1} animals were adopted" : "",
"Save" : "",
"French Bulldog" : "",
"Banned" : "",
"{plural0} animal died" : "",
"Board and Food" : "",
"Asset::Premises" : "",
"View Cost" : "",
"Enable lost and found functionality" : "",
"February" : "",
"Kakariki" : "",
"are sent to" : "",
"There is not enough information in the form to create a person record (need a surname)." : "",
"Lost" : "",
"White and Grey" : "",
"Left Margin" : "",
"Remove" : "",
"Annually" : "",
"April" : "",
"New Donation" : "",
"Area where the animal was found" : "",
"Morgan" : "",
"Incoming Forms" : "",
"Display a search button at the right side of the search box" : "",
"{plural0} animal was reclaimed by its owner" : "",
"{plural3} vaccinations need to be administered today" : "",
"Overdue" : "",
"View Incoming Forms" : "",
"The URL is the address of a web resource, eg: www.youtube.com/watch?v=xxxxxx" : "",
"Beveren" : "",
"Black Tortie" : "",
"Description cannot be blank" : "",
"These values are required for correct operation of the system. ONLY change them if you are translating to another language." : ""
}
|
aubzen/sheltermanager
|
src/locale/locale_uk.py
|
Python
|
gpl-3.0
| 80,712
|
[
"Amber",
"VisIt"
] |
4b3fbabcbe333f9e684168a54ad7df10192804bb8246b9ee524190cf33185759
|
#------------------------------------------------------------------------------
# pycparser: c-to-c.py
#
# Example of using pycparser.c_generator, serving as a simplistic translator
# from C to AST and back to C.
#
# Copyright (C) 2008-2015, Eli Bendersky
# License: BSD
#------------------------------------------------------------------------------
from __future__ import print_function
import sys
# This is not required if you've installed pycparser into
# your site-packages/ with setup.py
#
sys.path.extend(['.', '..'])
from pycparser import parse_file, c_parser, c_generator
def translate_to_c(filename):
""" Simply use the c_generator module to emit a parsed AST.
"""
ast = parse_file(filename, use_cpp=True)
generator = c_generator.CGenerator()
print(generator.visit(ast))
def _zz_test_translate():
# internal use
src = r'''
void f(char * restrict joe){}
int main(void)
{
unsigned int long k = 4;
int p = - - k;
return 0;
}
'''
parser = c_parser.CParser()
ast = parser.parse(src)
ast.show()
generator = c_generator.CGenerator()
print(generator.visit(ast))
# tracing the generator for debugging
#~ import trace
#~ tr = trace.Trace(countcallers=1)
#~ tr.runfunc(generator.visit, ast)
#~ tr.results().write_results()
#------------------------------------------------------------------------------
if __name__ == "__main__":
#_zz_test_translate()
if len(sys.argv) > 1:
translate_to_c(sys.argv[1])
else:
print("Please provide a filename as argument")
|
johncsnyder/SwiftKitten
|
pycparser/examples/c-to-c.py
|
Python
|
mit
| 1,578
|
[
"VisIt"
] |
bd9958ed1a37df64ed6ebfc4d862ef8880babcff2bd1906394c5f7a0f319cd2b
|
#!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy
from pyscf import gto
mol = gto.Mole()
mol.atom = '''
He .5 .5 -.5
He 1. .2 .3
He .1 -.1 .1 '''
mol.basis = {'He': [(0, (.5, 1)),
(1, (.6, 1)),
(2, (.8, 1))]}
mol.build()
class KnowValues(unittest.TestCase):
def test_cint1e_r2_origi(self):
ref = mol.intor('cint1e_r2_origi_sph')
dat = mol.intor('cint1e_pbc_r2_origi_sph')
self.assertTrue(numpy.allclose(ref, dat))
def test_cint1e_r4_origi(self):
ref = mol.intor('cint1e_r4_origi_sph')
dat = mol.intor('cint1e_pbc_r4_origi_sph')
self.assertTrue(numpy.allclose(ref, dat))
def test_cint3c1e_r2_origk(self):
ref = mol.intor('cint3c1e_r2_origk_sph')
dat = mol.intor('cint3c1e_pbc_r2_origk_sph')
self.assertTrue(numpy.allclose(ref, dat))
def test_cint3c1e_r4_origk(self):
ref = mol.intor('cint3c1e_r4_origk_sph')
dat = mol.intor('cint3c1e_pbc_r4_origk_sph')
self.assertTrue(numpy.allclose(ref, dat))
def test_cint3c1e_r6_origk(self):
ref = mol.intor('cint3c1e_r6_origk_sph')
dat = mol.intor('cint3c1e_pbc_r6_origk_sph')
self.assertTrue(numpy.allclose(ref, dat))
if __name__ == '__main__':
print('Full Tests for int1e_pbc')
unittest.main()
|
gkc1000/pyscf
|
pyscf/lib/gto/test/test_int1e_pbc.py
|
Python
|
apache-2.0
| 1,979
|
[
"PySCF"
] |
aaccecc4751f02fdae70a538625d854b59d761b0974b4e8b03ca5c0710409dfb
|
# -*- coding: utf-8 -*-
#
# one-neuron-with-noise.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
import nest
import nest.voltage_trace
nest.ResetKernel()
neuron = nest.Create("iaf_neuron")
noise = nest.Create("poisson_generator", 2)
nest.SetStatus(noise, [{"rate": 80000.0}, {"rate": 15000.0}])
voltmeter = nest.Create("voltmeter")
nest.SetStatus(voltmeter, {"withgid": True, "withtime": True})
nest.ConvergentConnect(noise, neuron, [1.2, -1.0], 1.0)
nest.Connect(voltmeter, neuron)
nest.Simulate(1000.0)
nest.voltage_trace.from_device(voltmeter)
|
BlueBrain/NEST
|
pynest/examples/one-neuron-with-noise.py
|
Python
|
gpl-2.0
| 1,197
|
[
"NEURON"
] |
760c9303e39bc0b21c41993984f4ae8fc6b2f5353a2d5a260f0cdae48506163a
|
from __future__ import print_function
import sys
sys.path.insert(1,"../../../")
import h2o
from tests import pyunit_utils
from h2o.estimators.glm import H2OGeneralizedLinearEstimator
def glm_solvers():
predictors = ["displacement","power","weight","acceleration","year"]
for solver in ["AUTO", "IRLSM", "L_BFGS", "COORDINATE_DESCENT_NAIVE", "COORDINATE_DESCENT"]:
print("Solver = {0}".format(solver))
for family in ["binomial", "gaussian", "poisson", "tweedie", "gamma"]:
if family == 'binomial': response_col = "economy_20mpg"
elif family == 'gaussian': response_col = "economy"
else: response_col = "cylinders"
print("Family = {0}".format(family))
training_data = h2o.import_file(pyunit_utils.locate("smalldata/junit/cars_20mpg.csv"))
if family == 'binomial': training_data[response_col] = training_data[response_col].asfactor()
else: training_data[response_col] = training_data[response_col].asnumeric()
model = H2OGeneralizedLinearEstimator(family=family, alpha=0, Lambda=1e-5, solver=solver)
model.train(x=predictors, y=response_col, training_frame=training_data)
h2o.remove(training_data)
if __name__ == "__main__":
pyunit_utils.standalone_test(glm_solvers)
else:
glm_solvers()
|
nilbody/h2o-3
|
h2o-py/tests/testdir_algos/glm/pyunit_solvers_glm.py
|
Python
|
apache-2.0
| 1,311
|
[
"Gaussian"
] |
b08c41f8cebaa495f1db87c44fb0fbdfcea506c2aaf6316f52de6ebb1dc45a35
|
from __future__ import (absolute_import, division, print_function)
import unittest
from mantid.simpleapi import logger
import AbinsModules
class AbinsLoadCRYSTALTest(unittest.TestCase, AbinsModules.GeneralLoadDFTTester):
def tearDown(self):
AbinsModules.AbinsTestHelpers.remove_output_files(list_of_names=["LoadCRYSTAL"])
# *************************** USE CASES ********************************************
# ===================================================================================
# | Use cases: Gamma point calculation for CRYSTAL |
# ===================================================================================
_gamma_crystal = "crystalB3LYP_LoadCRYSTAL"
_set_crystal = "crystal_set_key_LoadCRYSTAL"
# ===================================================================================
# | Use case: Molecular calculation for CRYSTAL |
# ===================================================================================
_molecule = "toluene_molecule_LoadCRYSTAL"
# ===================================================================================
# | Use cases: Phonon dispersion calculation for CRYSTAL |
# ===================================================================================
_phonon_dispersion_v1 = "mgo-GX_LoadCRYSTAL"
_phonon_dispersion_v2 = "MgO-222-DISP_LoadCRYSTAL"
def test_gamma_crystal(self):
self.check(name=self._gamma_crystal, loader=AbinsModules.LoadCRYSTAL)
self.check(name=self._set_crystal, loader=AbinsModules.LoadCRYSTAL)
def test_molecule(self):
self.check(name=self._molecule, loader=AbinsModules.LoadCRYSTAL)
def test_phonon_dispersion_crystal(self):
self.check(name=self._phonon_dispersion_v1, loader=AbinsModules.LoadCRYSTAL)
self.check(name=self._phonon_dispersion_v2, loader=AbinsModules.LoadCRYSTAL)
if __name__ == '__main__':
unittest.main()
|
dymkowsk/mantid
|
scripts/test/AbinsLoadCRYSTALTest.py
|
Python
|
gpl-3.0
| 2,060
|
[
"CRYSTAL"
] |
f00c4c4f82168b039f02682c26f8f153cc727c2bceea664c0779704af0fd6715
|
# -*- coding: utf-8 -*-
# Vahid Moosavi 2015 08 08 10:50 am
#sevamoo@gmail.com
#Chair For Computer Aided Architectural Design, ETH Zurich
# Future Cities Lab
#www.vahidmoosavi.com
import numpy as np
from matplotlib import pyplot as plt
import matplotlib.gridspec as gridspec
import numexpr as ne
from time import time
import scipy.spatial as spdist
import timeit
import sys
from sklearn.externals.joblib import Parallel, delayed
from sklearn.externals.joblib import load, dump
import tempfile
import shutil
import os
import itertools
from scipy.sparse import csr_matrix
from sklearn.decomposition import RandomizedPCA
from sklearn.decomposition import PCA
from sklearn import neighbors
from matplotlib.colors import LogNorm
from matplotlib import cm
import matplotlib
import pandas as pd
class SOM(object):
def __init__(self,name,Data, mapsize = None, norm_method = 'var',initmethod = 'pca',neigh='Guassian'):
"""
name and training data; neigh is 'Bubble' or 'Guassian' (i.e. a Gaussian neighborhood function)
"""
self.name = name
self.data_raw = Data
if norm_method == 'var':
Data = normalize(Data, method=norm_method)
self.data = Data
else:
self.data = Data
self.dim = Data.shape[1]
self.dlen = Data.shape[0]
self.set_topology(mapsize = mapsize)
self.set_algorithm(initmethod = initmethod)
self.calc_map_dist()
self.neigh = neigh
#Slow for large data sets
#self.set_data_labels()
#set SOM topology
def set_topology(self, mapsize = None, mapshape = 'planar', lattice = 'rect', mask = None, compname = None):
"""
all_mapshapes = ['planar','toroid','cylinder']
all_lattices = ['hexa','rect']
"""
self.mapshape = mapshape
self.lattice = lattice
#to set mask
if mask == None:
self.mask = np.ones([1,self.dim])
else:
self.mask = mask
#to set map size
if mapsize == None:
tmp = int(round(np.sqrt(self.dlen)))
self.nnodes = tmp
self.mapsize = [int(3./5*self.nnodes), int(2./5*self.nnodes)]
else:
if len(mapsize)==2:
if np.min(mapsize) == 1:
self.mapsize = [1, np.max(mapsize)]
else:
self.mapsize = mapsize
elif len(mapsize) == 1:
s = int (mapsize[0]/2)
self.mapsize = [1 ,mapsize[0]]
print 'input was considered as the number of nodes'
print 'map size is [{0},{1}]'.format(self.mapsize[0], self.mapsize[1])
self.nnodes = self.mapsize[0]*self.mapsize[1]
# to set component names
if compname == None:
try:
cc = list()
for i in range(0,self.dim):
cc.append ('Variable-'+ str(i+1))
self.compname = np.asarray(cc)[np.newaxis,:]
except:
pass
print 'no data yet: please first set training data to the SOM'
else:
try:
dim = getattr(self,'dim')
if len(compname) == dim:
self.compname = np.asarray(compname)[np.newaxis,:]
else:
print 'compname should have the same size'
except:
pass
print 'no data yet: please first set training data to the SOM'
#Set labels of the training data
# it should be in the format of a list of strings
def set_data_labels(self, dlabel = None):
if dlabel == None:
try:
dlen = (getattr(self,'dlen'))
cc = list()
for i in range(0,dlen):
cc.append ('dlabel-'+ str(i))
self.dlabel = np.asarray(cc)[:, np.newaxis]
except:
pass
print 'no data yet: please first set training data to the SOM'
else:
try:
dlen = (getattr(self,'dlen'))
if dlabel.shape == (1,dlen):
self.dlabel = dlabel.T#[:,np.newaxis]
elif dlabel.shape == (dlen,1):
self.dlabel = dlabel
elif dlabel.shape == (dlen,):
self.dlabel = dlabel[:, np.newaxis]
else:
print 'wrong label format'
except:
pass
print 'no data yet: please first set training data to the SOM'
#calculating the grid distance, which will be called during the training steps
#currently just works for planar grids
def calc_map_dist(self):
cd = getattr(self, 'nnodes')
UD2 = np.zeros((cd, cd))
for i in range(cd):
UD2[i,:] = grid_dist(self, i).reshape(1,cd)
self.UD2 = UD2
def set_algorithm(self, initmethod = 'pca', algtype = 'batch', neighborhoodmethod = 'gaussian', alfatype = 'inv', alfaini = .5, alfafinal = .005):
"""
initmethod = ['random', 'pca']
algos = ['seq','batch']
all_neigh = ['gaussian','manhatan','bubble','cut_gaussian','epanechicov' ]
alfa_types = ['linear','inv','power']
"""
self.initmethod = initmethod
self.algtype = algtype
self.alfaini = alfaini
self.alfafinal = alfafinal
self.neigh = neighborhoodmethod
###################################
#visualize map
def view_map(self, what = 'codebook', which_dim = 'all', pack= 'Yes', text_size = 2.8,save='No', save_dir = 'empty',grid='No',text='Yes',cmap='None',COL_SiZe=6):
mapsize = getattr(self, 'mapsize')
if np.min(mapsize) >1:
if pack == 'No':
view_2d(self, text_size, which_dim = which_dim, what = what)
else:
# print 'hi'
view_2d_Pack(self, text_size, which_dim = which_dim,what = what,save = save, save_dir = save_dir, grid=grid,text=text,CMAP=cmap,col_sz=COL_SiZe)
elif np.min(mapsize) == 1:
view_1d(self, text_size, which_dim = which_dim, what = what)
################################################################################
# Initialize map codebook: Weight vectors of SOM
def init_map(self):
dim = 0
n_nod = 0
if getattr(self, 'initmethod')=='random':
#It produces random values in the range of min- max of each dimension based on a uniform distribution
mn = np.tile(np.min(getattr(self,'data'), axis =0), (getattr(self, 'nnodes'),1))
mx = np.tile(np.max(getattr(self,'data'), axis =0), (getattr(self, 'nnodes'),1))
setattr(self, 'codebook', mn + (mx-mn)*(np.random.rand(getattr(self, 'nnodes'), getattr(self, 'dim'))))
elif getattr(self, 'initmethod') == 'pca':
codebooktmp = lininit(self) #it is based on two largest eigenvalues of correlation matrix
setattr(self, 'codebook', codebooktmp)
else:
print 'please select a correct initialization method'
print 'set a correct one in SOM. current SOM.initmethod: ', getattr(self, 'initmethod')
print "possible init methods: 'random', 'pca'"
#Main loop of training
def train(self, trainlen = None, n_job = 1, shared_memory = 'no',verbose='on'):
t0 = time()
data = getattr(self, 'data')
nnodes = getattr(self, 'nnodes')
dlen = getattr(self, 'dlen')
dim = getattr(self, 'dim')
mapsize = getattr(self, 'mapsize')
mem = np.log10(dlen*nnodes*dim)
#print 'data len is %d and data dimension is %d' % (dlen, dim)
#print 'map size is %d, %d' %(mapsize[0], mapsize[1])
#print 'array size in log10 scale' , mem
#print 'nomber of jobs in parallel: ', n_job
#######################################
#initialization
if verbose=='on':
print
print 'initialization method = %s, initializing..' %getattr(self, 'initmethod')
print
t0 = time()
self.init_map()
if verbose=='on':
print 'initialization done in %f seconds' % round(time()-t0 , 3 )
########################################
#rough training
if verbose=='on':
print
batchtrain(self, njob = n_job, phase = 'rough', shared_memory = 'no',verbose=verbose)
if verbose=='on':
print
#######################################
#Finetuning
if verbose=='on':
print
batchtrain(self, njob = n_job, phase = 'finetune', shared_memory = 'no',verbose=verbose)
err = np.mean(getattr(self, 'bmu')[1])
if verbose=='on':
# or verbose == 'off':
# print
ts = round(time() - t0, 3)
print
print "Total time elapsed: %f secodns" %ts
print "final quantization error: %f" %err
if verbose=='final':
# or verbose == 'off':
# print
ts = round(time() - t0, 3)
print
print "Total time elapsed: %f secodns" %ts
print "final quantization error: %f" %err
#to project a data set to a trained SOM and find the index of the bmu for each row
#It is based on the nearest-neighbor search module of scikit-learn, but it is not that fast.
def project_data(self, data):
codebook = getattr(self, 'codebook')
data_raw = getattr(self,'data_raw')
clf = neighbors.KNeighborsClassifier(n_neighbors = 1)
labels = np.arange(0,codebook.shape[0])
clf.fit(codebook, labels)
# the codebook values are all normalized
#we can normalize the input data based on mean and std of original data
data = normalize_by(data_raw, data, method='var')
#data = normalize(data, method='var')
#plt.hist(data[:,2])
Predicted_labels = clf.predict(data)
return Predicted_labels
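# Illustrative usage sketch (assumes `sm` is an already trained SOM instance and
# `new_rows` has the same number of columns as the training data):
#   bmus = sm.project_data(new_rows)   # best-matching-unit index for every row
#   grid = sm.ind_to_xy(bmus)          # (row, col, node index) coordinates on the map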
def predict_by(self, data, Target, K =5, wt= 'distance'):
"""
wt: neighbor weighting for the KNN regressor, 'distance' (default) or 'uniform'
"""
# here it is assumed that Target is the last column in the codebook
#and data has dim-1 columns
codebook = getattr(self, 'codebook')
data_raw = getattr(self,'data_raw')
dim = codebook.shape[1]
ind = np.arange(0,dim)
indX = ind[ind != Target]
X = codebook[:,indX]
Y = codebook[:,Target]
n_neighbors = K
clf = neighbors.KNeighborsRegressor(n_neighbors, weights = wt)
clf.fit(X, Y)
# the codebook values are all normalized
#we can normalize the input data based on mean and std of original data
dimdata = data.shape[1]
if dimdata == dim:
data[:,Target] = 0
data = normalize_by(data_raw, data, method='var')
data = data[:,indX]
elif dimdata == dim -1:
data = normalize_by(data_raw[:,indX], data, method='var')
#data = normalize(data, method='var')
Predicted_values = clf.predict(data)
Predicted_values = denormalize_by(data_raw[:,Target], Predicted_values)
return Predicted_values
def predict(self, X_test, K =5, wt= 'distance'):
"""
wt: neighbor weighting for the KNN regressor, 'distance' (default) or 'uniform'
"""
#Similar to SKlearn we assume that we have X_tr, Y_tr and X_test
# here it is assumed that Target is the last column in the codebook
#and data has dim-1 columns
codebook = getattr(self, 'codebook')
data_raw = getattr(self,'data_raw')
dim = codebook.shape[1]
Target = data_raw.shape[1]-1
X_train = codebook[:,:Target]
Y_train= codebook[:,Target]
n_neighbors = K
clf = neighbors.KNeighborsRegressor(n_neighbors, weights = wt)
clf.fit(X_train, Y_train)
# the codebook values are all normalized
#we can normalize the input data based on mean and std of original data
X_test = normalize_by(data_raw[:,:Target], X_test, method='var')
Predicted_values = clf.predict(X_test)
Predicted_values = denormalize_by(data_raw[:,Target], Predicted_values)
return Predicted_values
def find_K_nodes(self, data, K =5):
from sklearn.neighbors import NearestNeighbors
# we find the k most similar nodes to the input vector
codebook = getattr(self, 'codebook')
neigh = NearestNeighbors(n_neighbors = K)
neigh.fit(codebook)
data_raw = getattr(self,'data_raw')
# the codebook values are all normalized
#we can normalize the input data based on mean and std of original data
data = normalize_by(data_raw, data, method='var')
return neigh.kneighbors(data)
def ind_to_xy(self, bm_ind):
msize = getattr(self, 'mapsize')
rows = msize[0]
cols = msize[1]
#bmu should be an integer between 0 to no_nodes
out = np.zeros((bm_ind.shape[0],3))
out[:,2] = bm_ind
# out[:,0] = rows-1-bm_ind/cols (superseded by the line below; callers flip the row axis themselves when plotting)
out[:,0] = bm_ind/cols
out[:,1] = bm_ind%cols
return out.astype(int)
def cluster(self,method='Kmeans',n_clusters=8):
import sklearn.cluster as clust
km= clust.KMeans(n_clusters=n_clusters)
labels = km.fit_predict(denormalize_by(self.data_raw, self.codebook, n_method = 'var'))
setattr(self,'cluster_labels',labels)
return labels
def hit_map(self,data=None):
#First Step: show the hitmap of all the training data
# print 'None'
data_tr = getattr(self, 'data_raw')
proj = self.project_data(data_tr)
msz = getattr(self, 'mapsize')
coord = self.ind_to_xy(proj)
#this is not an appropriate way, but it works
# coord[:,0] = msz[0]-coord[:,0]
###############################
fig = plt.figure(figsize=(msz[1]/5,msz[0]/5))
ax = fig.add_subplot(111)
ax.xaxis.set_ticks([i for i in range(0,msz[1])])
ax.yaxis.set_ticks([i for i in range(0,msz[0])])
ax.xaxis.set_ticklabels([])
ax.yaxis.set_ticklabels([])
ax.grid(True,linestyle='-', linewidth=.5)
a = plt.hist2d(coord[:,1], coord[:,0], bins=(msz[1],msz[0]),alpha=.0,cmap=cm.jet)
# clbar = plt.colorbar()
x = np.arange(.5,msz[1]+.5,1)
y = np.arange(.5,msz[0]+.5,1)
X, Y = np.meshgrid(x, y)
area = a[0].T*12
# plt.scatter(coord[:,1]+.5, msz[0]-.5-coord[:,0], s=area, alpha=0.9,c='None',marker='o',cmap='jet',linewidths=3, edgecolor = 'r')
# plt.scatter(coord[:,1]+.5, msz[0]-.5-coord[:,0], s=area, alpha=0.2,c='b',marker='o',cmap='jet',linewidths=3, edgecolor = 'r')
coord = self.ind_to_xy(np.arange(self.nnodes))
plt.scatter(coord[:,1], msz[0]-.5- coord[:,0], s=area.flatten(), alpha=.9,c='None',marker='o',cmap='jet',linewidths=3, edgecolor = 'r')
plt.xlim(0,msz[1])
plt.ylim(0,msz[0])
if data != None:
proj = self.project_data(data)
msz = getattr(self, 'mapsize')
coord_d = self.ind_to_xy(proj)
a = plt.hist2d(coord_d[:,1], coord_d[:,0], bins=(msz[1],msz[0]),alpha=.0,norm = LogNorm(),cmap=cm.jet)
# clbar = plt.colorbar()
x = np.arange(.5,msz[1]+.5,1)
y = np.arange(.5,msz[0]+.5,1)
X, Y = np.meshgrid(x, y)
area = a[0].T*50
plt.scatter(coord_d[:,1]+.5, msz[0]-.5-coord_d[:,0], s=area, alpha=0.9,c='None',marker='o',cmap='jet',linewidths=3, edgecolor = 'r')
plt.scatter(coord_d[:,1]+.5, msz[0]-.5-coord_d[:,0], s=area, alpha=0.2,c='b',marker='o',cmap='jet',linewidths=3, edgecolor = 'r')
# print 'hi'
# plt.scatter(coord[:,1], msz[0]-1-coord[:,0], s=area, alpha=0.2,c='b',marker='o',cmap='jet',linewidths=3, edgecolor = 'r')
# plt.scatter(X, msz[0]-1-Y, s=area, alpha=0.2,c='b',marker='o',cmap='jet',linewidths=3, edgecolor = 'r')#
# plt.scatter(X, msz[0]-1-Y, s=area, alpha=0.9,c='None',marker='o',cmap='jet',linewidths=3, edgecolor = 'r')
plt.xlim(0,msz[1])
plt.ylim(0,msz[0])
plt.show()
def U_matrix(self,distance=1,row_normalized='Yes'):
import scipy
UD2 = self.UD2
Umatrix = np.zeros((self.nnodes,1))
if row_normalized=='Yes':
vector = normalize_by(self.codebook.T, self.codebook.T, method='var').T
else:
vector = self.codebook
for i in range(self.nnodes):
codebook_i = vector[i][np.newaxis,:]
neighbor_ind = UD2[i][0:]<=distance
neighbor_codebooks = vector[neighbor_ind]
Umatrix[i] = scipy.spatial.distance_matrix(codebook_i,neighbor_codebooks).mean()
return Umatrix.reshape(self.mapsize)
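# Note: U_matrix() assigns each node the mean of the distances between its codebook
# vector and the codebook vectors of all nodes within grid distance `distance`
# (itself included, via UD2 <= distance). High values therefore mark borders between
# clusters on the map, low values mark homogeneous regions; view_U_matrix() below
# renders this as a heat map with optional contours and blob detection.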
def view_U_matrix(self,distance2=1,row_normalized='No',show_data='Yes',contooor='Yes',blob = 'No',save='No',save_dir = ''):
import scipy
from pylab import meshgrid,cm,imshow,contour,clabel,colorbar,axis,title,show
umat = self.U_matrix(distance=distance2,row_normalized=row_normalized)
data = getattr(self, 'data_raw')
proj = self.project_data(data)
msz = getattr(self, 'mapsize')
coord = self.ind_to_xy(proj)
# freq = plt.hist2d(coord[:,1], coord[:,0], bins=(msz[1],msz[0]),alpha=1.0,cmap=cm.jet)[0]
# plt.close()
# fig, ax = plt.figure()
fig, ax= plt.subplots(1, 1)
im = imshow(umat,cmap=cm.RdYlBu_r,alpha=1) # drawing the function
# adding the contour lines with labels
# imshow(freq[0].T,cmap=cm.jet_r,alpha=1)
if contooor=='Yes':
mn = np.min(umat.flatten())
mx = np.max(umat.flatten())
std = np.std(umat.flatten())
md = np.median(umat.flatten())
mx = md + 0*std
# mn = md
# umat[umat<=mn]=mn
cset = contour(umat,np.linspace(mn,mx,15),linewidths=0.7,cmap=cm.Blues)
if show_data=='Yes':
plt.scatter(coord[:,1], coord[:,0], s=2, alpha=1.,c='Gray',marker='o',cmap='jet',linewidths=3, edgecolor = 'Gray')
plt.axis('off')
ratio = float(msz[0])/(msz[0]+msz[1])
fig.set_size_inches((1-ratio)*15,ratio*15)
plt.tight_layout()
plt.subplots_adjust(hspace = .00,wspace=.000)
sel_points = list()
if blob=='Yes':
from skimage.feature import blob_dog, blob_log, blob_doh
from math import sqrt
from skimage.color import rgb2gray
image = 1/umat
image_gray = rgb2gray(image)
#'Laplacian of Gaussian'
blobs = blob_log(image, max_sigma=5, num_sigma=4, threshold=.152)
blobs[:, 2] = blobs[:, 2] * sqrt(2)
imshow(umat,cmap=cm.RdYlBu_r,alpha=1)
sel_points = list()
for blob in blobs:
row, col, r = blob
c = plt.Circle((col, row), r, color='red', linewidth=2, fill=False)
ax.add_patch(c)
dist = scipy.spatial.distance_matrix(coord[:,:2],np.array([row,col])[np.newaxis,:])
sel_point = dist <= r
plt.plot(coord[:,1][sel_point[:,0]], coord[:,0][sel_point[:,0]],'.r')
sel_points.append(sel_point[:,0])
if save=='Yes':
fig.savefig(save_dir, transparent=False, dpi=400)
return sel_points,umat
def hit_map_cluster_number(self,data=None):
if hasattr(self, 'cluster_labels'):
codebook = getattr(self, 'cluster_labels')
# print 'yesyy'
else:
print 'clustering based on default parameters...'
codebook = self.cluster()
msz = getattr(self, 'mapsize')
fig = plt.figure(figsize=(msz[1]/2.5,msz[0]/2.5))
ax = fig.add_subplot(111)
# ax.xaxis.set_ticklabels([])
# ax.yaxis.set_ticklabels([])
# ax.grid(True,linestyle='-', linewidth=.5)
if data == None:
data_tr = getattr(self, 'data_raw')
proj = self.project_data(data_tr)
coord = self.ind_to_xy(proj)
cents = self.ind_to_xy(np.arange(0,msz[0]*msz[1]))
for i, txt in enumerate(codebook):
ax.annotate(txt, (cents[i,1],cents[i,0]),size=10, va="center")
if data != None:
proj = self.project_data(data)
coord = self.ind_to_xy(proj)
x = np.arange(.5,msz[1]+.5,1)
y = np.arange(.5,msz[0]+.5,1)
cents = self.ind_to_xy(proj)
# cents[:,1] = cents[:,1]+.2
# print cents.shape
label = codebook[proj]
for i, txt in enumerate(label):
ax.annotate(txt, (cents[i,1],cents[i,0]),size=10, va="center")
plt.imshow(codebook.reshape(msz[0],msz[1])[::],alpha=.5)
# plt.pcolor(codebook.reshape(msz[0],msz[1])[::-1],alpha=.5,cmap='jet')
plt.show()
return cents
def view_map_dot(self,which_dim='all',colormap=None,cols=None,save='No',save_dir='',text_size=8):
import matplotlib.cm as cm
if colormap==None:
colormap = plt.cm.get_cmap('RdYlBu_r')
else:
colormap = plt.cm.get_cmap(colormap)
data = self.data_raw
msz0, msz1 = getattr(self, 'mapsize')
proj = self.project_data(data)
coords = self.ind_to_xy(proj)[:,:2]
fig = plt.figure()
if cols==None:
cols=8
rows = data.shape[1]/cols+1
if which_dim == 'all':
dim = data.shape[0]
rows = len(which_dim)/cols+1
no_row_in_plot = dim/cols + 1 #6 is arbitrarily selected
if no_row_in_plot <=1:
no_col_in_plot = dim
else:
no_col_in_plot = cols
h = .1
w= .1
fig = plt.figure(figsize=(no_col_in_plot*2.5*(1+w),no_row_in_plot*2.5*(1+h)))
for i in range(data.shape[1]):
plt.subplot(rows,cols,i+1)
#this uses the colors uniquely for each record, while in normal views they are based on the values within each dimension.
#This is important when we are dealing with time series, where we don't want to normalize colors within each time period;
#rather, we want to see the patterns of each data record in time.
mn = np.min(data[:,:],axis=1)
mx = np.max(data[:,:],axis=1)
# print mx.shape
# print coords.shape
for j in range(data.shape[0]):
sc = plt.scatter(coords[j,1],self.mapsize[0]-1-coords[j,0],c=data[j,which_dim[i]],vmax=mx[j],vmin=mn[j],s=90,marker='.',edgecolor='None', cmap=colormap ,alpha=1)
# mn = data[:,i].min()
# mx = data[:,i].max()
# plt.scatter(coords[:,1],self.mapsize[0]-1-coords[:,0],c=data[:,i],vmax=mx,vmin=mn,s=180,marker='.',edgecolor='None', cmap=colormap ,alpha=1)
eps = .0075
plt.xlim(0-eps,self.mapsize[1]-1+eps)
plt.ylim(0-eps,self.mapsize[0]-1+eps)
plt.axis('off')
plt.title(self.compname[0][i])
font = {'size' : text_size}
plt.rc('font', **font)
plt.axis('on')
plt.xticks([])
plt.yticks([])
else:
dim = len(which_dim)
rows = len(which_dim)/cols+1
no_row_in_plot = dim/cols + 1 #6 is arbitrarily selected
if no_row_in_plot <=1:
no_col_in_plot = dim
else:
no_col_in_plot = cols
h = .1
w= .1
fig = plt.figure(figsize=(no_col_in_plot*2.5*(1+w),no_row_in_plot*2.5*(1+h)))
for i in range(len(which_dim)):
plt.subplot(rows,cols,i+1)
mn = np.min(data[:,:],axis=1)
mx = np.max(data[:,:],axis=1)
# print mx.shape
# print coords.shape
for j in range(data.shape[0]):
sc = plt.scatter(coords[j,1],self.mapsize[0]-1-coords[j,0],c=data[j,which_dim[i]],vmax=mx[j],vmin=mn[j],s=90,marker='.',edgecolor='None', cmap=colormap ,alpha=1)
# mn = data[:,which_dim[i]].min()
# mx = data[:,which_dim[i]].max()
# plt.scatter(coords[:,1],self.mapsize[0]-1-coords[:,0],c=data[:,which_dim[i]],vmax=mx,vmin=mn,s=180,marker='.',edgecolor='None', cmap=colormap ,alpha=1)
eps = .0075
plt.xlim(0-eps,self.mapsize[1]-1+eps)
plt.ylim(0-eps,self.mapsize[0]-1+eps)
plt.axis('off')
plt.title(self.compname[0][which_dim[i]])
font = {'size' : text_size}
plt.rc('font', **font)
plt.axis('on')
plt.xticks([])
plt.yticks([])
plt.tight_layout()
# plt.colorbar(sc,ticks=np.round(np.linspace(mn,mx,5),decimals=1),shrink=0.6)
plt.subplots_adjust(hspace = .16,wspace=.05)
# fig.set_size_inches(msz0/2,msz1/2)
# fig = plt.figure(figsize=(msz0/2,msz1/2))
if save=='Yes':
if save_dir != 'empty':
fig.savefig(save_dir, transparent=False, dpi=200)
else:
add = '/Users/itadmin/Desktop/SOM_dot.png'
print 'save directory: ', add
fig.savefig(add, transparent=False, dpi=200)
plt.close(fig)
def predict_Probability(self, data, Target, K =5):
# here it is assumed that Target is the last column in the codebook #and data has dim-1 columns
codebook = getattr(self, 'codebook')
data_raw = getattr(self,'data_raw')
dim = codebook.shape[1]
ind = np.arange(0,dim)
indX = ind[ind != Target]
X = codebook[:,indX]
Y = codebook[:,Target]
n_neighbors = K
clf = neighbors.KNeighborsRegressor(n_neighbors, weights = 'distance')
clf.fit(X, Y)
# the codebook values are all normalized
#we can normalize the input data based on mean and std of original data
dimdata = data.shape[1]
if dimdata == dim:
data[:,Target] = 0
data = normalize_by(data_raw, data, method='var')
data = data[:,indX]
elif dimdata == dim -1:
data = normalize_by(data_raw[:,indX], data, method='var')
#data = normalize(data, method='var')
weights,ind= clf.kneighbors(data, n_neighbors=K, return_distance=True)
weights = 1./weights
sum_ = np.sum(weights,axis=1)
weights = weights/sum_[:,np.newaxis]
labels = np.sign(codebook[ind,Target])
labels[labels>=0]=1
#for positives
pos_prob = labels.copy()
pos_prob[pos_prob<0]=0
pos_prob = pos_prob*weights
pos_prob = np.sum(pos_prob,axis=1)[:,np.newaxis]
#for negatives
neg_prob = labels.copy()
neg_prob[neg_prob>0]=0
neg_prob = neg_prob*weights*-1
neg_prob = np.sum(neg_prob,axis=1)[:,np.newaxis]
#Predicted_values = clf.predict(data)
#Predicted_values = denormalize_by(data_raw[:,Target], Predicted_values)
return np.concatenate((pos_prob,neg_prob),axis=1)
def node_Activation(self, data, wt= 'distance',Target = None):
"""
wt: neighbor weighting, 'distance' (default) or 'uniform'
"""
if Target == None:
codebook = getattr(self, 'codebook')
data_raw = getattr(self,'data_raw')
clf = neighbors.KNeighborsClassifier(n_neighbors = getattr(self, 'nnodes'))
labels = np.arange(0,codebook.shape[0])
clf.fit(codebook, labels)
# the codebook values are all normalized
#we can normalize the input data based on mean and std of original data
data = normalize_by(data_raw, data, method='var')
weights,ind= clf.kneighbors(data)
##Softmax function
weights = 1./weights
# S_ = np.sum(np.exp(weights),axis=1)[:,np.newaxis]
# weights = np.exp(weights)/S_
return weights , ind
#
def para_bmu_find(self, x, y, njb = 1):
dlen = x.shape[0]
Y2 = None
Y2 = np.einsum('ij,ij->i', y, y)
bmu = None
b = None
#here it finds BMUs for chunk of data in parallel
t_temp = time()
b = Parallel(n_jobs=njb, pre_dispatch='3*n_jobs')(delayed(chunk_based_bmu_find)\
(self, x[i*dlen // njb:min((i+1)*dlen // njb, dlen)],y, Y2) \
for i in xrange(njb))
#print 'bmu finding: %f seconds ' %round(time() - t_temp, 3)
t1 = time()
bmu = np.asarray(list(itertools.chain(*b))).T
#print 'bmu to array: %f seconds' %round(time() - t1, 3)
del b
return bmu
#First finds the Voronoi set of each node. It needs to calculate a smaller matrix. Super fast compared to the classic batch training algorithm
# it is based on the algorithm implemented in the SOM Toolbox for Matlab by Helsinki University of Technology
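# Sketch of the math: with c(i) the BMU of sample x_i and H the neighborhood kernel,
# the batch update computed below is
#   m_j = sum_i H[j, c(i)] * x_i / sum_i H[j, c(i)]
# P is a sparse (nnodes x dlen) indicator of BMU membership, so S = P.dot(X) holds the
# per-Voronoi-set sums and nV the per-set counts; smoothing both with H gives the
# numerator and denominator above.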
def update_codebook_voronoi(self, training_data, bmu, H, radius):
#bmu has shape of 2,dlen, where first row has bmuinds
# we construct ud2 from precomputed UD2 : ud2 = UD2[bmu[0,:]]
nnodes = getattr(self, 'nnodes')
dlen = getattr(self ,'dlen')
dim = getattr(self, 'dim')
New_Codebook = np.empty((nnodes, dim))
inds = bmu[0].astype(int)
# print 'bmu', bmu[0]
# fig = plt.hist(bmu[0],bins=100)
# plt.show()
row = inds
col = np.arange(dlen)
val = np.tile(1,dlen)
P = csr_matrix( (val,(row,col)), shape=(nnodes,dlen) )
S = np.empty((nnodes, dim))
S = P.dot(training_data)
#assert( S.shape == (nnodes, dim))
#assert( H.shape == (nnodes, nnodes))
# H has nnodes*nnodes and S has nnodes*dim ---> Nominator has nnodes*dim
#print Nom
Nom = np.empty((nnodes,nnodes))
Nom = H.T.dot(S)
#assert( Nom.shape == (nnodes, dim))
nV = np.empty((1,nnodes))
nV = P.sum(axis = 1).reshape(1, nnodes)
# print 'nV', nV
# print 'H'
# print H
#assert(nV.shape == (1, nnodes))
Denom = np.empty((nnodes,1))
Denom = nV.dot(H.T).reshape(nnodes, 1)
# print 'Denom'
# print Denom
#assert( Denom.shape == (nnodes, 1))
New_Codebook = np.divide(Nom, Denom)
# print 'codebook'
# print New_Codebook.sum(axis=1)
Nom = None
Denom = None
#assert (New_Codebook.shape == (nnodes,dim))
#setattr(som, 'codebook', New_Codebook)
return np.around(New_Codebook, decimals = 6)
# we will call this function in parallel for different number of jobs
def chunk_based_bmu_find(self, x, y, Y2):
dim = x.shape[1]
dlen = x.shape[0]
nnodes = y.shape[0]
bmu = np.empty((dlen,2))
#it seems that small batches for large dlen are really faster:
# that is because of ddata in loops and n_jobs. for large data it slows down due to memory needs in parallel
blen = min(50,dlen)
i0 = 0;
d = None
t = time()
while i0+1<=dlen:
Low = (i0)
High = min(dlen,i0+blen)
i0 = i0+blen
ddata = x[Low:High+1]
d = np.dot(y, ddata.T)
d *= -2
d += Y2.reshape(nnodes,1)
bmu[Low:High+1,0] = np.argmin(d, axis = 0)
bmu[Low:High+1,1] = np.min(d, axis = 0)
del ddata
d = None
return bmu
#Batch training, which is called for rough training as well as finetuning
def batchtrain(self, njob = 1, phase = None, shared_memory = 'no', verbose='on'):
t0 = time()
nnodes = getattr(self, 'nnodes')
dlen = getattr(self, 'dlen')
dim = getattr(self, 'dim')
mapsize = getattr(self, 'mapsize')
#############################################
# setting the parameters
initmethod = getattr(self,'initmethod')
mn = np.min(mapsize)
if mn == 1:
mpd = float(nnodes*10)/float(dlen)
else:
mpd = float(nnodes)/float(dlen)
ms = max(mapsize[0],mapsize[1])
if mn == 1:
ms = ms/2.
#Based on somtoolbox, Matlab
#case 'train', sTrain.trainlen = ceil(50*mpd);
#case 'rough', sTrain.trainlen = ceil(10*mpd);
#case 'finetune', sTrain.trainlen = ceil(40*mpd);
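# Worked example (illustrative numbers): with dlen = 1000 training rows and a 30x20
# map (nnodes = 600), mpd = 600/1000 = 0.6 and ms = 30. For PCA initialization the
# rough phase then runs ceil(30*0.6) = 18 epochs, with the radius shrinking linearly
# from max(1, ceil(30/8.)) = 4 down to max(1, 4/4.) = 1.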
if phase == 'rough':
#training length
trainlen = int(np.ceil(30*mpd))
#radius for updating
if initmethod == 'random':
radiusin = max(1, np.ceil(ms/3.))
radiusfin = max(1, radiusin/6.)
# radiusin = max(1, np.ceil(ms/1.))
# radiusfin = max(1, radiusin/2.)
elif initmethod == 'pca':
radiusin = max(1, np.ceil(ms/8.))
radiusfin = max(1, radiusin/4.)
elif phase == 'finetune':
#training length
#radius for updating
if initmethod == 'random':
trainlen = int(np.ceil(50*mpd))
radiusin = max(1, ms/12.) #from radius fin in rough training
radiusfin = max(1, radiusin/25.)
# radiusin = max(1, ms/2.) #from radius fin in rough training
# radiusfin = max(1, radiusin/2.)
elif initmethod == 'pca':
trainlen = int(np.ceil(40*mpd))
radiusin = max(1, np.ceil(ms/8.)/4)
radiusfin = 1#max(1, ms/128)
radius = np.linspace(radiusin, radiusfin, trainlen)
##################################################
UD2 = getattr(self, 'UD2')
New_Codebook_V = np.empty((nnodes, dim))
New_Codebook_V = getattr(self, 'codebook')
#print 'data is in shared memory?', shared_memory
if shared_memory == 'yes':
data = getattr(self, 'data')
Data_folder = tempfile.mkdtemp()
data_name = os.path.join(Data_folder, 'data')
dump(data, data_name)
data = load(data_name, mmap_mode='r')
else:
data = getattr(self, 'data')
#X2 is part of the euclidean distance (x-y)^2 = x^2 + y^2 - 2xy that we use for each data row in bmu finding.
#Since it is a fixed value we can skip it during bmu finding for each data point, but later we need it to calculate the quantization error
X2 = np.einsum('ij,ij->i', data, data)
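# Small numeric check of that identity: for x = [1, 2] and y = [3, 5],
# ||x - y||^2 = 4 + 9 = 13 and x.x + y.y - 2*x.y = 5 + 34 - 26 = 13.
# chunk_based_bmu_find() ranks nodes with only y.y - 2*x.y, since the constant x.x
# cannot change the argmin; X2 is added back afterwards only to report the
# quantization error.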
if verbose=='on':
print '%s training...' %phase
print 'radius_ini: %f , radius_final: %f, trainlen: %d' %(radiusin, radiusfin, trainlen)
neigh_func = getattr(self,'neigh')
for i in range(trainlen):
if neigh_func == 'Guassian':
#in case of Gaussian neighborhood
H = np.exp(-1.0*UD2/(2.0*radius[i]**2)).reshape(nnodes, nnodes)
if neigh_func == 'Bubble':
# in case of Bubble function
# print radius[i], UD2.shape
# print UD2
H = l(radius[i],np.sqrt(UD2.flatten())).reshape(nnodes, nnodes) + .000000000001
# print H
t1 = time()
bmu = None
bmu = self.para_bmu_find(data, New_Codebook_V, njb = njob)
if verbose=='on':
print
#updating the codebook
t2 = time()
New_Codebook_V = self.update_codebook_voronoi(data, bmu, H, radius)
#print 'updating nodes: ', round (time()- t2, 3)
if verbose=='on':
print "epoch: %d ---> elapsed time: %f, quantization error: %f " %(i+1, round(time() - t1, 3),np.mean(np.sqrt(bmu[1] + X2)))
setattr(self, 'codebook', New_Codebook_V)
bmu[1] = np.sqrt(bmu[1] + X2)
setattr(self, 'bmu', bmu)
def grid_dist(self,bmu_ind):
"""
som and bmu_ind
depending on the lattice "hexa" or "rect" we have different grid distance
functions.
bmu_ind is a number between 0 and number of nodes-1. depending on the map size
bmu_coord will be calculated and then distance matrix in the map will be returned
"""
try:
lattice = getattr(self, 'lattice')
except:
lattice = 'hexa'
print 'lattice not found! lattice was set to hexa'
if lattice == 'rect':
return rect_dist(self,bmu_ind)
elif lattice == 'hexa':
try:
msize = getattr(self, 'mapsize')
rows = msize[0]
cols = msize[1]
except:
rows = 0.
cols = 0.
pass
#needs to be implemented
print 'to be implemented' , rows , cols
return np.zeros((rows,cols))
def rect_dist(self,bmu):
#the way we consider the list of nodes in a planar grid is that node 0 is in the top left corner,
#node mapsize[1]-1 is the top right corner, and then it continues on to the second row.
#no. of rows is mapsize[0] and no. of cols is mapsize[1]
try:
msize = getattr(self, 'mapsize')
rows = msize[0]
cols = msize[1]
except:
pass
#bmu should be an integer between 0 to no_nodes
if 0<=bmu<=(rows*cols):
c_bmu = int(bmu%cols)
r_bmu = int(bmu/cols)
else:
print 'wrong bmu'
#calculating the grid distance
if np.logical_and(rows>0 , cols>0):
r,c = np.arange(0, rows, 1)[:,np.newaxis] , np.arange(0,cols, 1)
dist2 = (r-r_bmu)**2 + (c-c_bmu)**2
return dist2.ravel()
else:
print 'please consider the above mentioned errors'
return np.zeros((rows,cols)).ravel()
def view_2d(self, text_size,which_dim='all', what = 'codebook'):
msz0, msz1 = getattr(self, 'mapsize')
if what == 'codebook':
if hasattr(self, 'codebook'):
codebook = getattr(self, 'codebook')
data_raw = getattr(self,'data_raw')
codebook = denormalize_by(data_raw, codebook)
else:
print 'first initialize codebook'
if which_dim == 'all':
dim = getattr(self, 'dim')
indtoshow = np.arange(0,dim).T
ratio = float(dim)/float(dim)
ratio = np.max((.35,ratio))
sH, sV = 16,16*ratio*1
plt.figure(figsize=(sH,sV))
elif type(which_dim) == int:
dim = 1
indtoshow = np.zeros(1)
indtoshow[0] = int(which_dim)
sH, sV = 6,6
plt.figure(figsize=(sH,sV))
elif type(which_dim) == list:
max_dim = codebook.shape[1]
dim = len(which_dim)
ratio = float(dim)/float(max_dim)
#print max_dim, dim, ratio
ratio = np.max((.35,ratio))
indtoshow = np.asarray(which_dim).T
sH, sV = 16,16*ratio*1
plt.figure(figsize=(sH,sV))
no_row_in_plot = dim/6 + 1 #6 is arbitrarily selected
if no_row_in_plot <=1:
no_col_in_plot = dim
else:
no_col_in_plot = 6
axisNum = 0
compname = getattr(self, 'compname')
norm = matplotlib.colors.Normalize(vmin = np.mean(codebook.flatten())-1*np.std(codebook.flatten()), vmax = np.mean(codebook.flatten())+1*np.std(codebook.flatten()), clip = True)
while axisNum <dim:
axisNum += 1
ax = plt.subplot(no_row_in_plot, no_col_in_plot, axisNum)
ind = int(indtoshow[axisNum-1])
mp = codebook[:,ind].reshape(msz0, msz1)
pl = plt.pcolor(mp[::-1],norm = norm)
# pl = plt.imshow(mp[::-1])
plt.title(compname[0][ind])
font = {'size' : text_size*sH/no_col_in_plot}
plt.rc('font', **font)
plt.axis('off')
plt.axis([0, msz0, 0, msz1])
ax.set_yticklabels([])
ax.set_xticklabels([])
plt.colorbar(pl)
plt.show()
def view_2d_Pack(self, text_size,which_dim='all', what = 'codebook',save='No', grid='Yes', save_dir = 'empty',text='Yes',CMAP='None',col_sz=None):
import matplotlib.cm as cm
msz0, msz1 = getattr(self, 'mapsize')
if CMAP=='None':
CMAP= cm.RdYlBu_r
# CMAP = cm.jet
if what == 'codebook':
if hasattr(self, 'codebook'):
codebook = getattr(self, 'codebook')
data_raw = getattr(self,'data_raw')
codebook = denormalize_by(data_raw, codebook)
else:
print 'first initialize codebook'
if which_dim == 'all':
dim = getattr(self, 'dim')
indtoshow = np.arange(0,dim).T
ratio = float(dim)/float(dim)
ratio = np.max((.35,ratio))
sH, sV = 16,16*ratio*1
# plt.figure(figsize=(sH,sV))
elif type(which_dim) == int:
dim = 1
indtoshow = np.zeros(1)
indtoshow[0] = int(which_dim)
sH, sV = 6,6
# plt.figure(figsize=(sH,sV))
elif type(which_dim) == list:
max_dim = codebook.shape[1]
dim = len(which_dim)
ratio = float(dim)/float(max_dim)
#print max_dim, dim, ratio
ratio = np.max((.35,ratio))
indtoshow = np.asarray(which_dim).T
sH, sV = 16,16*ratio*1
# plt.figure(figsize=(sH,sV))
# plt.figure(figsize=(7,7))
no_row_in_plot = dim/col_sz + 1 #6 is arbitrarily selected
if no_row_in_plot <=1:
no_col_in_plot = dim
else:
no_col_in_plot = col_sz
axisNum = 0
compname = getattr(self, 'compname')
h = .1
w= .1
fig = plt.figure(figsize=(no_col_in_plot*2.5*(1+w),no_row_in_plot*2.5*(1+h)))
# print no_row_in_plot, no_col_in_plot
norm = matplotlib.colors.Normalize(vmin = np.median(codebook.flatten())-1.5*np.std(codebook.flatten()), vmax = np.median(codebook.flatten())+1.5*np.std(codebook.flatten()), clip = False)
DD = pd.Series(data = codebook.flatten()).describe(percentiles=[.03,.05,.1,.25,.3,.4,.5,.6,.7,.8,.9,.95,.97])
norm = matplotlib.colors.Normalize(vmin = DD.ix['3%'], vmax = DD.ix['97%'], clip = False)
while axisNum <dim:
axisNum += 1
ax = fig.add_subplot(no_row_in_plot, no_col_in_plot, axisNum)
ind = int(indtoshow[axisNum-1])
mp = codebook[:,ind].reshape(msz0, msz1)
if grid=='Yes':
pl = plt.pcolor(mp[::-1],cmap=CMAP)
elif grid=='No':
plt.imshow(mp[::-1],norm = None,cmap=CMAP)
# plt.pcolor(mp[::-1])
plt.axis('off')
if text=='Yes':
plt.title(compname[0][ind])
font = {'size' : text_size}
plt.rc('font', **font)
plt.axis([0, msz0, 0, msz1])
ax.set_yticklabels([])
ax.set_xticklabels([])
ax.xaxis.set_ticks([i for i in range(0,msz1)])
ax.yaxis.set_ticks([i for i in range(0,msz0)])
ax.xaxis.set_ticklabels([])
ax.yaxis.set_ticklabels([])
ax.grid(True,linestyle='-', linewidth=0.5,color='k')
# plt.grid()
# plt.colorbar(pl)
# plt.tight_layout()
plt.subplots_adjust(hspace = h,wspace=w)
if what == 'cluster':
if hasattr(self, 'cluster_labels'):
codebook = getattr(self, 'cluster_labels')
else:
print 'clustering based on default parameters...'
codebook = self.cluster()
h = .2
w= .001
fig = plt.figure(figsize=(msz0/2,msz1/2))
ax = fig.add_subplot(1, 1, 1)
mp = codebook[:].reshape(msz0, msz1)
if grid=='Yes':
plt.imshow(mp[::-1],cmap=CMAP)
# pl = plt.pcolor(mp[::-1],cmap=CMAP)
elif grid=='No':
plt.imshow(mp[::-1],cmap=CMAP)
# plt.pcolor(mp[::-1])
plt.axis('off')
if text=='Yes':
plt.title('clusters')
font = {'size' : text_size}
plt.rc('font', **font)
plt.axis([0, msz0, 0, msz1])
ax.set_yticklabels([])
ax.set_xticklabels([])
ax.xaxis.set_ticks([i for i in range(0,msz1)])
ax.yaxis.set_ticks([i for i in range(0,msz0)])
ax.xaxis.set_ticklabels([])
ax.yaxis.set_ticklabels([])
ax.grid(True,linestyle='-', linewidth=0.5,color='k')
plt.subplots_adjust(hspace = h,wspace=w)
if save == 'Yes':
if save_dir != 'empty':
# print save_dir
fig.savefig(save_dir,bbox_inches='tight', transparent=False, dpi=200)
else:
# print save_dir
add = '/Users/itadmin/Desktop/SOM.png'
fig.savefig(add,bbox_inches='tight', transparent=False, dpi=200)
plt.close(fig)
def view_1d(self, text_size, which_dim ='all', what = 'codebook'):
msz0, msz1 = getattr(self, 'mapsize')
if what == 'codebook':
if hasattr(self, 'codebook'):
codebook = getattr(self, 'codebook')
data_raw = getattr(self,'data_raw')
codebook = denormalize_by(data_raw, codebook)
else:
print 'first initialize codebook'
if which_dim == 'all':
dim = getattr(self, 'dim')
indtoshow = np.arange(0,dim).T
ratio = float(dim)/float(dim)
ratio = np.max((.35,ratio))
sH, sV = 16,16*ratio*1
plt.figure(figsize=(sH,sV))
elif type(which_dim) == int:
dim = 1
indtoshow = np.zeros(1)
indtoshow[0] = int(which_dim)
sH, sV = 6,6
plt.figure(figsize=(sH,sV))
elif type(which_dim) == list:
max_dim = codebook.shape[1]
dim = len(which_dim)
ratio = float(dim)/float(max_dim)
#print max_dim, dim, ratio
ratio = np.max((.35,ratio))
indtoshow = np.asarray(which_dim).T
sH, sV = 16,16*ratio*1
plt.figure(figsize=(sH,sV))
no_row_in_plot = dim/6 + 1 #6 is arbitrarily selected
if no_row_in_plot <=1:
no_col_in_plot = dim
else:
no_col_in_plot = 6
axisNum = 0
compname = getattr(self, 'compname')
while axisNum < dim:
axisNum += 1
ax = plt.subplot(no_row_in_plot, no_col_in_plot, axisNum)
ind = int(indtoshow[axisNum-1])
mp = codebook[:,ind]
plt.plot(mp,'-k',linewidth = 0.8)
#pl = plt.pcolor(mp[::-1])
plt.title(compname[0][ind])
font = {'size' : text_size*sH/no_col_in_plot}
plt.rc('font', **font)
#plt.axis('off')
#plt.axis([0, msz0, 0, msz1])
#ax.set_yticklabels([])
#ax.set_xticklabels([])
#plt.colorbar(pl)
plt.show()
def lininit(self):
#X = UsigmaWT
#XTX = Wsigma^2WT
#T = XW = Usigma #Transformed by W EigenVector, can be calculated by
#multiplication PC matrix by eigenval too
#Further, we can get lower ranks by using just a few of the eigenvectors
#T(2) = U(2)sigma(2) = XW(2) ---> 2 is the number of selected eigenvectors
# This is how we initialize the map, just by using the first two eigenvalues and eigenvectors
# Further, we create a linear combination of them in the new map by giving values from -1 to 1 in each
#direction of the SOM map
# it should be noted that here, X is the covariance matrix of the original data
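# Concretely, each node j is set to
#   codebook[j] = mean(data) + coord[j, 0] * v1 + coord[j, 1] * v2
# where v1, v2 are the first two principal directions scaled by their explained
# variance and coord[j] are the node's grid coordinates rescaled to [-1, 1], so the
# initial map is a flat sheet spanning the plane of largest data variance.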
msize = getattr(self, 'mapsize')
rows = msize[0]
cols = msize[1]
nnodes = getattr(self, 'nnodes')
if np.min(msize)>1:
coord = np.zeros((nnodes, 2))
for i in range(0,nnodes):
coord[i,0] = int(i/cols) #x
coord[i,1] = int(i%cols) #y
mx = np.max(coord, axis = 0)
mn = np.min(coord, axis = 0)
coord = (coord - mn)/(mx-mn)
coord = (coord - .5)*2
data = getattr(self, 'data')
me = np.mean(data, 0)
data = (data - me)
codebook = np.tile(me, (nnodes,1))
pca = RandomizedPCA(n_components=2) #Randomized PCA is scalable
#pca = PCA(n_components=2)
pca.fit(data)
eigvec = pca.components_
eigval = pca.explained_variance_
norms = np.sqrt(np.einsum('ij,ij->i', eigvec, eigvec))
eigvec = ((eigvec.T/norms)*eigval).T; eigvec.shape
for j in range(nnodes):
for i in range(eigvec.shape[0]):
codebook[j,:] = codebook[j, :] + coord[j,i]*eigvec[i,:]
return np.around(codebook, decimals = 6)
elif np.min(msize) == 1:
coord = np.zeros((nnodes, 1))
for i in range(0,nnodes):
#coord[i,0] = int(i/cols) #x
coord[i,0] = int(i%cols) #y
mx = np.max(coord, axis = 0)
mn = np.min(coord, axis = 0)
#print coord
coord = (coord - mn)/(mx-mn)
coord = (coord - .5)*2
#print coord
data = getattr(self, 'data')
me = np.mean(data, 0)
data = (data - me)
codebook = np.tile(me, (nnodes,1))
pca = RandomizedPCA(n_components=1) #Randomized PCA is scalable
#pca = PCA(n_components=2)
pca.fit(data)
eigvec = pca.components_
eigval = pca.explained_variance_
norms = np.sqrt(np.einsum('ij,ij->i', eigvec, eigvec))
eigvec = ((eigvec.T/norms)*eigval).T; eigvec.shape
for j in range(nnodes):
for i in range(eigvec.shape[0]):
codebook[j,:] = codebook[j, :] + coord[j,i]*eigvec[i,:]
return np.around(codebook, decimals = 6)
def normalize(data, method='var'):
#methods = ['var','range','log','logistic','histD','histC']
#status = ['done', 'undone']
me = np.mean(data, axis = 0)
st = np.std(data, axis = 0)
if method == 'var':
me = np.mean(data, axis = 0)
st = np.std(data, axis = 0)
n_data = (data-me)/st
return n_data
def normalize_by(data_raw, data, method='var'):
#methods = ['var','range','log','logistic','histD','histC']
#status = ['done', 'undone']
# to have the mean and std of the original data, by which SOM is trained
me = np.mean(data_raw, axis = 0)
st = np.std(data_raw, axis = 0)
if method == 'var':
n_data = (data-me)/st
return n_data
def denormalize_by(data_by, n_vect, n_method = 'var'):
#based on the normalization
if n_method == 'var':
me = np.mean(data_by, axis = 0)
st = np.std(data_by, axis = 0)
vect = n_vect* st + me
return vect
else:
print 'data was not normalized before; returning the vector unchanged'
return n_vect
def l(a,b):
c = np.zeros(b.shape)
c[a-b >=0] = 1
return c
##Function to show hits
#som_labels = sm.project_data(Tr_Data)
#S = pd.DataFrame(data=som_labels,columns= ['label'])
#a = S['label'].value_counts()
#a = a.sort_index()
#a = pd.DataFrame(data=a.values, index=a.index,columns=['label'])
#d = pd.DataFrame(data= range(msz0*msz1),columns=['node_ID'])
#c = d.join(a,how='outer')
#c.fillna(value=0,inplace=True)
#hits = c.values[:,1]
#hits = hits
#nodeID = np.arange(msz0*msz1)
#c_bmu = nodeID%msz1
#r_bmu = msz0 - nodeID/msz1
#fig, ax = plt.subplots()
#plt.axis([0, msz0, 0, msz1])
#ax.scatter(r_bmu, c_bmu, s=hits/2)
|
shawngraham/SOMPY
|
sompy.py
|
Python
|
mit
| 50,591
|
[
"Gaussian"
] |
053265c23bd6f35840b0aebbd269efeaae082b10fc9391ffe541692a91e35a63
|
# Copyright (c) 2007 The Regents of The University of Michigan
# Copyright (c) 2010 The Hewlett-Packard Development Company
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Nathan Binkert
import m5
from m5 import internal
from m5.internal.stats import schedStatEvent as schedEvent
from m5.objects import Root
from m5.util import attrdict, fatal
outputList = []
def initText(filename, desc=True):
output = internal.stats.initText(filename, desc)
registerOutput(output)
def registerOutput(output):
outputList.append(output)
dumpListeners = []
def registerDumpListener(listener):
dumpListeners.append(listener)
def initSimStats():
internal.stats.initSimStats()
names = []
stats_dict = {}
stats_list = []
raw_stats_list = []
def enable():
'''Enable the statistics package. Before the statistics package is
enabled, all statistics must be created and initialized and once
the package is enabled, no more statistics can be created.'''
__dynamic_cast = []
for k, v in internal.stats.__dict__.iteritems():
if k.startswith('dynamic_'):
__dynamic_cast.append(v)
for stat in internal.stats.statsList():
for cast in __dynamic_cast:
val = cast(stat)
if val is not None:
stats_list.append(val)
raw_stats_list.append(val)
break
else:
fatal("unknown stat type %s", stat)
for stat in stats_list:
if not stat.check() or not stat.baseCheck():
fatal("stat check failed for '%s' %d\n", stat.name, stat.id)
if not (stat.flags & flags.display):
stat.name = "__Stat%06d" % stat.id
# sort by the dotted stat names so related stats stay grouped together
stats_list.sort(key=lambda stat: stat.name.split('.'))
for stat in stats_list:
stats_dict[stat.name] = stat
stat.enable()
internal.stats.enable();
def prepare():
'''Prepare all stats for data access. This must be done before
dumping and serialization.'''
for stat in stats_list:
stat.prepare()
lastDump = 0
def dump():
'''Dump all statistics data to the registered outputs'''
curTick = m5.curTick()
global lastDump
assert lastDump <= curTick
if lastDump == curTick:
return
lastDump = curTick
internal.stats.processDumpQueue()
prepare()
for output in outputList:
if output.valid():
output.begin()
for stat in stats_list:
output.visit(stat)
output.end()
for listener in dumpListeners:
listener()
def reset():
'''Reset all statistics to the base state'''
# call reset stats on all SimObjects
root = Root.getInstance()
if root:
for obj in root.descendants(): obj.resetStats()
# call any other registered stats reset callbacks
for stat in stats_list:
stat.reset()
internal.stats.processResetQueue()
flags = attrdict({
'none' : 0x0000,
'init' : 0x0001,
'display' : 0x0002,
'total' : 0x0010,
'pdf' : 0x0020,
'cdf' : 0x0040,
'dist' : 0x0080,
'nozero' : 0x0100,
'nonan' : 0x0200,
})
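# --- Editor's note: the lines below are an illustrative addition, not part of
# gem5. They sketch how a configuration script registers a dump listener with
# the functions defined above; the listener body is hypothetical.
_example_dump_count = [0]
def _example_dump_listener():
    # invoked by dump() after every registered output has been visited
    _example_dump_count[0] += 1
# Typical usage from a config script would be:
#   registerDumpListener(_example_dump_listener)
#   dump()    # writes all stats to the registered outputs, then fires listeners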
|
danielpalomino/gem5
|
src/python/m5/stats/__init__.py
|
Python
|
bsd-3-clause
| 4,670
|
[
"VisIt"
] |
1a494c58f7d60213484a7b986181781948db319be47f5763d9844e1483f1ad6b
|
"""Class for making requests to a ComponentMonitoring Service."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
__RCSID__ = "$Id$"
from DIRAC.Core.Base.Client import Client, createClient
@createClient("Framework/ComponentMonitoring")
class ComponentMonitoringClient(Client):
def __init__(self, **kwargs):
"""
Constructor function
"""
super(ComponentMonitoringClient, self).__init__(**kwargs)
self.setServer("Framework/ComponentMonitoring")
|
ic-hep/DIRAC
|
src/DIRAC/FrameworkSystem/Client/ComponentMonitoringClient.py
|
Python
|
gpl-3.0
| 552
|
[
"DIRAC"
] |
b5463e068314be044d8b7518f2317ee201872bf317e949a1e177ba98f228aa86
|
""" DIRAC.DataManagementSystem.private package """
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
__RCSID__ = "$Id"
|
yujikato/DIRAC
|
src/DIRAC/DataManagementSystem/private/__init__.py
|
Python
|
gpl-3.0
| 179
|
[
"DIRAC"
] |
04eb561ae4ac83c0468142c49efe9a7d2da356c509a81854717fd895d6247765
|
#!/usr/bin/env python
#########################################################################################
#
# Spinal Cord Automatic Detection
#
# ---------------------------------------------------------------------------------------
# Copyright (c) 2015 Polytechnique Montreal <www.neuro.polymtl.ca>
# Authors: Benjamin De Leener
# Modified: 2015-07-27
#
# About the license: see the file LICENSE
#########################################################################################
import sys
from msct_base_classes import BaseScript, Algorithm
from msct_parser import Parser
from msct_image import Image
import os
import sct_utils as sct
import numpy as np
from sct_straighten_spinalcord import smooth_centerline
import sct_convert as conv
def smooth_minimal_path(img, nb_pixels=1):
"""
Function intended to smooth the minimal path result in the R-L/A-P directions with a gaussian filter
of a kernel of size nb_pixels
:param img: Image to be smoothed (is intended to be minimal path image)
:param nb_pixels: kernel size of the gaussian filter
:return: returns a smoothed image
"""
nx, ny, nz, nt, px, py, pz, pt = img.dim
from scipy.ndimage.filters import gaussian_filter
raw_orientation = img.change_orientation()
img.data = gaussian_filter(img.data, [nb_pixels/px, nb_pixels/py, 0])
img.change_orientation(raw_orientation)
return img
def symmetry_detector_right_left(data, cropped_xy=0):
"""
This function
:param img: input image used for the algorithm
:param cropped_xy: 1 when we want to crop around the center for the correlation, 0 when not
:return: returns an image that is the body symmetry (correlation between left and right side of the image)
"""
from scipy.ndimage.filters import gaussian_filter
# Squeeze the data and smooth it before computing the correlation
data = np.squeeze(data)
dim = data.shape
img_data = gaussian_filter(data, [0, 5, 5])
# Cropping around center of image to remove side noise
if cropped_xy:
x_mid = np.round(dim[0]/2)
x_crop_min = int(x_mid - (0.25/2)*dim[0])
x_crop_max = int(x_mid + (0.25/2)*dim[0])
img_data[0:x_crop_min,:,:] = 0
img_data[x_crop_max:-1,:,:] = 0
# Acquiring a slice and inverted slice for correlation
slice_p = np.squeeze(np.sum(img_data, 1))
slice_p_reversed = np.flipud(slice_p)
# initialise containers for correlation
m, n = slice_p.shape
cross_corr = ((2*m)-1, n)
cross_corr = np.zeros(cross_corr)
for iz in range(0, np.size(slice_p[1])):
corr1 = slice_p[:, iz]
corr2 = slice_p_reversed[:, iz]
cross_corr[:, iz] = np.double(np.correlate(corr1, corr2, "full"))
max_value = np.max(cross_corr[:, iz])
if max_value == 0:
cross_corr[:, iz] = 0
else:
cross_corr[:, iz] = cross_corr[:, iz]/max_value
data_out = np.zeros((dim[0], dim[2]))
index1 = np.round(np.linspace(0,2*m-3, m))
index2 = np.round(np.linspace(1,2*m-2, m))
for i in range(0,m):
indx1 = int(index1[i])
indx2 = int(index2[i])
out1 = cross_corr[indx1, :]
out2 = cross_corr[indx2, :]
data_out[i, :] = 0.5*(out1 + out2)
result = np.hstack([data_out[:, np.newaxis, :] for i in range(0, dim[1])])
return result
def normalize_array_histogram(array):
"""
Equalizes the data in array
:param array:
:return:
"""
array_min = np.amin(array)
array -= array_min
array_max = np.amax(array)
array /= array_max
return array
def get_minimum_path(data, smooth_factor=np.sqrt(2), invert=1, verbose=1, debug=0):
"""
This method returns the minimal path of the image
:param data: input data of the image
:param smooth_factor: factor used to smooth the directions that are not up-down
:param invert: inverts the image data for the algorithm. The algorithm works better if the image data is inverted
:param verbose:
:param debug:
:return:
"""
[m, n, p] = data.shape
max_value = np.amax(data)
if invert:
data=max_value-data
J1 = np.ones([m, n, p])*np.inf
J2 = np.ones([m, n, p])*np.inf
J1[:, :, 0] = 0
for row in range(1, p):
pJ = J1[:, :, row-1]
cP = np.squeeze(data[1:-2, 1:-2, row])
VI = np.dstack((cP*smooth_factor, cP*smooth_factor, cP, cP*smooth_factor, cP*smooth_factor))
Jq = np.dstack((pJ[0:-3, 1:-2], pJ[1:-2, 0:-3], pJ[1:-2, 1:-2], pJ[1:-2, 2:-1], pJ[2:-1, 1:-2]))
J1[1:-2, 1:-2, row] = np.min(Jq+VI, 2)
pass
J2[:, :, p-1] = 0
for row in range(p-2, -1, -1):
pJ = J2[:, :, row+1]
cP = np.squeeze(data[1:-2, 1:-2, row])
VI = np.dstack((cP*smooth_factor, cP*smooth_factor, cP, cP*smooth_factor, cP*smooth_factor))
Jq = np.dstack((pJ[0:-3, 1:-2], pJ[1:-2, 0:-3], pJ[1:-2, 1:-2], pJ[1:-2, 2:-1], pJ[2:-1, 1:-2]))
J2[1:-2, 1:-2, row] = np.min(Jq+VI, 2)
pass
result = J1+J2
if invert:
percent = np.percentile(result, 50)
result[result > percent] = percent
result_min = np.amin(result)
result_max = np.amax(result)
result = np.divide(np.subtract(result, result_min), result_max)
result_max = np.amax(result)
result = 1-result
result[result == np.inf] = 0
result[np.isnan(result)] = 0
return result, J1, J2
def get_minimum_path_nii(fname):
from msct_image import Image
data=Image(fname)
vesselness_data = data.data
raw_orient=data.change_orientation()
result ,J1, J2 = get_minimum_path(data.data, invert=1)
data.data = result
data.change_orientation(raw_orient)
data.file_name += '_minimalpath'
data.save()
def ind2sub(array_shape, ind):
"""
:param array_shape: shape of the array
:param ind: index number
:return: coordinates equivalent to the index number for a given array shape
"""
rows = (ind.astype('int') / array_shape[1])
cols = (ind.astype('int') % array_shape[1]) # or numpy.mod(ind.astype('int'), array_shape[1])
return rows, cols
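# --- Editor's note: small illustrative example for ind2sub, not part of the
# original script. It relies on the Python 2 integer division used above.
def _ind2sub_example():
    # for a slice of shape (3, 4), flat index 7 sits at row 1, column 3
    rows, cols = ind2sub((3, 4), np.array([7]))
    return rows, cols   # -> (array([1]), array([3]))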
def get_centerline(data, dim):
"""
This function extracts the highest value per slice from a minimal path image
and builds the centerline from it
:param data: minimal path image data
:param dim: shape of the output centerline volume
:return: binary volume of shape dim with the centerline voxels set to 1
"""
centerline = np.zeros(dim)
data[data == np.inf] = 0
data[np.isnan(data)] = 0
for iz in range(0, dim[2]):
ind = np.argmax(data[:, :, iz])
X, Y = ind2sub(data[:, :, iz].shape,ind)
centerline[X,Y,iz] = 1
return centerline
class SymmetryDetector(Algorithm):
def __init__(self, input_image, contrast=None, verbose=0, direction="lr", nb_sections=1, crop_xy=1):
super(SymmetryDetector, self).__init__(input_image)
self._contrast = contrast
self._verbose = verbose
self.direction = direction
self.nb_sections = nb_sections
self.crop_xy = crop_xy
@property
def contrast(self):
return self._contrast
@contrast.setter
def contrast(self, value):
if value in ['t1', 't2']:
self._contrast = value
else:
raise Exception('ERROR: contrast value must be t1 or t2')
@property
def verbose(self):
return self._verbose
@verbose.setter
def verbose(self, value):
if value in [0, 1]:
self._verbose = value
else:
raise Exception('ERROR: verbose value must be an integer and equal to 0 or 1')
def execute(self):
"""
This method executes the symmetry detection
:return: returns the symmetry data
"""
img = Image(self.input_image)
raw_orientation = img.change_orientation()
data = np.squeeze(img.data)
dim = data.shape
section_length = dim[1]/self.nb_sections
result = np.zeros(dim)
for i in range(0, self.nb_sections):
if (i+1)*section_length > dim[1]:
y_length = (i+1)*section_length - ((i+1)*section_length - dim[1])
result[:, i*section_length:i*section_length + y_length, :] = symmetry_detector_right_left(data[:, i*section_length:i*section_length + y_length, :], cropped_xy=self.crop_xy)
sym = symmetry_detector_right_left(data[:, i*section_length:(i+1)*section_length, :], cropped_xy=self.crop_xy)
result[:, i*section_length:(i+1)*section_length, :] = sym
result_image = Image(img)
if len(result_image.data.shape) == 4:  # match 4D input volumes
result_image.data = result[:,:,:,np.newaxis]
else:
result_image.data = result
result_image.change_orientation(raw_orientation)
return result_image.data
class ScadScript(BaseScript):
def __init__(self):
super(ScadScript, self).__init__()
@staticmethod
def get_parser():
# Initialize the parser
parser = Parser(__file__)
parser.usage.set_description('''This program automatically detect the spinal cord in a MR image and output a centerline of the spinal cord.''')
parser.add_option(name="-i",
type_value="file",
description="input image.",
mandatory=True,
example="t2.nii.gz")
parser.add_option(name="-t",
type_value="multiple_choice",
description="type of image contrast, t2: cord dark / CSF bright ; t1: cord bright / CSF dark",
mandatory=True,
example=['t1', 't2'])
parser.usage.addSection("General options")
parser.add_option(name="-o",
type_value="string",
description="Centerline file name (result file name)",
mandatory=False,
example="out.nii.gz")
parser.add_option(name="-sym",
type_value="multiple_choice",
description="Uses right-left symmetry of the image to improve accuracy.",
mandatory=False,
default_value="0",
example=['0', '1'])
parser.add_option(name="-sym_exp",
type_value="int",
description="Weight symmetry value (only use with flag -sym). Minimum weight: 0, maximum weight: 100.",
mandatory=False,
default_value="10")
parser.add_option(name="-r",
type_value="multiple_choice",
description= "Removes the temporary folder and debug folder used for the algorithm at the end of execution",
mandatory=False,
default_value="0",
example=['0', '1'])
parser.add_option(name="-sc_rad",
type_value="int",
description="Gives approximate radius of spinal cord to help the algorithm",
mandatory=False,
default_value="4",
example="4")
parser.add_option(name="-v",
type_value="multiple_choice",
description="1: display on, 0: display off (default)",
mandatory=False,
example=["0", "1"],
default_value="1")
parser.add_option(name="-h",
type_value=None,
description="display this help",
mandatory=False)
return parser
class SCAD(Algorithm):
def __init__(self, input_image, contrast=None, verbose=1, rm_tmp_file=0,output_filename=None, debug=0, vesselness_provided=0, minimum_path_exponent=100, enable_symmetry=0, symmetry_exponent=0, spinalcord_radius = 3):
"""
Constructor for the automatic spinal cord detection
:param output_filename: Name of the result file of the centerline detection. Must contain the extension (.nii / .nii.gz)
:param input_image:
:param contrast:
:param verbose:
:param rm_tmp_file:
:param debug:
:param produce_output: Produce output debug files,
:param vesselness_provided: Activate if the vesselness filter image is already provided (to save time),
the image is expected to be in the same folder as the input image
:return:
"""
super(SCAD, self).__init__(input_image, produce_output=1-rm_tmp_file)
self._contrast = contrast
self._verbose = verbose
self.output_filename = input_image.file_name + "_centerline.nii.gz"
if output_filename is not None:
self.output_filename = output_filename
self.rm_tmp_file = rm_tmp_file
self.debug = debug
self.vesselness_provided = vesselness_provided
self.minimum_path_exponent = minimum_path_exponent
self.enable_symmetry = enable_symmetry
self.symmetry_exponent = symmetry_exponent
self.spinalcord_radius = spinalcord_radius
# attributes used in the algorithm
self.raw_orientation = None
self.raw_symmetry = None
self.J1_min_path = None
self.J2_min_path = None
self.minimum_path_data = None
self.minimum_path_powered = None
self.smoothed_min_path = None
self.spine_detect_data = None
self.centerline_with_outliers = None
self.debug_folder = None
@property
def contrast(self):
return self._contrast
@contrast.setter
def contrast(self, value):
if value in ['t1', 't2']:
self._contrast = value
else:
raise Exception('ERROR: contrast value must be t1 or t2')
@property
def verbose(self):
return self._verbose
@verbose.setter
def verbose(self, value):
if value in [0, 1]:
self._verbose = value
else:
raise Exception('ERROR: verbose value must be an integer and equal to 0 or 1')
def produce_output_files(self):
"""
Method used to output all debug files at the same time. To be used after the algorithm is executed
:return:
"""
import time
from sct_utils import slash_at_the_end
path_tmp = slash_at_the_end('scad_output_'+time.strftime("%y%m%d%H%M%S"), 1)
sct.run('mkdir '+path_tmp, self.verbose)
# getting input image header
img = self.input_image.copy()
# saving body symmetry
img.data = self.raw_symmetry
img.change_orientation(self.raw_orientation)
img.file_name += "body_symmetry"
img.save()
# saving minimum paths
img.data = self.minimum_path_data
img.change_orientation(self.raw_orientation)
img.file_name = "min_path"
img.save()
img.data = self.J1_min_path
img.change_orientation(self.raw_orientation)
img.file_name = "J1_min_path"
img.save()
img.data = self.J2_min_path
img.change_orientation(self.raw_orientation)
img.file_name = "J2_min_path"
img.save()
# saving minimum path powered
img.data = self.minimum_path_powered
img.change_orientation(self.raw_orientation)
img.file_name = "min_path_powered_"+str(self.minimum_path_exponent)
img.save()
# saving smoothed min path
img = self.smoothed_min_path.copy()
img.change_orientation(self.raw_orientation)
img.file_name = "min_path_power_"+str(self.minimum_path_exponent)+"_smoothed"
img.save()
# save symmetry_weighted_minimal_path
img.data = self.spine_detect_data
img.change_orientation(self.raw_orientation)
img.file_name = "symmetry_weighted_minimal_path"
img.save()
def output_debug_file(self, img, data, file_name):
"""
This method writes a nifti file that corresponds to a step in the algorithm for easy debug.
The new nifti file uses the header from the image passed as parameter
:param data: data to be written to file
:param file_name: filename...
:return: None
"""
if self.produce_output:
current_folder = os.getcwd()
os.chdir(self.debug_folder)
try:
img = Image(img)
img.data = data
img.change_orientation(self.raw_orientation)
img.file_name = file_name
img.save()
except Exception, e:
print e
os.chdir(current_folder)
def setup_debug_folder(self):
"""
Sets up the folder for the step by step files for this algorithm
The folder's absolute path can be found in the self.debug_folder property
:return: None
"""
if self.produce_output:
import time
from sct_utils import slash_at_the_end
folder = slash_at_the_end('scad_output_'+time.strftime("%y%m%d%H%M%S"), 1)
sct.run('mkdir '+folder, self.verbose)
self.debug_folder = os.path.abspath(folder)
conv.convert(str(self.input_image.absolutepath), str(self.debug_folder)+"/raw.nii.gz")
def create_temporary_path(self):
import time
from sct_utils import slash_at_the_end
path_tmp = slash_at_the_end('tmp.'+time.strftime("%y%m%d%H%M%S"), 1)
sct.run('mkdir '+path_tmp, self.verbose)
return path_tmp
def execute(self):
print 'Execution of the SCAD algorithm in '+str(os.getcwd())
original_name = self.input_image.file_name
vesselness_file_name = "imageVesselNessFilter.nii.gz"
raw_file_name = "raw.nii"
self.setup_debug_folder()
if self.debug:
import matplotlib.pyplot as plt # import for debug purposes
# create tmp and copy input
path_tmp = self.create_temporary_path()
conv.convert(self.input_image.absolutepath, path_tmp+raw_file_name)
if self.vesselness_provided:
sct.run('cp '+vesselness_file_name+' '+path_tmp+vesselness_file_name)
os.chdir(path_tmp)
# get input image information
img = Image(raw_file_name)
# save original orientation and change image to RPI
self.raw_orientation = img.change_orientation()
# get body symmetry
if self.enable_symmetry:
from msct_image import change_data_orientation
sym = SymmetryDetector(raw_file_name, self.contrast, crop_xy=0)
self.raw_symmetry = sym.execute()
img.change_orientation(self.raw_orientation)
self.output_debug_file(img, self.raw_symmetry, "body_symmetry")
img.change_orientation()
# vesselness filter
if not self.vesselness_provided:
sct.run('isct_vesselness -i '+raw_file_name+' -t ' + self._contrast+" -radius "+str(self.spinalcord_radius))
# load vesselness filter data and perform minimum path on it
img = Image(vesselness_file_name)
self.output_debug_file(img, img.data, "Vesselness_Filter")
img.change_orientation()
self.minimum_path_data, self.J1_min_path, self.J2_min_path = get_minimum_path(img.data, invert=1, debug=1)
self.output_debug_file(img, self.minimum_path_data, "minimal_path")
self.output_debug_file(img, self.J1_min_path, "J1_minimal_path")
self.output_debug_file(img, self.J2_min_path, "J2_minimal_path")
# Apply an exponent to the minimum path
self.minimum_path_powered = np.power(self.minimum_path_data, self.minimum_path_exponent)
self.output_debug_file(img, self.minimum_path_powered, "minimal_path_power_"+str(self.minimum_path_exponent))
# Saving in Image since smooth_minimal_path needs pixel dimensions
img.data = self.minimum_path_powered
# smooth resulting minimal path
self.smoothed_min_path = smooth_minimal_path(img)
self.output_debug_file(img, self.smoothed_min_path.data, "minimal_path_smooth")
# normalise symmetry values between 0 and 1
if self.enable_symmetry:
normalised_symmetry = normalize_array_histogram(self.raw_symmetry)
self.output_debug_file(img, normalised_symmetry, "normalized_symmetry")
# multiply normalised symmetry data with the minimum path result
from msct_image import change_data_orientation
self.spine_detect_data = np.multiply(self.smoothed_min_path.data, change_data_orientation(np.power(normalised_symmetry, self.symmetry_exponent), self.raw_orientation, "RPI"))
self.output_debug_file(img, self.spine_detect_data, "symmetry_x_min_path")
# extract the centerline from the minimal path image
self.centerline_with_outliers = get_centerline(self.spine_detect_data, self.spine_detect_data.shape)
else:
# extract the centerline from the minimal path image
self.centerline_with_outliers = get_centerline(self.smoothed_min_path.data, self.smoothed_min_path.data.shape)
self.output_debug_file(img, self.centerline_with_outliers, "centerline_with_outliers")
# saving centerline with outliers so it can be inspected before smoothing
img.data = self.centerline_with_outliers
img.change_orientation()
img.file_name = "centerline_with_outliers"
img.save()
# use a b-spline to smooth out the centerline
x, y, z, dx, dy, dz = smooth_centerline("centerline_with_outliers.nii.gz")
# save the centerline
nx, ny, nz, nt, px, py, pz, pt = img.dim
img.data = np.zeros((nx, ny, nz))
for i in range(0, np.size(x)-1):
img.data[int(x[i]), int(y[i]), int(z[i])] = 1
self.output_debug_file(img, img.data, "centerline")
img.change_orientation(self.raw_orientation)
img.file_name = "centerline"
img.save()
# copy back centerline
os.chdir('../')
conv.convert(path_tmp+img.file_name+img.ext, self.output_filename)
if self.rm_tmp_file == 1:
import shutil
shutil.rmtree(path_tmp)
print "To view the output with FSL :"
sct.printv("fslview "+self.input_image.absolutepath+" "+self.output_filename+" -l Red", self.verbose, "info")
if __name__ == "__main__":
parser = ScadScript.get_parser()
arguments = parser.parse(sys.argv[1:])
input_image = Image(arguments["-i"])
contrast_type = arguments["-t"]
scad = SCAD(input_image, contrast=contrast_type)
if "-o" in arguments:
scad.output_filename = arguments["-o"]
# if "-p" in arguments:
# scad.produce_output = int(arguments["-p"])
if "-r" in arguments:
scad.rm_tmp_file = int(arguments["-r"])
if "-sym" in arguments:
scad.enable_symmetry = int(arguments["-sym"])
if "-sym_exp" in arguments:
scad.symmetry_exponent = int(arguments["-sym_exp"])
if "-sc_rad" in arguments:
scad.spinalcord_radius = int(arguments["-sc_rad"])
scad.execute()
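# --- Editor's note: illustrative addition, not part of the original script. The
# __main__ block above covers the command line; this sketches driving SCAD
# programmatically with symmetry enabled. The file name is hypothetical.
def _example_scad_run():
    img = Image("t2.nii.gz")
    scad = SCAD(img, contrast="t2", rm_tmp_file=1, enable_symmetry=1, symmetry_exponent=10)
    scad.execute()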
|
3324fr/spinalcordtoolbox
|
dev/sct_detect_spinalcord/sct_detect_spinalcord.py
|
Python
|
mit
| 23,339
|
[
"Gaussian"
] |
6ee3283e49f1646bb3a5bd3e9f8c4a48414fc28d9ee8ea90008279ebfe89a254
|
""" drivers/__init__.py
@copyright: (C) 2012-2017 by D. Brian Kimmel
The following terms apply to all files associated
with the software unless explicitly disclaimed in individual files.
The authors hereby grant permission to use, copy, modify, distribute,
and license this software and its documentation for any purpose, provided
that existing copyright notices are retained in all copies and that this
notice is included verbatim in any distributions. No written agreement,
license, or royalty fee is required for any of the authorized uses.
Modifications to this software may be copyrighted by their authors
and need not follow the licensing terms described here, provided that
the new terms are clearly indicated on the first page of each file where
they apply.
IN NO EVENT SHALL THE AUTHORS OR DISTRIBUTORS BE LIABLE TO ANY PARTY
FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
ARISING OUT OF THE USE OF THIS SOFTWARE, ITS DOCUMENTATION, OR ANY
DERIVATIVES THEREOF, EVEN IF THE AUTHORS HAVE BEEN ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
THE AUTHORS AND DISTRIBUTORS SPECIFICALLY DISCLAIM ANY WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT. THIS SOFTWARE
IS PROVIDED ON AN "AS IS" BASIS, AND THE AUTHORS AND DISTRIBUTORS HAVE
NO OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR
MODIFICATIONS.
----------------------------------------------------------------------------
These are the drivers for various interfaces with the computer.
Serial - going away but it still exists. Serial commands thru a USB dongle work here.
USB - the new standard.
HID - Human Interface Device class of USB devices.
Ethernet - not too much of this is used yet.
"""
__updated__ = '2017-01-20'
__version_info__ = (1, 6, 0)
__version__ = '.'.join(map(str, __version_info__))
VALID_INTERFACES = ['Null', 'Serial', 'USB', 'Ethernet']
VALID_PROTOCOLS = ['TCP', 'UDP', 'Both']
# ## END DBK
|
DBrianKimmel/PyHouse
|
Project/src/Modules/Core/Drivers/__init__.py
|
Python
|
mit
| 2,040
|
[
"Brian"
] |
77b3a299f5f7f0b62aff3fe0732b358e1bb8fe62d7387fa3b25e1318d27f96fb
|
"""
Copyright (C) 2004-2015 Pivotal Software, Inc. All rights reserved.
This program and the accompanying materials are made available under
the terms of the under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from contextlib import closing
from datetime import datetime
from StringIO import StringIO
import unittest2 as unittest
from unittest2.runner import _WritelnDecorator
from tinctest import TINCTestLoader
from tinctest import TINCTextTestResult
from mpp.models import SQLConcurrencyTestCase
@unittest.skip('mock')
class MockSQLConcurrencyTestCase(SQLConcurrencyTestCase):
"""
@description test case with metadata
@created 2012-07-05 12:00:00
@modified 2012-07-05 12:00:02
@tags orca hashagg
"""
def setUp(self):
pass
def test_explicit_definition(self):
pass
def test_with_gpdiff(self):
"""
@gpdiff True
"""
pass
class SQLConcurrencyTestCaseTests(unittest.TestCase):
def test_infer_metadata(self):
test_loader = TINCTestLoader()
test_suite = test_loader.loadTestsFromTestCase(MockSQLConcurrencyTestCase)
test_case = None
for case in test_suite._tests:
if case.name == "MockSQLConcurrencyTestCase.test_query02":
test_case = case
self.assertNotEqual(test_case, None)
self.assertEqual(test_case.name, "MockSQLConcurrencyTestCase.test_query02")
self.assertEqual(test_case.author, 'kumara64')
self.assertEqual(test_case.description, 'test sql test case')
self.assertEqual(test_case.created_datetime, datetime.strptime('2012-07-05 12:00:00', '%Y-%m-%d %H:%M:%S'))
self.assertEqual(test_case.modified_datetime, datetime.strptime('2012-07-08 12:00:02', '%Y-%m-%d %H:%M:%S'))
self.assertEqual(test_case.tags, set(['orca', 'hashagg', 'executor']))
self.assertEqual(test_case.gpdiff, False)
def test_with_gpdiff(self):
test_case = MockSQLConcurrencyTestCase('test_with_gpdiff')
self.assertEqual(test_case.gpdiff, True)
|
rvs/gpdb
|
src/test/tinc/tincrepo/mpp/models/test/sql_related/test_sql_concurrency_test_case.py
|
Python
|
apache-2.0
| 2,523
|
[
"ORCA"
] |
b627892c614a930a9a87d4ca3f460867ca878ac0f68714b7764f735d01479a56
|
#!/usr/bin/env python
"""
ezgrid.py
Functions for generating ROMS grid files
Copyright (c)2017 University of Hawaii under the BSD-License.
"""
import numpy as np
from datetime import datetime
import netCDF4
import seapy
from collections import namedtuple
def create_grid(grid_file, lat, lon):
"""
Create a new, basic grid. This will construct the ROMS grid netcdf file
given a set of latitude and longitude coordinates for the rho-grid.
The coordinates can be spaced however specified, and the grid will be
created; however, this does not guarantee the grid will be valid.
After the grid is created, the bathymetry and mask will need to be
generated independently.
Parameters
----------
grid_file : string,
Name of the grid file to create. NOTE: this file will be
overwritten.
lat : ndarray,
latitude of the grid cells. The array must be the size
of the desired grid.
lon : ndarray,
longitude of the grid cells. The array must be the size
of the desired grid.
Returns
-------
netCDF4 :
The netcdf object of the new grid
Examples
--------
To create a basic, evenly spaced grid:
>>> lat = np.arange(10, 20, 0.25)
>>> lon = np.arange(-100, -80, 0.25)
>>> lon, lat = np.meshgrid(lon, lat)
>>> create_grid('mygrid.nc', lat, lon)
To create more advanced grids, simply generate the
2D arrays of lat and lon in the manner you want your cells
and call create_grid:
>>> create_grid('mygrid.nc', lat, lon)
"""
# Put lat/lon into proper arrays
lat = np.atleast_2d(lat)
lon = np.atleast_2d(lon)
if lat.shape != lon.shape:
raise AttributeError("lat and lon shapes are not equal")
# Calculate the angle between the points
angle = np.zeros(lat.shape)
angle[:, :-1] = seapy.earth_angle(lon[:, :-1],
lat[:, :-1], lon[:, 1:], lat[:, 1:])
angle[:, -1] = angle[:, -2]
# Calculate distances/parameters
f = 2.0 * 7.2921150e-5 * np.sin(lat * np.pi / 180.0)
dx = np.zeros(f.shape)
dy = np.zeros(f.shape)
dx[:, 1:] = seapy.earth_distance(
lon[:, 1:], lat[:, 1:], lon[:, :-1], lat[:, :-1])
dy[1:, :] = seapy.earth_distance(
lon[1:, :], lat[1:, :], lon[:-1, :], lat[:-1, :])
dx[:, 0] = dx[:, 1]
dy[0, :] = dy[1, :]
pm = 1.0 / dx
pn = 1.0 / dy
dndx = np.zeros(dx.shape)
dmde = np.zeros(dx.shape)
dndx[:, 1:-1] = 0.5 * (dy[:, 2:] - dy[:, :-2])
dmde[1:-1, :] = 0.5 * (dx[2:, :] - dx[:-2, :])
xl = seapy.earth_distance(
np.min(lon), np.mean(lat), np.max(lon), np.mean(lat))
el = seapy.earth_distance(
np.mean(lon), np.min(lat), np.mean(lon), np.max(lat))
# Generate rho-grid coordinates
x_rho = np.zeros(lat.shape)
y_rho = np.zeros(lat.shape)
x_rho[:, 1:] = seapy.earth_distance(
lon[:, :-1], lat[:, :-1], lon[:, 1:], lat[:, 1:])
x_rho = np.cumsum(x_rho, axis=1)
y_rho[1:, :] = seapy.earth_distance(
lon[:-1, :], lat[:-1, :], lon[1:, :], lat[1:, :])
y_rho = np.cumsum(y_rho, axis=0)
# Create u-grid
lat_u = 0.5 * (lat[:, 1:] + lat[:, :-1])
lon_u = 0.5 * (lon[:, 1:] + lon[:, 0:-1])
x_u = np.zeros(lat_u.shape)
y_u = np.zeros(lat_u.shape)
x_u[:, 1:] = seapy.earth_distance(
lon_u[:, :-1], lat_u[:, :-1], lon_u[:, 1:], lat_u[:, 1:])
x_u = np.cumsum(x_u, axis=1)
y_u[1:, :] = seapy.earth_distance(
lon_u[:-1, :], lat_u[:-1, :], lon_u[1:, :], lat_u[1:, :])
y_u = np.cumsum(y_u, axis=0)
# Create v-grid
lat_v = 0.5 * (lat[1:, :] + lat[0:-1, :])
lon_v = 0.5 * (lon[1:, :] + lon[0:-1, :])
x_v = np.zeros(lat_v.shape)
y_v = np.zeros(lat_v.shape)
x_v[:, 1:] = seapy.earth_distance(
lon_v[:, :-1], lat_v[:, :-1], lon_v[:, 1:], lat_v[:, 1:])
x_v = np.cumsum(x_v, axis=1)
y_v[1:, :] = seapy.earth_distance(
lon_v[:-1, :], lat_v[:-1, :], lon_v[1:, :], lat_v[1:, :])
y_v = np.cumsum(y_v, axis=0)
# Create psi-grid
lat_psi = lat_v[:, :-1]
lon_psi = lon_u[:-1, :]
x_psi = np.zeros(lat_psi.shape)
y_psi = np.zeros(lat_psi.shape)
x_psi[:, 1:] = seapy.earth_distance(
lon_psi[:, :-1], lat_psi[:, :-1], lon_psi[:, 1:], lat_psi[:, 1:])
x_psi = np.cumsum(x_psi, axis=1)
y_psi[1:, :] = seapy.earth_distance(
lon_psi[:-1, :], lat_psi[:-1, :], lon_psi[1:, :], lat_psi[1:, :])
y_psi = np.cumsum(y_psi, axis=0)
# Create the new grid
nc = seapy.roms.ncgen.create_grid(
grid_file, lat.shape[0], lat.shape[1], clobber=True)
nc.variables["xl"][:] = xl
nc.variables["el"][:] = el
nc.variables["spherical"][:] = 1
nc.variables["f"][:] = f
nc.variables["pm"][:] = pm
nc.variables["pn"][:] = pn
nc.variables["dndx"][:] = dndx
nc.variables["dmde"][:] = dmde
nc.variables["x_rho"][:] = x_rho
nc.variables["y_rho"][:] = y_rho
nc.variables["x_psi"][:] = x_psi
nc.variables["y_psi"][:] = y_psi
nc.variables["x_u"][:] = x_u
nc.variables["y_u"][:] = y_u
nc.variables["x_v"][:] = x_v
nc.variables["y_v"][:] = y_v
nc.variables["lat_rho"][:] = lat
nc.variables["lon_rho"][:] = lon
nc.variables["lat_psi"][:] = lat_psi
nc.variables["lon_psi"][:] = lon_psi
nc.variables["lat_u"][:] = lat_u
nc.variables["lon_u"][:] = lon_u
nc.variables["lat_v"][:] = lat_v
nc.variables["lon_v"][:] = lon_v
nc.variables["N"][:] = 1
nc.variables["mask_rho"][:] = np.ones(lat.shape)
nc.variables["mask_u"][:] = np.ones(lat_u.shape)
nc.variables["mask_v"][:] = np.ones(lat_v.shape)
nc.variables["mask_psi"][:] = np.ones(lat_psi.shape)
nc.variables["angle"][:] = angle
nc.variables["rdrag"][:] = np.ones(lon.shape) * 0.0003
nc.variables["rdrag2"][:] = np.ones(lon.shape) * 0.003
nc.variables["visc_factor"][:] = np.ones(lon.shape)
nc.variables["diff_factor"][:] = np.ones(lon.shape)
nc.variables["ZoBot"][:] = np.ones(lon.shape) * 0.02
nc.sync()
return nc
def calc_latlon(llcrnrlat, llcrnrlon, reseta, resxi=None, rotate=0):
"""
Generate arrays for latitude and longitude for use in
creating simple grids.
NOTE: You can specify variational resolutions and rotations;
however, this uses a simple algorithm to step along from the
origin, and discrepancies are averaged together. THEREFORE,
if you specify inconsistent resolutions or rotations within
a single row or column, you may get results slightly
different than specified.
Parameters
----------
llcrnrlat : float,
Latitude for the lower, left of the grid
llcrnrlon : float,
Longitude for the lower, left of the grid
reseta : ndarray,
Resolution in meters in the eta-direction of each grid cell.
A 2D array that is the horizontal size of the grid (e.g.,
resolution.shape = (100,70)) and the values stored are
the desired resolution of the grid cell in m. Hence,
a 100 x 70 array of ones would create a grid that is
100 in the eta-direction, 70 in the xi-direction, and
a constant resolution of 1 m. If lat and lon are
specified as arrays, this is optional.
resxi : ndarray, optional,
Resolution in meters in the xi-direction of each grid cell.
This is the same as reseta; however, it is optional, and
is set to the same as reseta if not specified.
rotate : float or ndarray,
Amount to rotate the grid in degrees. If given as a scalar,
the entire grid is rotated at the same angle; otherwise, the
grid may have a curvilinear shape. The angle is geometric
(counter-clockwise).
Returns
-------
lat, lon : ndarray
Two arrays of size resolution containing the computed lat
and lons
Examples
--------
Create a grid of 1km resolution in both eta and xi,
rotated toward the SE by 33 degrees, with the lower left
point at 20N, 150E:
>>> res = np.ones((100,70)) * 1000
>>> lat, lon = calc_latlon(20, 150, res, rotate=-33)
"""
# Set up the resolutions
if resxi is None:
resxi = reseta
else:
if resxi.shape != reseta.shape:
raise AttributeError(
"xi and eta resolutions must be same size array")
# Since we are taking steps, average the values together as we
# can't take the final step
reseta[:-1, :] = 0.5 * (reseta[:-1, :] + reseta[1:, :])
resxi[:, :-1] = 0.5 * (resxi[:, :-1] + resxi[:, 1:])
# Set up the rotations
if np.isscalar(rotate):
rotate = np.ones(reseta.shape) * rotate
rotate = np.radians(rotate)
# Set up the lat and lon arrays
lat = np.ones(reseta.shape) * np.nan
lon = np.ones(reseta.shape) * np.nan
lat[0, 0] = llcrnrlat
lon[0, 0] = llcrnrlon
pi2 = np.pi / 2.0
# Loop over each row, and within the row, step along in the
# xi-direction before moving to the next row
for j in seapy.progressbar.progress(range(lat.shape[0] - 1)):
for i in range(lat.shape[1] - 1):
# Compute the local deltas
dlat = seapy.earth_distance(
lon[j, i], lat[j, i] - 0.5, lon[j, i], lat[j, i] + 0.5)
dlon = seapy.earth_distance(
lon[j, i] - 0.5, lat[j, i], lon[j, i] + 0.5, lat[j, i])
# Compute how far to step in xi
xdx = resxi[j, i + 1] * np.cos(rotate[j, i]) / dlon
xdy = resxi[j, i + 1] * np.sin(rotate[j, i]) / dlat
# Take the step
lat[j, i + 1] = lat[j, i] + xdy
lon[j, i + 1] = lon[j, i] + xdx
# Compute how far to step in eta
edx = reseta[j + 1, i] * np.cos(rotate[j, i] + pi2) / dlon
edy = reseta[j + 1, i] * np.sin(rotate[j, i] + pi2) / dlat
# Take the step
lat[j + 1, i] = lat[j, i] + edy
lon[j + 1, i] = lon[j, i] + edx
lon[-1, -1] = lon[-1, -2] + xdx
lat[-1, -1] = lat[-2, -1] + edy
return lat, lon
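# --- Editor's note: illustrative addition, not part of seapy. It combines the
# two functions above into one sketch; grid size, resolution, corner and file
# name are arbitrary example values.
def _ezgrid_example():
    # 50 x 40 grid of 2 km cells, lower-left corner at 20N / 150E,
    # rotated 15 degrees counter-clockwise
    res = np.ones((50, 40)) * 2000.0
    lat, lon = calc_latlon(20.0, 150.0, res, rotate=15)
    # writes 'example_grid.nc' (clobbering any existing file) and returns the netCDF4 object
    return create_grid('example_grid.nc', lat, lon)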
|
dalepartridge/seapy
|
roms/ezgrid.py
|
Python
|
mit
| 10,073
|
[
"NetCDF"
] |
21a72df0957f910625cdb8c9c2b856404465e70c48aab54db15c7c45f7b3881e
|
from handlers.base import BaseHandler
from tornado import gen
import momoko
import tornado.web
import logging
logger = logging.getLogger('kisspy.' + __name__)
MAX_ADMIN_UID=2
from models import A,B,C,User, Link, Visit, Config
from settings import MEDIA_ROOT
import os
import time
import forms
class BaseAdminMixin(object):
#def prepare(self):
# pass
@gen.coroutine
def get_visits(self, uid=None, page_size=100, offset=0):
params={'offset':offset,'limit':page_size}
visits = Visit.select().paginate((offset % page_size)+1, page_size)
if uid:
params.update({'uid':uid})
visits = (Visit.select(Visit, User.nickname)
.where(Visit.uid==uid)
.join(User)
.group_by(Visit)
.paginate((offset % page_size)+1, page_size))
else:
visits = (Visit.select(Visit, User.nickname)
.join(User)
.group_by(Visit)
.paginate((offset % page_size)+1, page_size))
raise gen.Return(visits)
@gen.coroutine
def get_visits_total(self, uid=None):
if uid:
results_count=Visit.select().where(Visit.uid==uid).count()
else:
results_count=Visit.select().count()
raise gen.Return(results_count)
@gen.coroutine
def check_superuser(self):
user=self.get_current_user()
if user:
logger.error(str(user))
if user['uid']>MAX_ADMIN_UID:
raise tornado.web.HTTPError(404,'Page Not Found!')
class AdminHandler(BaseAdminMixin, BaseHandler):
@tornado.web.authenticated
@gen.coroutine
def get(self):
user=self.get_current_user()
if user:
logger.error(str(user))
if user['uid']>MAX_ADMIN_UID:
raise tornado.web.HTTPError(404,'Page Not Found!')
users_total=User.select().count()
msg=None
kwargs={
'msg':msg,
'users_total':users_total,
}
self.render("admin/index.html",**kwargs)
class AdminSystemHandler(BaseHandler):
@tornado.web.authenticated
@gen.coroutine
def get(self):
user=self.get_current_user()
if user:
logger.error(str(user))
if user['uid']>MAX_ADMIN_UID:
raise tornado.web.HTTPError(404,'Page Not Found!')
users_total=yield self.get_users_total()
users=User.select()
try:
config=Config.select().where(Config.id==1).get()
except Config.DoesNotExist:
config=Config()
msg=self.get_argument('msg','')
page = int(self.get_argument('page','1'))
page_size = 100
offset = page_size*(page-1)
kwargs={
'msg':msg,
'users':users,
'users_total':users_total,
'config':config,
}
self.render("admin/system.html",**kwargs)
@tornado.web.authenticated
@gen.coroutine
def post(self):
#print self.request
#print self.request.body
#print self.request.arguments
#print self.request.files.keys()
#print self.request.files['logo']
#print '-'*80
config_id = int(self.get_argument('config_id','1'))
ip = self.get_argument('ip','')
domain = self.get_argument('domain','')
sitename = self.get_argument('sitename','')
siteurl = self.get_argument('siteurl','')
title = self.get_argument('title','')
keywords = self.get_argument('keywords','')
description = self.get_argument('description','')
copyright = self.get_argument('copyright','')
shutdown = int(self.get_argument('shutdown','0'))
reason = self.get_argument('reason','')
logo = self.get_argument('logo','')
print logo
try:
file_dict_list = self.request.files['logo']
except KeyError:
filename = None # no image uploaded
else:
for fd in file_dict_list:
filename = fd["filename"]
ext=filename.split('.')[-1]
filename = 'logo%s.%s' % (str(int(1000*(time.time()))), ext)
filepath = os.path.join(MEDIA_ROOT, 'images', filename)
f = open(filepath, "wb")
f.write(fd["body"])
f.close()
try:
config=Config.select().where(Config.id==config_id).get()
except:
config_count= Config.select().count()
if config_count>0:
raise tornado.web.HTTPError(500, 'Server Config is broken!')
else:
defaults={}
config=Config(sitename='ABCcms', siteurl='http://localhost')
config.save()
print config
print 'shutdown', bool(shutdown)
config.sitename=sitename
config.siteurl=siteurl
config.title=title
config.keywords=keywords
config.description=description
config.copyright=copyright
config.shutdown=bool(shutdown)
config.reason=reason
config.ip=ip
config.domain=domain
if filename:
config.logo=filename
config.save()
self.application.reload_config()
self.redirect('/admin/system')
class AdminThreadHandler(BaseHandler):
@tornado.web.authenticated
@gen.coroutine
def get(self):
user=self.get_current_user()
if user:
logger.error(str(user))
if user['uid']>MAX_ADMIN_UID:
raise tornado.web.HTTPError(404,'Page Not Found!')
users_total=yield self.get_users_total()
users=User.select()
msg=self.get_argument('msg','')
page = int(self.get_argument('page','1'))
page_size = 100
offset = page_size*(page-1)
kwargs={
'msg':msg,
'users':users,
'users_total':users_total,
}
self.render("admin/thread.html",**kwargs)
class AdminUserHandler(BaseHandler):
@tornado.web.authenticated
@gen.coroutine
def get(self):
user=self.get_current_user()
if user:
logger.error(str(user))
if user['uid']>MAX_ADMIN_UID:
raise tornado.web.HTTPError(404,'Page Not Found!')
users_total=yield self.get_users_total()
users=User.select()
msg=self.get_argument('msg','')
page = int(self.get_argument('page','1'))
page_size = 100
offset = page_size*(page-1)
kwargs={
'msg':msg,
'users':users,
'users_total':users_total,
}
self.render("admin/user.html",**kwargs)
class AdminUserAddHandler(BaseAdminMixin, BaseHandler):
def get(self):
users_total=User.select().count()
users=User.select()
form = forms.UserForm()
print form
print dir(form)
kwargs={
'form':form,
'users':users,
'users_total':users_total,
}
self.render('admin/user_add.html', **kwargs)
@gen.coroutine
def post(self):
form = forms.UserForm(self)
if form.validate():
self.write('Hello %s' % form.planet.data)
else:
self.render('index.html', form=form)
email = self.get_argument('email', '').strip()
username = self.get_argument('username', '').strip()
password1 = self.get_argument('password1', '').strip()
password2 = self.get_argument('password2', '').strip()
if password1 != password2:
error_msg = tornado.escape.url_escape("Password is not match!")
self.write(u'/user/register?error=' + error_msg)
return
if email == '':
error_msg = tornado.escape.url_escape("Email is required!")
self.redirect(u"/user/register?error=" + error_msg)
return
else:
if email.find('@')==-1:
error_msg = tornado.escape.url_escape("Email is invalid!")
self.redirect(u"/user/register?error=" + error_msg)
if not username:
username=email.split('@')[0]
exist,msg = yield self.exist(email=email, username=username)
if exist:
# exist user email or username
error_msg = tornado.escape.url_escape('Login name already taken')
self.redirect(u'/user/register?error=' + error_msg)
return
if password1:
password = password1
else:
error_msg = tornado.escape.url_escape('Password not set')
self.redirect(u'/user/register?error=' + error_msg)
return
user = {}
user['email'] = email
user['username'] = username
user['password'] = password
user = yield self.add_user(**user)
if user:
self.set_current_user(user)
self.redirect('/admin/user')
return
class AdminVisitHandler(BaseAdminMixin, BaseHandler):
@tornado.web.authenticated
@gen.coroutine
def get(self):
user=self.get_current_user()
if user:
logger.error(str(user))
if user['uid']>MAX_ADMIN_UID:
raise tornado.web.HTTPError(404,'Page Not Found!')
users_total=yield self.get_users_total()
users=User.select()
msg=self.get_argument('msg','')
page = int(self.get_argument('page','1'))
page_size = 100
offset = page_size*(page-1)
results_count=yield self.get_visits_total()
visits=yield self.get_visits(offset=offset)
kwargs={
'msg':msg,
'users':users,
'users_total':users_total,
'visits':visits,
'results_count':results_count,
'page_size':page_size,
'page':page
}
self.render("admin/visit.html",**kwargs)
class AdminLoginHandler(BaseAdminMixin, BaseHandler):
@tornado.web.authenticated
@gen.coroutine
def get(self):
user=self.get_current_user()
if user:
logger.error(str(user))
if user['uid']>MAX_ADMIN_UID:
raise tornado.web.HTTPError(404,'Page Not Found!')
kwargs={
}
self.render("admin/login.html",**kwargs)
class AdminLogoutHandler(BaseAdminMixin, BaseHandler):
@tornado.web.authenticated
@gen.coroutine
def get(self):
self.clear_cookie(self.djbhash('user'))
self.redirect('/')
class AdminChannelHandler(BaseHandler):
@tornado.web.authenticated
@gen.coroutine
def get(self):
#print repr(self.application.sitename)
user=self.get_current_user()
if user:
logger.error(str(user))
if user['uid']>MAX_ADMIN_UID:
raise tornado.web.HTTPError(404,'Page Not Found!')
users_total=yield self.get_users_total()
users=User.select()
channels=A.select()
try:
config=Config.select().where(Config.id==1).get()
except Config.DoesNotExist:
config=Config()
msg=self.get_argument('msg','')
page = int(self.get_argument('page','1'))
page_size = 100
offset = page_size*(page-1)
kwargs={
'msg':msg,
'users':users,
'channels': channels,
'users_total':users_total,
'config':config,
}
self.render("admin/channel.html",**kwargs)
@tornado.web.authenticated
@gen.coroutine
def post(self):
#print self.request
#print self.request.body
#print self.request.arguments
#print self.request.files.keys()
#print self.request.files['logo']
#print '-'*80
config_id = int(self.get_argument('config_id','1'))
ip = self.get_argument('ip','')
domain = self.get_argument('domain','')
sitename = self.get_argument('sitename','')
siteurl = self.get_argument('siteurl','')
title = self.get_argument('title','')
keywords = self.get_argument('keywords','')
description = self.get_argument('description','')
copyright = self.get_argument('copyright','')
shutdown = int(self.get_argument('shutdown','0'))
reason = self.get_argument('reason','')
logo = self.get_argument('logo','')
print logo
try:
file_dict_list = self.request.files['logo']
except KeyError:
filename = None # no image uploaded
else:
for fd in file_dict_list:
filename = fd["filename"]
ext=filename.split('.')[-1]
filename = 'logo%s.%s' % (str(int(1000*(time.time()))), ext)
filepath = os.path.join(MEDIA_ROOT, 'images', filename)
f = open(filepath, "wb")
f.write(fd["body"])
f.close()
try:
config=Config.select().where(Config.id==config_id).get()
except:
config_count= Config.select().count()
if config_count>0:
raise tornado.web.HTTPError(500, 'Server Config is broken!')
else:
defaults={}
config=Config(sitename='ABCcms', siteurl='http://localhost')
config.save()
print config
print 'shutdown', bool(shutdown)
config.sitename=sitename
config.siteurl=siteurl
config.title=title
config.keywords=keywords
config.description=description
config.copyright=copyright
config.shutdown=bool(shutdown)
config.reason=reason
config.ip=ip
config.domain=domain
if filename:
config.logo=filename
config.save()
self.redirect('/admin/channel')
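# --- Editor's note: illustrative addition, not part of the original module. The
# URL patterns below are hypothetical; the project's real routing table lives
# elsewhere, but this is the usual way these handlers would be mounted.
def _example_admin_routes():
    return [
        (r"/admin", AdminHandler),
        (r"/admin/system", AdminSystemHandler),
        (r"/admin/thread", AdminThreadHandler),
        (r"/admin/user", AdminUserHandler),
        (r"/admin/user/add", AdminUserAddHandler),
        (r"/admin/visit", AdminVisitHandler),
        (r"/admin/login", AdminLoginHandler),
        (r"/admin/logout", AdminLogoutHandler),
        (r"/admin/channel", AdminChannelHandler),
    ]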
|
kisspy/abcms
|
handlers/admin.py
|
Python
|
mit
| 14,043
|
[
"VisIt"
] |
12ca4b98af62cc99e9dcad52202c66ddfe478a574672254eb69d9318a18a3c12
|
#!/usr/bin/env python3
#
# Copyright 2020 SymbiFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# SPDX-License-Identifier: Apache-2.0
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'Sphinx Verilog Domain'
copyright = '2020, SymbiFlow Team'
author = 'SymbiFlow Team'
# The full version, including alpha/beta/rc tags
release = '0.1'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx_verilog_domain',
'symbolator_sphinx',
'sphinxcontrib_verilog_diagrams'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_symbiflow_theme'
# Values used for configuring Sphinx SymbiFlow Theme
html_theme_options = {
'header_links' : [
('Home', 'index', False, 'home'),
],
# Customize css colors.
# For details see link.
# https://getmdl.io/customize/index.html
#
# Values: amber, blue, brown, cyan, deep_orange, deep_purple, green, grey, indigo, light_blue,
# light_green, lime, orange, pink, purple, red, teal, yellow (Default: indigo)
'primary_color': 'red',
# Values: Same as primary_color. (Default: pink)
'accent_color': 'blue',
# Customize layout.
# For details see link.
# https://getmdl.io/components/index.html#layout-section
'fixed_drawer': True,
'fixed_header': True,
'header_waterfall': True,
'header_scroll': False,
# Render title in header.
# Values: True, False (Default: False)
'show_header_title': False,
# Render title in drawer.
# Values: True, False (Default: True)
'show_drawer_title': True,
# Render footer.
# Values: True, False (Default: True)
'show_footer': True
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
|
SymbiFlow/sphinx-verilog-domain
|
docs/source/conf.py
|
Python
|
isc
| 3,726
|
[
"Amber"
] |
8c77e532f051faa1b3d1f3df4c4d8e99388d4609790c13f058e64e072bcbfe6a
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Handles directives.
This converter removes the directive functions from the code and moves the
information they specify into AST annotations. It is a specialized form of
static analysis, one that is specific to AutoGraph.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gast
from tensorflow.contrib.autograph.core import converter
from tensorflow.contrib.autograph.lang import directives
from tensorflow.contrib.autograph.pyct import anno
from tensorflow.python.util import tf_inspect
ENCLOSING_LOOP = 'enclosing_loop'
def _map_args(call_node, function):
"""Maps AST call nodes to the actual function's arguments.
Args:
call_node: ast.Call
function: Callable[..., Any], the actual function matching call_node
Returns:
Dict[Text, ast.AST], mapping each of the function's argument names to
the respective AST node.
Raises:
ValueError: if the default arguments are not correctly set
"""
args = call_node.args
kwds = {kwd.arg: kwd.value for kwd in call_node.keywords}
call_args = tf_inspect.getcallargs(function, *args, **kwds)
# Keyword arguments not specified in kwds will be mapped to their defaults,
# which are Python values. Since we don't currently have a way to transform
# those into AST references, we simply remove them. By convention, directives
# use UNSPECIFIED as the default value for optional arguments. No other
# defaults should be present.
unexpected_defaults = []
for k in call_args:
if (k not in kwds
and call_args[k] not in args
and call_args[k] is not directives.UNSPECIFIED):
unexpected_defaults.append(k)
if unexpected_defaults:
raise ValueError('Unexpected keyword argument values, %s, for function %s'
% (zip(unexpected_defaults,
[call_args[k] for k in unexpected_defaults]),
function))
return {k: v for k, v in call_args.items() if v is not directives.UNSPECIFIED}
class DirectivesTransformer(converter.Base):
"""Parses compiler directives and converts them into AST annotations."""
def _process_symbol_directive(self, call_node, directive):
if len(call_node.args) < 1:
raise ValueError('"%s" requires a positional first argument'
' as the target' % directive.__name__)
target = call_node.args[0]
defs = anno.getanno(target, anno.Static.ORIG_DEFINITIONS)
for def_ in defs:
def_.directives[directive] = _map_args(call_node, directive)
return call_node
def _process_statement_directive(self, call_node, directive):
if self.local_scope_level < 1:
raise ValueError(
'"%s" must be used inside a statement' % directive.__name__)
target = self.get_local(ENCLOSING_LOOP)
node_anno = anno.getanno(target, converter.AgAnno.DIRECTIVES, {})
node_anno[directive] = _map_args(call_node, directive)
anno.setanno(target, converter.AgAnno.DIRECTIVES, node_anno)
return call_node
def visit_Expr(self, node):
if isinstance(node.value, gast.Call):
call_node = node.value
if anno.hasanno(call_node.func, 'live_val'):
live_val = anno.getanno(call_node.func, 'live_val')
if live_val is directives.set_element_type:
call_node = self._process_symbol_directive(call_node, live_val)
elif live_val is directives.set_loop_options:
call_node = self._process_statement_directive(call_node, live_val)
else:
return self.generic_visit(node)
return None # Directive calls are not output in the generated code.
return self.generic_visit(node)
# TODO(mdan): This will be insufficient for other control flow.
# That means that if we ever have a directive that affects things other than
# loops, we'll need support for parallel scopes, or have multiple converters.
def _track_and_visit_loop(self, node):
self.enter_local_scope()
self.set_local(ENCLOSING_LOOP, node)
node = self.generic_visit(node)
self.exit_local_scope()
return node
def visit_While(self, node):
return self._track_and_visit_loop(node)
def visit_For(self, node):
return self._track_and_visit_loop(node)
def transform(node, ctx):
return DirectivesTransformer(ctx).visit(node)
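# --- Editor's note: illustrative addition, not part of TensorFlow. This sketches
# the kind of user code the transformer above rewrites: the directive calls are
# matched in visit_Expr, turned into AST annotations, and dropped from the
# generated code. The option values are arbitrary examples.
def _example_user_fn(n):
  items = []
  directives.set_element_type(items, int)
  i = 0
  while i < n:
    directives.set_loop_options(parallel_iterations=10)
    items.append(i)
    i += 1
  return items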
|
aselle/tensorflow
|
tensorflow/contrib/autograph/converters/directives.py
|
Python
|
apache-2.0
| 5,008
|
[
"VisIt"
] |
d7264e1e86424c49e0d5028fed6220aeac35cf3c77df71b0f1ab24108a2f337a
|
#!/usr/bin/env python
"""kmos.gui.forms - GUI forms used by kmos.gui
The classes defined here heavily draw on the interface provided by
python-kiwi.
In the language of underlying MVC (Model-View-Controller) pattern these
classes form the controller. The view is defined through a *.glade XML file
and the models are instances of kmos.types.*
"""
# Copyright 2009-2013 Max J. Hoffmann (mjhoffmann@gmail.com)
# This file is part of kmos.
#
# kmos is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# kmos is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with kmos. If not, see <http://www.gnu.org/licenses/>.
# Standard library imports
import re
import copy
#gtk import
import pygtk
pygtk.require('2.0')
import gtk
import goocanvas
#kiwi imports
from kiwi.ui.delegates import ProxySlaveDelegate, GladeDelegate, \
SlaveDelegate, ProxyDelegate
from kiwi.ui.views import SlaveView
from kiwi.datatypes import ValidationError
from kiwi.ui.objectlist import Column
# own modules
from kmos.config import GLADEFILE
from kmos.utils import CorrectlyNamed, \
get_ase_constructor, \
col_str2tuple, \
jmolcolor_in_hex
from kmos.types import ProcessFormSite, Process, OutputItem, Coord, \
ConditionAction, Site
from kmos import evaluate_rate_expression
from kmos.types import parse_chemical_expression
# ASE import
import numpy as np
from ase.atoms import Atoms
from ase.data import covalent_radii
class MetaForm(ProxySlaveDelegate, CorrectlyNamed):
"""
Allows to enter meta information about the project.
Please enter author and email so people can credit you for the model.
Increasing the debug level makes the kmos backend create a lot of
output but is typically not needed.
"""
gladefile = GLADEFILE
toplevel_name = 'meta_form'
widgets = ['author', 'email', 'model_name', 'model_dimension', 'debug', ]
def __init__(self, model, project_tree):
ProxySlaveDelegate.__init__(self, model)
#self.model_dimension.set_sensitive(False)
self.project_tree = project_tree
self.author.set_tooltip_text(
'Give a name so people know who to credit for the model.')
self.email.set_tooltip_text(
'Enter an email address so people can get in touch with you.')
self.model_name.set_tooltip_text(
'Give a clear unique name, to identify the model.')
self.model_dimension.set_tooltip_text(
'The source code export function can generate ' +
'1d, 2d, and 3d programs. However this GUI currently only ' +
'supports 2d. 3d is still possible ' +
'by manipulating the project XML file by hand. The algorithm ' +
'is fast but very memory consuming, ' +
'so a 3d simulation might require considerably more RAM.')
self.debug.set_tooltip_text(
'Increasing the debug level might give hints if one suspects ' +
'errors in kmos itself. It does not help to debug your model. ' +
'So usually one wants to keep it at 0.')
self.author.grab_focus()
def on_model_name__validate(self, widget, model_name):
return self.on_name__validate(widget, model_name)
def on_model_name__content_changed(self, _text):
self.project_tree.update(self.model)
def on_model_dimension__content_changed(self, _widget):
self.project_tree.update(self.model)
class SpeciesListForm(ProxySlaveDelegate):
"""Allows to set the default species, `i. e.` the
system will be globally initialized with this species if
nothing else is set on a per site basis.
"""
gladefile = GLADEFILE
toplevel_name = 'species_list_form'
widgets = ['default_species']
def __init__(self, model, project_tree):
# this _ugly_ implementation is due to an apparent catch 22 bug in
# ProxyComboBox: if the value is already set, __init__ expects
# the value to be in the list, but you cannot fill the list before
# calling __init__
default_species = model.default_species
model.default_species = None
ProxySlaveDelegate.__init__(self, model)
self.default_species.prefill([x.name
for x in project_tree.get_speciess()],
sort=True)
self.default_species.select(default_species)
self.default_species.set_tooltip_text(
'The lattice will be initialized with this species by default\n'
+ 'but also every unspecified condition or action will be '
+ 'completed with this choice.\n'
+ 'So better only change this once at the beginning, if at all!')
class SpeciesForm(ProxySlaveDelegate, CorrectlyNamed):
"""Allows to define a new species. Required attribute is name. The
chosen color will only show up in the process editor. So choose something
you will remember and recognize.
The representation string is meant to be an ASE ase.atoms.Atoms constructor
that will show up in the ASE visualization.
"""
gladefile = GLADEFILE
toplevel_name = 'species_form'
widgets = ['name', 'color', 'representation']
def __init__(self, model, project_tree):
self.project_tree = project_tree
ProxySlaveDelegate.__init__(self, model)
self.name.grab_focus()
self.name.set_tooltip_text(
'The name here is arbitrary but you will have to type it many times. '
+ 'So you might want to use e.g. CO instead of carbon_monoxide.')
self.color.set_tooltip_text(
'Choose a color to represent this species in the process editor')
self.representation.set_tooltip_text(
'Set an ASE Atoms(\'...\')-like string for the representation in the '
+ 'auto-generated movie. Please only use \'\' for quotation.')
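# Illustrative example (an assumption, not taken from the original file): a
# typical representation string would be a plain ASE constructor call such as
#   Atoms('CO', [[0., 0., 0.], [0., 0., 1.13]])
# using only single quotes, as the tooltip above requests.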
def on_name__content_changed(self, _text):
self.project_tree.update(self.model)
class ParameterForm(ProxySlaveDelegate, CorrectlyNamed):
"""Allows to set parameter. These
parameters can be used in e.g. Transition State Theory
formula to calculate rate constants.
If 'adjustable' is activated then they maybe be changed via
the `kmos view` front end while watching the model run.
"""
gladefile = GLADEFILE
toplevel_name = 'parameter_form'
widgets = ['parameter_name',
'value',
'parameter_adjustable',
'parameter_min',
'parameter_max']
def __init__(self, model, project_tree):
self.project_tree = project_tree
ProxySlaveDelegate.__init__(self, model)
value = self.parameter_adjustable.get_active()
self.parameter_max.set_sensitive(value)
self.parameter_min.set_sensitive(value)
self.name.grab_focus()
self.parameter_adjustable.set_tooltip_text(
'Setting this adjustable will create a bar in the auto-generated ' +
'movie. Dragging this bar will adapt the barrier and recalculate ' +
'all rate constants. This only makes sense for physical ' +
'parameters such as a partial pressure but not for e.g. lattice size')
self.parameter_name.set_tooltip_text(
'Choose a sensible name that you remember later when typing rate ' +
'constant formulae. This should not contain spaces')
self.value.set_tooltip_text(
'This defines the initial value for the parameter.')
def on_parameter_adjustable__content_changed(self, _form):
value = self.parameter_adjustable.get_active()
self.parameter_max.set_sensitive(value)
self.parameter_min.set_sensitive(value)
def on_value__content_changed(self, _text):
self.project_tree.update(self.model)
def on_parameter_name__content_changed(self, _text):
self.project_tree.update(self.model)
self.project_tree.project_data.sort_by_attribute('name')
class LatticeForm(ProxySlaveDelegate):
"""Allows to set global lattice parameter such as the lattice vector,
a ASE representation string, and the default layer. The program will
be initialized using the default layer.
"""
gladefile = GLADEFILE
toplevel_name = 'lattice_form'
widgets = ['default_layer',
'lattice_representation']
def __init__(self, model, dimension, project_tree):
default_layer = model.default_layer
model.default_layer = None
ProxySlaveDelegate.__init__(self, model)
self.default_layer.prefill([x.name
for x in project_tree.get_layers()],
sort=True)
self.default_layer.select(default_layer)
self.default_layer.set_tooltip_text(
'By default the system will be initialized with this layer. '
+ 'This only matters if using more than one layer '
+ '(multi-lattice kMC).')
def on_add_structure__clicked(self, _):
try:
import ase.io
except:
print('Need ASE to do this.')
return
filechooser = gtk.FileChooserDialog(
title='Open structure file',
action=gtk.FILE_CHOOSER_ACTION_OPEN,
buttons=(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
gtk.STOCK_OK, gtk.RESPONSE_OK))
resp = filechooser.run()
filename = filechooser.get_filename()
filechooser.destroy()
if resp == gtk.RESPONSE_OK and filename:
try:
structure = ase.io.read(filename)
if isinstance(structure, list):
structure = structure[-1]
except:
print('Could not open this file. Please choose')
print('a format that ASE can understand')
return
cur_text = self.lattice_representation.get_buffer().get_text(
self.lattice_representation.get_buffer().get_start_iter(),
self.lattice_representation.get_buffer().get_end_iter())
if not cur_text:
structures = []
else:
structures = eval(cur_text)
structures.append(structure)
self.lattice_representation.get_buffer().set_text(
'[%s]' % (
', '.join(
[get_ase_constructor(x) for x in structures])))
class LayerEditor(ProxySlaveDelegate, CorrectlyNamed):
"""Widget to define a lattice through the sites
in the unit cell (i.e. the `basis` in solid state language).
"""
gladefile = GLADEFILE
toplevel_name = 'layer_form'
widgets = ['layer_name', 'layer_color']
def __init__(self, model, project_tree):
self.project_tree = project_tree
ProxySlaveDelegate.__init__(self, model)
self.canvas = goocanvas.Canvas()
self.root = self.canvas.get_root_item()
self.canvas.set_size_request(400, 400)
self.canvas.set_flags(gtk.HAS_FOCUS | gtk.CAN_FOCUS)
self.canvas.connect('button-press-event', self.on_button_press)
self.layer_nr = self.project_tree.get_layers().index(model)
self.radius_scale = 20
self.scale = 20
self.offset = np.array([150, 300, 0])
self.lattice_pad.add(self.canvas)
self.previous_layer_name = self.layer_name.get_text()
self.redraw()
self.layer_name.set_tooltip_text(
'A name is only relevant if you are using more than one\n'
+ 'layer in your model.')
def _get_atoms(self):
if self.project_tree.lattice.representation:
representations = eval(self.project_tree.lattice.representation)
if len(representations) > self.layer_nr:
atoms = representations[self.layer_nr]
else:
atoms = representations[0]
else:
atoms = Atoms()
return atoms
def redraw(self):
"""Draw the current lattice with unit cell
and sites defined on it.
"""
# draw atoms in background
atoms = self._get_atoms()
self.lower_left = (self.offset[0],
self.offset[1]
+ self.scale * atoms.cell[1, 1])
self.upper_right = (self.offset[0]
+ self.scale * atoms.cell[0, 0],
self.offset[1])
big_atoms = atoms * (3, 3, 1)
for atom in sorted(big_atoms, key=lambda x: x.position[2]):
i = atom.number
radius = self.radius_scale * covalent_radii[i]
color = jmolcolor_in_hex(i)
X = atom.position[0]
Y = - atom.position[1]
goocanvas.Ellipse(parent=self.root,
center_x=(self.offset[0] + self.scale * X),
center_y=(self.offset[1] + self.scale * Y),
radius_x=radius,
radius_y=radius,
stroke_color='black',
fill_color_rgba=color,
line_width=1.0)
# draw unit cell
A = tuple(self.offset[:2])
B = (self.offset[0] + self.scale * (atoms.cell[0, 0]),
self.offset[1] + self.scale * (atoms.cell[0, 1]))
C = (self.offset[0] + self.scale * (atoms.cell[0, 0]
+ atoms.cell[1, 0]),
self.offset[1] - self.scale * (atoms.cell[0, 1]
+ atoms.cell[1, 1]))
D = (self.offset[0] + self.scale * (atoms.cell[1, 0]),
self.offset[1] - self.scale * (atoms.cell[1, 1]))
goocanvas.Polyline(parent=self.root,
close_path=True,
points=goocanvas.Points([A, B, C, D]),
stroke_color='black',)
# draw sites
for x in range(3):
for y in range(3):
for site in self.model.sites:
# convert to screen coordinates
pos = np.dot(site.pos + np.array([x, y, 0]), atoms.cell)
pos *= np.array([1, -1, 1])
pos *= self.scale
pos += self.offset
X = pos[0]
Y = pos[1]
o = goocanvas.Ellipse(parent=self.root,
center_x=X,
center_y=Y,
radius_x=.3 * self.radius_scale,
radius_y=.3 * self.radius_scale,
stroke_color='black',
fill_color='white',
line_width=1.0,)
o.site = site
o.connect('query-tooltip', self.query_tooltip)
self.canvas.hide()
self.canvas.show()
def query_tooltip(self, _canvas, widget, tooltip):
tooltip.set_text(widget.site.name)
return True
def on_button_press(self, _item, event):
atoms = self._get_atoms()
pos = (np.array([event.x, event.y, 0]) - self.offset)
# convert from screen coordinates
pos *= [1, -1, 1]
pos /= self.scale
pos = np.linalg.solve(atoms.cell.T, pos)
for site in self.model.sites:
d = np.sqrt((pos[0] - site.pos[0]) ** 2 +
(pos[1] - site.pos[1]) ** 2)
if d < 0.10:
SiteForm(site, self, self.project_tree, self.model)
break
else:
new_site = Site()
new_site.name = ''
new_site.pos = pos
# Put z position slightly above
# top atom as a first guess.
# Assumes a binding distance of 1.3 Angstrom
atoms = self._get_atoms()
z_pos = atoms.get_positions()[:, 2]
z_pos = z_pos if len(z_pos) else [0]
Z = max(z_pos) + 1.3
z = Z / atoms.cell[2, 2]
new_site.pos[2] = z
new_site.layer = self.model.name
self.model.sites.append(new_site)
SiteForm(new_site, self, self.project_tree, self.model)
def on_layer_name__validate(self, widget, layer_name):
# TODO: validate lattice name to be unique
return self.on_name__validate(widget, layer_name)
def on_layer_name__content_changed(self, widget):
# Sync layer names in process coords
new_layer_name = widget.get_text()
for process in self.project_tree.get_processes():
for elem in process.condition_list:
if elem.coord.layer == self.previous_layer_name:
elem.coord.layer = new_layer_name
for elem in process.action_list:
if elem.coord.layer == self.previous_layer_name:
elem.coord.layer = new_layer_name
self.previous_layer_name = new_layer_name
self.project_tree.update(self.model)
class SiteForm(ProxyDelegate, CorrectlyNamed):
"""Allows to create or modify a site when setting up a unit cell.
"""
gladefile = GLADEFILE
toplevel_name = 'site_form'
widgets = ['site_name',
'default_species',
'site_tags']
def __init__(self, site, parent, project_tree, layer):
self.saved_state = copy.deepcopy(site)
self.project_tree = project_tree
default_species = site.default_species
site.default_species = None
ProxyDelegate.__init__(self, site)
# fill species dialog with correct available choices
self.site_default_species.prefill([x.name
for x in
project_tree.get_speciess()],
sort=True)
if default_species == 'default_species':
self.site_default_species.select(
self.project_tree.species_list.default_species)
else:
self.site_default_species.select(default_species)
self.model.default_species = self.site_default_species.get_selected()
self.site_ok.grab_default()
self.site = site
# set site coordinates
self.sitevect_x.set_text(str(site.pos[0]))
self.sitevect_y.set_text(str(site.pos[1]))
self.sitevect_z.set_text(str(site.pos[2]))
self.parent = parent
self.project_tree = project_tree
self.layer = layer
self.show_all()
self.site_name.set_tooltip_text(
'The site name has to uniquely identify a site (at least '
'within each layer for multi-lattice mode). You may have to '
'type this name a lot, so keep '
'it short but unambiguous. '
'To delete a site, erase its name.')
def on_sitevect_x__activate(self, _):
self.on_site_ok__clicked(_)
def on_sitevect_y__activate(self, _):
self.on_site_ok__clicked(_)
def on_sitevect_z__activate(self, _):
self.on_site_ok__clicked(_)
def on_site_name__activate(self, _):
self.on_site_ok__clicked(_)
def on_site_name__validate(self, _widget, site_name):
"""check if other site already has the name"""
if [x for x in self.layer.sites if x.name == site_name]:
self.site_ok.set_sensitive(False)
return ValidationError('Site name needs to be unique')
else:
self.site_ok.set_sensitive(True)
def on_site_cancel__clicked(self, _):
"""If we click cancel revert to previous state
or don't add site, if new."""
if self.saved_state.name:
# if site existed, reset to previous state
self.layer.sites.remove(self.site)
self.layer.sites.append(self.saved_state)
else:
# if site did not exist previously, remove completely
self.layer.sites.remove(self.site)
self.hide()
self.parent.redraw()
def on_site_ok__clicked(self, _button):
self.model.default_species = self.site_default_species.get_selected()
if not len(self.site_name.get_text()):
self.layer.sites.remove(self.model)
self.hide()
self.parent.redraw()
class ProcessForm(ProxySlaveDelegate, CorrectlyNamed):
"""Allows to create and manipulate a process by dragging
species onto respective sites. The lower species
represents a condition, the upper one an action.
Rate constants can be entered directly using all defined parameters.
The tooltip shows the current value if all is entered correctly.
"""
gladefile = GLADEFILE
toplevel_name = 'process_form'
widgets = ['process_name',
'rate_constant',
'process_enabled',
'chemical_expression']
z = 5 # z as in zoom
l = 500 # l as in length
r_cond = 15.
r_act = 10.
r_reservoir = 5.
r_site = 5. # where the center unit cell is in the drawing
X = 2
Y = 2
def __init__(self, process, project_tree):
self.process = process
self.project_tree = project_tree
ProxySlaveDelegate.__init__(self, process)
expression = self.generate_expression()
self.chemical_expression.update(expression, )
self.radius_scale = 20
self.scale = 20
self.offset = np.array([150, 300, 0])
self.draw_from_data()
self.process_name.set_tooltip_text(
'This name has to uniquely identify the process e.g. co_diff_right')
self.chemical_expression.set_tooltip_text(
'This is a fast way to define a process e.g. CO@cus->CO@bridge ' +
'to declare a CO diffusion from site cus to site bridge or ' +
'CO@cus->CO@cus.(0,1) for a CO diffusion in the up direction. Hit ' +
'ENTER to update the graphical representation.')
self.rate_constant.curr_value = 0.0
expr = self.rate_constant.get_text()
if not expr:
# if nothing entered show explanation
self.rate_constant.set_tooltip_text((
'Python has to be able to evaluate this expression to a ' +
'plain real number. One can use standard mathematical ' +
'functions, parameters that are defined under "Parameters", ' +
'or constants and conversion factors such as c, h, e, ' +
'kboltzmann, pi, bar, angstrom'))
else:
try:
self.rate_constant.set_tooltip_text(
'Current value: %.5e s^{-1}' %
evaluate_rate_expression(expr,
self.project_tree.get_parameters()))
except Exception, e:
self.rate_constant.set_tooltip_text(str(e))
rate_constant_terms = ['bar',
'beta',
'eV',
'exp',
'h',
'kboltzmann',
'umass']
for param in self.project_tree.get_parameters():
rate_constant_terms.append(param.name)
self.rate_constant.prefill(rate_constant_terms)
chem_exp_terms = ['->', ]
for species in self.project_tree.get_speciess():
chem_exp_terms.append(species.name)
self.chemical_expression.prefill(chem_exp_terms)
def generate_expression(self):
expr = ''
if not self.process.condition_list + self.process.action_list:
return expr
for i, condition in enumerate(self.process.condition_list):
if i > 0:
expr += ' + '
expr += '%s@%s' % (condition.species, condition.coord.name)
expr += ' -> '
for i, action in enumerate(self.process.action_list):
if i > 0:
expr += ' + '
expr += '%s@%s' % (action.species, action.coord.name)
return expr
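# Worked example for generate_expression() (species and site names are
# illustrative only): with condition_list = [CO@cus, empty@bridge] and
# action_list = [empty@cus, CO@bridge] the method returns
#   'CO@cus + empty@bridge -> empty@cus + CO@bridge'
# Note that only coord.name is used here, so relative offsets are not part of
# the generated string.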
def on_rate_constant__validate(self, _widget, expr):
try:
self.rate_constant.set_tooltip_text('Current value: %.2e s^{-1}' %
evaluate_rate_expression(expr,
self.project_tree.get_parameters()))
except Exception, e:
return ValidationError(e)
def on_chemical_expression__activate(self, entry):
text = entry.get_text()
if not text:
self.process.condition_list = []
self.process.action_list = []
self.draw_from_data()
return
# Delete trailing plusses
text = re.sub(r'\s*\+\s*$', '', text)
# default to empty right-hand side if not existent
while text and text[-1] in '-.':
text = text[:-1]
if not '->' in text:
text += '->'
try:
parse_chemical_expression(eq=text,
process=self.process,
project_tree=self.project_tree)
self.process.condition_list = []
self.process.action_list = []
parse_chemical_expression(eq=text,
process=self.process,
project_tree=self.project_tree)
except Exception, e:
# first remove last term and try again
try:
print("Error ...")
text = re.sub(r'\+[^+]*$', '', text)
parse_chemical_expression(eq=text,
process=self.process,
project_tree=self.project_tree)
self.process.condition_list = []
self.process.action_list = []
parse_chemical_expression(eq=text,
process=self.process,
project_tree=self.project_tree)
except Exception, e:
print("Fatal Error ... %s" % e)
self.process.condition_list = []
self.process.action_list = []
finally:
self.draw_from_data()
def query_tooltip(self, item, x, y, keyboard_mode, tooltip, *args, **kwargs):
tooltip.set_text(item.tooltip_text)
return True
def on_lattice(self, x, y):
"""Returns True if (x, y) is in lattice box
"""
return 10 < x < 510 and 80 < y < 580
def button_press(self, _, item, dummy):
coords = item.get_coords()
if item.state == 'reservoir':
o = CanvasOval(self.motion_layer,
*coords, filled=True, bg=item.bg)
o.connect('button-press-event', self.button_press)
o.connect('motion-notify-event', self.drag_motion)
o.connect('button-release-event', self.button_release)
o.state = 'from_reservoir'
o.species = item.species
self.item = o
self.item.clicked = True
self.item.father = item
self.prev_pos = self.item.get_center()
self.canvas.redraw()
def drag_motion(self, _widget, _item, event):
if self.item.clicked:
d = event.x - self.prev_pos[0], event.y - self.prev_pos[1]
self.item.move(*d)
self.prev_pos = event.x, event.y
#@verbose
def button_release(self, _, dummy, event):
self.item.clicked = False
if self.item.state == 'from_reservoir':
if not self.on_lattice(event.x, event.y):
self.item.delete()
else:
close_sites = self.site_layer.find_closest(
event.x,
event.y,
halo=(.2 * self.l) / self.z)
if close_sites:
closest_site = min(close_sites,
key=lambda i:
(i.get_center()[0] - event.x) ** 2
+ (i.get_center()[1] - event.y) ** 2)
coord = closest_site.get_center()
self.item.set_center(*coord)
if not self.process.condition_list \
+ self.process.action_list:
# if no condition or action is defined yet,
# we need to set the center of the editor
self.X = closest_site.i
self.Y = closest_site.j
species = self.item.species
offset = closest_site.i - self.X, closest_site.j - self.Y
name = closest_site.name
layer = closest_site.layer
kmc_coord = Coord(offset=offset,
name=name,
layer=layer)
condition_action = ConditionAction(species=species,
coord=kmc_coord)
if [x for x in self.condition_layer
if x.get_center() == coord]:
self.item.new_parent(self.action_layer)
self.item.set_radius(self.r_act)
self.process.action_list.append(condition_action)
else:
self.item.new_parent(self.condition_layer)
self.item.set_radius(self.r_cond)
self.process.condition_list.append(condition_action)
else:
self.item.delete()
self.chemical_expression.update(self.generate_expression(), )
self.canvas.redraw()
def draw_from_data_old(self):
"""Places circles on the current lattice according
to the conditions and actions defined
"""
def get_species_color(species):
return [x for x in self.project_tree.get_speciess()
if x.name == species][0].color
white = col_str2tuple('#ffffff')
black = col_str2tuple('#000000')
if hasattr(self, 'canvas'):
self.process_pad.remove(self.canvas)
self.canvas = Canvas(bg=white, fg=white)
self.canvas.set_flags(gtk.HAS_FOCUS | gtk.CAN_FOCUS)
self.canvas.grab_focus()
self.canvas.show()
self.process_pad.add(self.canvas)
self.lattice_layer = CanvasLayer()
self.canvas.append(self.lattice_layer)
self.site_layer = CanvasLayer()
self.canvas.append(self.site_layer)
self.condition_layer = CanvasLayer()
self.canvas.append(self.condition_layer)
self.action_layer = CanvasLayer()
self.canvas.append(self.action_layer)
self.frame_layer = CanvasLayer()
self.canvas.append(self.frame_layer)
self.motion_layer = CanvasLayer()
self.canvas.append(self.motion_layer)
# draw lattice
for i in range(self.z):
CanvasLine(self.lattice_layer,
0, i * (self.l / self.z),
500, i * (self.l / self.z),
line_width=1, fg=(.6, .6, .6))
for i in range(self.z):
CanvasLine(self.lattice_layer,
i * (self.l / self.z), 0,
i * (self.l / self.z), 500,
line_width=1, fg=(.6, .6, .6))
active_layers = [x for x in self.project_tree.get_layers()
if x.active]
site_list = []
for active_layer in active_layers:
for site in active_layer.sites:
form_site = ProcessFormSite(name=site.name,
pos=site.pos,
layer=active_layer.name,
color=active_layer.color)
site_list.append(form_site)
for i in range(self.z + 1):
for j in range(self.z + 1):
for site in site_list:
color = col_str2tuple(site.color)
if i == self.X and j == self.Y:
l_site = CanvasOval(self.site_layer, 0, 0, 10, 10,
fg=color)
else:
l_site = CanvasOval(self.site_layer, 0, 0, 10, 10,
fg=color)
l_site.set_center(self.l /
self.z * (i + float(site.pos[0])),
500 - self.l /
self.z * (j + float(site.pos[1])))
l_site.connect('query-tooltip', self.query_tooltip)
# 500 - ... for having scientific coordinates
# and not screen coordinates
l_site.set_radius(5)
l_site.i = i
l_site.j = j
if len(active_layers) > 1:
l_site.tooltip_text = '%s.(%s,%s).%s' % (site.name,
i - self.X,
j - self.Y,
site.layer)
else:
l_site.tooltip_text = '%s.(%s,%s)' % (site.name,
i - self.X,
j - self.Y)
l_site.name = site.name
l_site.offset = (i - self.X, j - self.Y)
l_site.layer = site.layer
# draw frame
frame_col = (.21, .35, .42)
CanvasRect(self.frame_layer, 0, 0, 520, 80, fg=frame_col,
bg=frame_col,
filled=True)
CanvasRect(self.frame_layer, 0, 0, 10, 580, fg=frame_col,
bg=frame_col,
filled=True)
CanvasRect(self.frame_layer, 510, 0, 520, 580, fg=frame_col,
bg=frame_col,
filled=True)
CanvasRect(self.frame_layer, 0, 580, 520, 590, fg=frame_col,
bg=frame_col,
filled=True)
CanvasText(self.frame_layer, 10, 10, size=8, text='Reservoir Area')
CanvasText(self.frame_layer, 10, 570, size=8, text='Lattice Area')
# draw reservoir circles
for k, species in enumerate(self.project_tree.get_speciess()):
color = col_str2tuple(species.color)
o = CanvasOval(self.frame_layer,
30 + k * 50,
30, 50 + k * 50,
50,
filled=True,
bg=color)
o.species = species.name
o.tooltip_text = species.name # for tooltip
o.connect('button-press-event', self.button_press)
#o.connect('motion-notify-event', self.drag_motion)
o.connect('button-release-event', self.button_release)
o.connect('query-tooltip', self.query_tooltip)
o.state = 'reservoir'
self.lattice_layer.move_all(10, 80)
self.site_layer.move_all(10, 80)
# attributes needed for moving objects
self.item = None
self.prev_pos = None
black = col_str2tuple('#003333')
for elem in self.process.condition_list:
matching_sites = [x for x in self.site_layer
if isinstance(x, CanvasOval)
and x.i == self.X + elem.coord.offset[0]
and x.j == self.Y + elem.coord.offset[1]
and x.name == elem.coord.name
and x.layer == elem.coord.layer]
if matching_sites:
coords = matching_sites[0].get_coords()
color = get_species_color(elem.species)
color = col_str2tuple(color)
o = CanvasOval(self.condition_layer,
bg=color,
fg=black,
filled=True, outline=True)
o.coords = coords
o.connect('button-press-event',
self.on_condition_action_clicked)
o.set_radius(self.r_cond)
o.type = 'condition'
o.condition = elem
o.tooltip_text = '%s@%s' % (elem.species, elem.coord) # for tooltip
o.connect('query-tooltip', self.query_tooltip)
for elem in self.process.action_list:
matching_sites = [x for x in self.site_layer
if isinstance(x, CanvasOval)
and x.i == self.X + elem.coord.offset[0]
and x.j == self.Y + elem.coord.offset[1]
and x.name == elem.coord.name
and x.layer == elem.coord.layer]
if matching_sites:
coords = matching_sites[0].get_coords()
if elem.species[0] == '^':
color = get_species_color(elem.species[1:])
layer = self.action_layer
radius = self.r_act
line_width = 2.0
elif elem.species[0] == '$':
color = get_species_color(elem.species[1:])
layer = self.condition_layer
radius = self.r_cond
line_width = 2.0
else:
color = get_species_color(elem.species)
layer = self.action_layer
radius = self.r_act
line_width = 1.0
color = col_str2tuple(color)
o = CanvasOval(layer,
bg=color,
fg=black,
line_width=line_width,
filled=True,
outline=True)
o.coords = coords
o.connect('button-press-event',
self.on_condition_action_clicked)
o.set_radius(radius)
o.type = 'action'
o.action = elem
o.tooltip_text = '%s@%s' % (elem.species, elem.coord) # for tooltip
o.connect('query-tooltip', self.query_tooltip)
def draw_from_data(self):
atoms = self._get_atoms()
def toscrn(coord,
screen_size=(500, 500),
scale=None,
offset=None):
if scale is None:
scale = min(screen_size[0]/(atoms.cell[0] + atoms.cell[1])[0],
screen_size[1]/(atoms.cell[0] + atoms.cell[1])[1])
scale /= (zoom + 1)
if offset is None:
offset = ((screen_size[0] - zoom*scale*(atoms.cell[0] + atoms.cell[1])[0])/2,
(screen_size[1] - zoom*scale*(atoms.cell[0] + atoms.cell[1])[1])/2,)
return (scale * coord[0] + offset[0],
screen_size[1] - (scale * coord[1] + offset[1]))
zoom = 3
center_x = zoom / 2
center_y = zoom / 2
if hasattr(self, 'canvas'):
self.process_pad.remove(self.canvas)
canvas = goocanvas.Canvas()
self.canvas = canvas
root = canvas.get_root_item()
canvas.set_flags(gtk.HAS_FOCUS | gtk.CAN_FOCUS)
canvas.set_property('has-tooltip', True)
#canvas.grab_focus()
canvas.show()
self.process_pad.add(canvas)
radius = 10
# draw lattice
for i in range(zoom + 1):
for _0, _1, _2, _3 in [[i, i, 0, zoom],
[0, zoom, i, i]]:
points = goocanvas.Points([
toscrn(atoms.cell[0]*_0 + atoms.cell[1]*_2),
toscrn(atoms.cell[0]*_1 + atoms.cell[1]*_3),
])
goocanvas.Polyline(parent=root,
points=points,
stroke_color='black',
fill_color='white',
line_width=1.0)
# emphasize central cell
points = goocanvas.Points([
toscrn(atoms.cell[0]*center_x + atoms.cell[1]*center_x),
toscrn(atoms.cell[0]*center_x + atoms.cell[1]*(center_x + 1)),
toscrn(atoms.cell[0]*(center_x + 1) + atoms.cell[1]*(center_x + 1)),
toscrn(atoms.cell[0]*(center_x + 1) + atoms.cell[1]*center_x),
toscrn(atoms.cell[0]*center_x + atoms.cell[1]*center_x),
])
goocanvas.Polyline(parent=root,
points=points,
stroke_color='black',
fill_color='white',
line_width=2.0)
# draw sites
for x in range(zoom):
for y in range(zoom):
sites = self.project_tree.get_layers()[0].sites
for site in sites:
X, Y = toscrn(x*atoms.cell[0]
+ y*atoms.cell[1]
+ np.inner(atoms.cell.T, site.pos))
tooltip = '%s.(%s, %s, 0).%s' % (site.name,
x-1, y-1,
self.project_tree.get_layers()[0].name
)
o = goocanvas.Ellipse(parent=root,
center_x=X,
center_y=Y,
radius_x=.4 * radius,
radius_y=.4 * radius,
stroke_color='black',
fill_color='white',
line_width=1.0,
tooltip=tooltip,
)
# draw reservoir circles
for k, species in enumerate(self.project_tree.get_speciess()):
color = col_str2tuple(species.color)
o = goocanvas.Ellipse(parent=root,
center_x=30 + k * 50,
center_y=30,
radius_x=0.8*radius,
radius_y=0.8*radius,
stroke_color='black',
fill_color_rgba=eval('0x' + species.color[1:] + 'ff' ),
tooltip=species.name,
)
for elem in self.process.condition_list:
pos = [x.pos
for layer in self.project_tree.get_layers()
for x in layer.sites
if x.name == elem.coord.name
][0]
species_color = [x.color for x in self.project_tree.get_speciess()
if x.name == elem.species.split(' or ')[0]][0]
center = toscrn(np.inner(pos + elem.coord.offset + center_x, atoms.cell.T))
tooltip = 'Condition: %s@%s.%s.%s' % (elem.species,
elem.coord.name,
tuple(elem.coord.offset),
elem.coord.layer) # for tooltip
o = goocanvas.Ellipse(parent=root,
center_x=center[0],
center_y=center[1],
radius_x=0.8*radius,
radius_y=0.8*radius,
stroke_color='black',
fill_color_rgba=eval('0x' + species_color[1:] + 'ff' ),
tooltip=tooltip,
)
for elem in self.process.action_list:
species_color = [x.color for x in self.project_tree.get_speciess()
if x.name == elem.species][0]
pos = [x.pos
for layer in self.project_tree.get_layers()
for x in layer.sites
if x.name == elem.coord.name
][0]
center = toscrn(np.inner(pos + elem.coord.offset + center_x, atoms.cell.T))
tooltip = 'Action: %s@%s.%s.%s' % (elem.species,
elem.coord.name,
tuple(elem.coord.offset),
elem.coord.layer) # for tooltip
o = goocanvas.Ellipse(parent=root,
center_x=center[0],
center_y=center[1],
radius_x=0.4*radius,
radius_y=0.4*radius,
stroke_color='black',
fill_color_rgba=eval('0x' + species_color[1:] + 'ff' ),
tooltip=tooltip,
)
def _get_atoms(self, layer_nr=0):
if self.project_tree.lattice.representation:
representations = eval(self.project_tree.lattice.representation)
if len(representations) > layer_nr:
atoms = representations[layer_nr]
else:
atoms = representations[0]
else:
atoms = Atoms()
return atoms
def on_condition_action_clicked(self, _canvas, widget, event):
if event.button == 2:
if widget.type == 'action':
self.process.action_list.remove(widget.action)
elif widget.type == 'condition':
self.process.condition_list.remove(widget.condition)
widget.delete()
def on_process_name__content_changed(self, _text):
self.project_tree.project_data.sort_by_attribute('name')
self.project_tree.update(self.process)
def on_rate_constant__content_changed(self, _text):
self.project_tree.update(self.process)
class BatchProcessForm(SlaveDelegate):
"""Allows to enter many processes at once. The format is one
process per line in the form::
[process name] ; [chemical expression] ; [rate constant]
One can omit the fields but not the semicolon.
"""
gladefile = GLADEFILE
toplevel_name = 'batch_process_form'
def __init__(self, project_tree):
self.project_tree = project_tree
SlaveDelegate.__init__(self)
def on_btn_evaluate__clicked(self, _):
batch_buffer = self.batch_processes.get_buffer()
bounds = batch_buffer.get_bounds()
text = batch_buffer.get_text(*bounds)
text = text.split('\n')
for i, line in enumerate(text):
# Ignore empty lines
if not line.count(';'):
continue
line = line.split(';')
name = line[0]
if len(line) == 1:
rate_constant = ''
elif len(line) == 2:
rate_constant = ''
elif len(line) == 3:
rate_constant = line[2]
else:
raise UserWarning(
"There are too many ';' in your expression %s" % line)
process = Process(name=name, rate_constant=rate_constant)
try:
parse_chemical_expression(eq=line[1],
process=process,
project_tree=self.project_tree)
except:
raise Exception(
("Found an error in your chemical expression(line %s):\n"\
"%s") % (i + 1, line[1]))
else:
# replace any existing process with identical names
for dublette_proc in [x for x in
self.project_tree.process_list
if x.name == name]:
self.project_tree.process_list.remove(dublette_proc)
self.project_tree.append(self.project_tree.process_list_iter,
process)
batch_buffer.delete(*bounds)
class OutputForm(GladeDelegate):
"""Not implemented yet
"""
gladefile = GLADEFILE
toplevel_name = 'output_form'
widgets = ['output_list']
def __init__(self, output_list, project_tree):
GladeDelegate.__init__(self)
self.project_tree = project_tree
self.output_list_data = output_list
self.output_list.set_columns([Column('name',
data_type=str,
editable=True, sorted=True),
Column('output',
data_type=bool,
editable=True)])
for item in self.output_list_data:
self.output_list.append(item)
self.output_list.show()
self.output_list.grab_focus()
def on_add_output__clicked(self, _):
output_form = gtk.MessageDialog(parent=None,
flags=gtk.DIALOG_MODAL,
type=gtk.MESSAGE_QUESTION,
buttons=gtk.BUTTONS_OK_CANCEL,
message_format='Please enter a new ' \
+ 'output: examples are a species ' \
+ 'or species@site')
output_form.set_flags(gtk.CAN_DEFAULT | gtk.CAN_FOCUS)
output_form.set_default_response(gtk.RESPONSE_OK)
output_form.set_default(
output_form.get_widget_for_response(gtk.RESPONSE_OK))
form_entry = gtk.Entry()
def activate_default(_):
output_form.activate_default()
form_entry.connect('activate', activate_default)
output_form.vbox.pack_start(form_entry)
output_form.vbox.show_all()
res = output_form.run()
output_str = form_entry.get_text()
output_form.destroy()
if res == gtk.RESPONSE_OK:
output_item = OutputItem(name=output_str, output=True)
self.output_list.append(output_item)
self.output_list_data.append(output_item)
class InlineMessage(SlaveView):
"""Return a nice little field with a text message on it
"""
gladefile = GLADEFILE
toplevel_name = 'inline_message'
widgets = ['message_label']
def __init__(self, message=''):
SlaveView.__init__(self)
self.message_label.set_text(message)
|
mieand/kmos
|
kmos/gui/forms.py
|
Python
|
gpl-3.0
| 52,360
|
[
"ASE"
] |
b67db1f81649ed60c22861eafb375f0d1960db8a50d15ad672a34b82d1d8d2b3
|
#!/usr/bin/python
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for taxonomy/wifi.py."""
__author__ = 'dgentry@google.com (Denton Gentry)'
import unittest
import dhcp
import wifi
class WifiTaxonomyTest(unittest.TestCase):
def setUp(self):
dhcp.DHCP_LEASES_FILE = 'testdata/dhcp.leases'
dhcp.DHCP_SIGNATURE_FILE = 'testdata/dhcp.signatures'
def testLookup(self):
signature = ('wifi4|probe:0,1,50,45,htcap:186e|assoc:0,1,50,48,'
'221(0050f2,2),45,127,htcap:086c,htmcs:000000ff')
taxonomy = wifi.identify_wifi_device(signature, '00:00:01:00:00:01')
self.assertEqual(3, len(taxonomy))
self.assertEqual('802.11n n:1,w:20', taxonomy[2])
signature = ('wifi4|probe:0,1,45,191,221(0050f2,4),221(506f9a,9),'
'221(001018,2),htcap:006f,htagg:17,htmcs:0000ffff,'
'vhtcap:0f815832,vhtrxmcs:0000fffa,vhttxmcs:0000fffa,'
'extcap:000008800140,wps:Nexus_6|assoc:0,1,33,36,48,45,'
'127,191,221(001018,2),221(0050f2,2),htcap:006f,htagg:17,'
'htmcs:0000ffff,vhtcap:0f815832,vhtrxmcs:0000fffa,'
'vhttxmcs:0000fffa,txpow:e009,extcap:000008800140')
taxonomy = wifi.identify_wifi_device(signature, '00:00:01:00:00:02')
expected = 'Nexus 6;;802.11ac n:2,w:80'
self.assertEqual(';'.join(taxonomy), expected)
self.assertEqual(3, len(taxonomy))
self.assertEqual('Nexus 6', taxonomy[0])
self.assertEqual('', taxonomy[1])
self.assertEqual('802.11ac n:2,w:80', taxonomy[2])
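# The three-element taxonomy checked above appears to be
# (device model, model variant, radio capability), where the capability string
# names the best supported 802.11 standard together with the number of spatial
# streams (n:) and the channel width in MHz (w:).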
def testNameLookup(self):
signature = ('wifi4|probe:0,1,45,3,221(001018,2),221(00904c,51),htcap:0100,'
'htagg:19,htmcs:000000ff|assoc:0,1,33,36,48,45,221(001018,2),'
'221(00904c,51),221(0050f2,2),htcap:0100,htagg:19,'
'htmcs:000000ff,txpow:180f')
taxonomy = wifi.identify_wifi_device(signature, '00:00:01:00:00:01')
self.assertEqual(3, len(taxonomy))
self.assertEqual('Unknown', taxonomy[1])
taxonomy = wifi.identify_wifi_device(signature, '00:00:01:00:00:01')
self.assertEqual(3, len(taxonomy))
self.assertEqual('Unknown', taxonomy[1])
taxonomy = wifi.identify_wifi_device(signature, 'c8:69:cd:5e:b5:43')
self.assertEqual('Apple TV', taxonomy[0])
self.assertEqual('3rd gen', taxonomy[1])
def testChecksumWhenNoIdentification(self):
taxonomy = wifi.identify_wifi_device('wifi|probe:1,2,3,4,htcap:0|assoc:1',
'00:00:01:00:00:01')
h = 'SHA:27b78dbb1bc795961ddad0686137eb9fddbbc7f8766bd8947b4deca563b830be'
self.assertIn(h, taxonomy[0])
def testOUI(self):
# Devices with a generic signature, distinguished via MAC OUI
signature = ('wifi4|probe:0,1,50,3,45,221(0050f2,8),htcap:012c,htagg:03,'
'htmcs:000000ff|assoc:0,1,50,33,48,70,45,221(0050f2,2),127,'
'htcap:012c,htagg:03,htmcs:000000ff,txpow:170d,'
'extcap:00000a0200000000')
taxonomy = wifi.identify_wifi_device(signature, '00:00:01:00:00:01')
self.assertIn('Unknown', taxonomy[1])
taxonomy = wifi.identify_wifi_device(signature, 'b4:52:7e:00:00:01')
self.assertIn('Sony Xperia', taxonomy[0])
taxonomy = wifi.identify_wifi_device(signature, 'f8:f1:b6:00:00:01')
self.assertIn('Moto E', taxonomy[0])
self.assertIn('2nd gen', taxonomy[1])
# Test one of the OUIs with multiple vendors listed.
signature = ('wifi4|probe:0,1,3,45,50,htcap:0120,htagg:03,htmcs:00000000|'
'assoc:0,1,48,50,127,221(0050f2,2),45,htcap:012c,'
'htagg:03,htmcs:000000ff,extcap:0000000000000140|oui:google')
taxonomy = wifi.identify_wifi_device(signature, '6c:ad:f8:00:00:01')
self.assertEqual('Chromecast', taxonomy[0])
self.assertEqual('v1', taxonomy[1])
def testOS(self):
signature = 'wifi4|probe:0,1,50|assoc:0,1,50,48,221(0050f2,2)'
taxonomy = wifi.identify_wifi_device(signature, '00:00:01:00:00:01')
self.assertIn('Unknown', taxonomy[1])
taxonomy = wifi.identify_wifi_device(signature, '28:ef:01:00:00:01')
self.assertIn('Kindle', taxonomy[0])
def testCommonSignature(self):
signature = ('wifi4|probe:0,1,50,45,221(001018,2),221(00904c,51),'
'htcap:182c,htagg:1b,htmcs:000000ff|assoc:0,1,48,50,45,'
'221(001018,2),221(00904c,51),221(0050f2,2),'
'htcap:182c,htagg:1b,htmcs:000000ff')
# This is a very common signature among devices issued in 2010.
# It will match:
# Samsung Captivate, Epic 2, Fascinate, Continuum, Charge, Vibrant
# Samsung Galaxy Tab 2
# HTC Thunderbolt
# ASUS Transformer TF300
# Nexus One
taxonomy = wifi.identify_wifi_device(signature, '00:00:01:00:00:01')
self.assertIn('Unknown', taxonomy[1])
taxonomy = wifi.identify_wifi_device(signature, 'b4:ce:f6:00:00:01')
self.assertIn('Unknown', taxonomy[1])
taxonomy = wifi.identify_wifi_device(signature, 'ac:22:0b:00:00:01')
self.assertIn('Unknown', taxonomy[1])
def testUnknown(self):
signature = 'wifi4|probe:0,1,2,vhtcap:0033|assoc:3,4,vhtcap:0033'
taxonomy = wifi.identify_wifi_device(signature, '00:00:01:00:00:01')
self.assertIn('802.11ac', taxonomy[2])
self.assertNotIn('802.11n', taxonomy[2])
self.assertNotIn('802.11a/b/g', taxonomy[2])
signature = 'wifi4|probe:0,1,2,htcap:0033|assoc:3,4,htcap:0033'
taxonomy = wifi.identify_wifi_device(signature, '00:00:01:00:00:01')
self.assertNotIn('802.11ac', taxonomy[2])
self.assertIn('802.11n', taxonomy[2])
self.assertNotIn('802.11a/b/g', taxonomy[2])
signature = 'wifi4|probe:0,1,2|assoc:3,4'
taxonomy = wifi.identify_wifi_device(signature, '00:00:01:00:00:01')
self.assertNotIn('802.11ac', taxonomy[2])
self.assertNotIn('802.11n', taxonomy[2])
self.assertIn('802.11a/b/g', taxonomy[2])
def test802_11n_NssWidth(self):
signature = 'wifi4|probe:0|assoc:1,htcap:012c,htagg:03,htmcs:000000ff'
taxonomy = wifi.identify_wifi_device(signature, '00:00:01:00:00:01')
self.assertEqual('802.11n n:1,w:20', taxonomy[2])
signature = 'wifi4|probe:0|assoc:1,htcap:0102,htagg:03,htmcs:0000ffff'
taxonomy = wifi.identify_wifi_device(signature, '00:00:01:00:00:01')
self.assertEqual('802.11n n:2,w:40', taxonomy[2])
signature = 'wifi4|probe:0|assoc:1,htcap:0200,htagg:03,htmcs:00ffffff'
taxonomy = wifi.identify_wifi_device(signature, '00:00:01:00:00:01')
self.assertEqual('802.11n n:3,w:20', taxonomy[2])
signature = 'wifi4|probe:0|assoc:1,htcap:0302,htagg:03,htmcs:ffffffff'
taxonomy = wifi.identify_wifi_device(signature, '00:00:01:00:00:01')
self.assertEqual('802.11n n:4,w:40', taxonomy[2])
signature = 'wifi4|probe:0|assoc:1'
taxonomy = wifi.identify_wifi_device(signature, '00:00:01:00:00:01')
self.assertEqual('802.11a/b/g n:1,w:20', taxonomy[2])
def test802_11ac_Width(self):
signature = ('wifi4|probe:0|assoc:1,htcap:0302,htmcs:000000ff,'
'vhtcap:00000000,vhtrxmcs:0000ffaa,vhttxmcs:0000ffaa')
taxonomy = wifi.identify_wifi_device(signature, '00:00:01:00:00:01')
self.assertEqual('802.11ac n:4,w:80', taxonomy[2])
signature = ('wifi4|probe:0|assoc:1,htcap:0200,htmcs:000000ff,'
'vhtcap:00000004,vhtrxmcs:0000ffea,vhttxmcs:0000ffea')
taxonomy = wifi.identify_wifi_device(signature, '00:00:01:00:00:01')
self.assertEqual('802.11ac n:3,w:160', taxonomy[2])
signature = ('wifi4|probe:0|assoc:1,htcap:0200,htmcs:000000ff,'
'vhtcap:00000004,vhtrxmcs:0000fffa,vhttxmcs:0000fffa')
taxonomy = wifi.identify_wifi_device(signature, '00:00:01:00:00:01')
self.assertEqual('802.11ac n:2,w:160', taxonomy[2])
signature = ('wifi4|probe:0|assoc:1,htcap:0200,htmcs:000000ff,'
'vhtcap:00000004,vhtrxmcs:0000fffe,vhttxmcs:0000fffe')
taxonomy = wifi.identify_wifi_device(signature, '00:00:01:00:00:01')
self.assertEqual('802.11ac n:1,w:160', taxonomy[2])
signature = 'wifi4|probe:0|assoc:1,vhtcap:00000008'
taxonomy = wifi.identify_wifi_device(signature, '00:00:01:00:00:01')
self.assertEqual('802.11ac n:?,w:80+80', taxonomy[2])
signature = 'wifi4|probe:0|assoc:1,vhtcap:0000000c'
taxonomy = wifi.identify_wifi_device(signature, '00:00:01:00:00:01')
self.assertEqual('802.11ac n:?,w:??', taxonomy[2])
def testPerformanceInfoBroken(self):
signature = ('wifi4|probe:0,htmcs:000000ff|assoc:0,htmcs:000000ff')
taxonomy = wifi.identify_wifi_device(signature, '00:00:01:00:00:01')
self.assertEqual('802.11a/b/g n:1,w:20', taxonomy[2])
signature = ('wifi4|probe:0,htcap:wrong,htmcs:ffffffff|'
'assoc:0,htcap:wrong,htmcs:ffffffff')
taxonomy = wifi.identify_wifi_device(signature, '00:00:01:00:00:01')
self.assertEqual('802.11n n:4,w:??', taxonomy[2])
signature = ('wifi4|probe:0,htcap:012c,htmcs:wrong|'
'assoc:0,htcap:012c,htmcs:wrong')
taxonomy = wifi.identify_wifi_device(signature, '00:00:01:00:00:01')
self.assertEqual('802.11n n:?,w:20', taxonomy[2])
signature = ('wifi4|probe:0,htcap:wrong,htmcs:wrong|'
'assoc:0,htcap:wrong,htmcs:wrong')
taxonomy = wifi.identify_wifi_device(signature, '00:00:01:00:00:01')
self.assertEqual('802.11n n:?,w:??', taxonomy[2])
def testRealClientsPerformance(self):
"""Test the performance information for a few real clients."""
# Nest Thermostat
sig = ('wifi4|probe:0,1,50,45,htcap:0130,htagg:18,htmcs:000000ff|assoc:'
'0,1,50,48,45,221(0050f2,2),htcap:013c,htagg:18,htmcs:000000ff')
taxonomy = wifi.identify_wifi_device(sig, '18:b4:30:00:00:01')
self.assertEqual('802.11n n:1,w:20', taxonomy[2])
# Samsung Galaxy S4
sig = (
'wifi4|probe:0,1,45,127,191,221(001018,2),221(00904c,51),221(00904c,'
'4),221(0050f2,8),htcap:006f,htagg:17,htmcs:000000ff,vhtcap:0f805832,'
'vhtrxmcs:0000fffe,vhttxmcs:0000fffe|assoc:0,1,33,36,48,45,127,191,'
'221(001018,2),221(00904c,4),221(0050f2,2),htcap:006f,htagg:17,htmcs:'
'000000ff,vhtcap:0f805832,vhtrxmcs:0000fffe,vhttxmcs:0000fffe')
taxonomy = wifi.identify_wifi_device(sig, 'cc:3a:61:00:00:01')
self.assertEqual('802.11ac n:1,w:80', taxonomy[2])
# MacBook Pro 802.11ac
sig = (
'wifi4|probe:0,1,45,127,191,221(00904c,51),htcap:09ef,htagg:17,'
'htmcs:0000ffff,vhtcap:0f8259b2,vhtrxmcs:0000ffea,vhttxmcs:0000ffea|'
'assoc:0,1,33,36,48,45,127,191,221(00904c,51),221(0050f2,2),htcap:09ef,'
'htagg:17,htmcs:0000ffff,vhtcap:0f8259b2,vhtrxmcs:0000ffea,'
'vhttxmcs:0000ffea')
taxonomy = wifi.identify_wifi_device(sig, '3c:15:c2:00:00:01')
self.assertEqual('802.11ac n:3,w:80', taxonomy[2])
def testBrokenNssWidth(self):
"""Test for broken client behavior.
A few clients, notably Nexus 4 with Android 4.2,
include a VHT Capabilities in their Probe even
though they are not 802.11ac devices. Presumably
the driver supports other chipsets which are.
To work around this, taxonomy is only supposed to
look at the Association for determining client
performance characteristics.
"""
signature = ('wifi4|probe:0,1,50,45,221(0050f2,8),191,221(0050f2,4),'
'221(506f9a,9),htcap:012c,htagg:03,htmcs:000000ff,'
'vhtcap:31811120,vhtrxmcs:01b2fffc,vhttxmcs:01b2fffc,'
'wps:Nexus_4|assoc:0,1,50,48,45,221(0050f2,2),'
'htcap:012c,htagg:03,htmcs:000000ff')
taxonomy = wifi.identify_wifi_device(signature, '00:00:01:00:00:01')
self.assertEqual('802.11n n:1,w:20', taxonomy[2])
def testCorruptFiles(self):
signature = 'wifi4|probe:0|assoc:1,htcap:this_is_not_a_number'
taxonomy = wifi.identify_wifi_device(signature, '00:00:01:00:00:01')
self.assertIn('802.11n', taxonomy[2])
signature = 'wifi4|probe:0|assoc:1,vhtcap:this_is_not_a_number'
taxonomy = wifi.identify_wifi_device(signature, '00:00:01:00:00:01')
self.assertIn('802.11ac', taxonomy[2])
if __name__ == '__main__':
unittest.main()
|
NetworkDeviceTaxonomy/wifi_taxonomy
|
tests/wifi_test.py
|
Python
|
apache-2.0
| 12,633
|
[
"Galaxy"
] |
50e4e27c5bcde4db09d56514685d49ee3a0ebc1192521e397bd9029577c6255d
|
"""
Test of the classical LM model for language modelling
"""
from groundhog.datasets import LMIterator
from groundhog.trainer.SGD_momentum import SGD as SGD_m
from groundhog.trainer.SGD import SGD
from groundhog.mainLoop import MainLoop
from groundhog.layers import MultiLayer, \
RecurrentMultiLayer, \
RecurrentMultiLayerInp, \
RecurrentMultiLayerShortPath, \
RecurrentMultiLayerShortPathInp, \
RecurrentMultiLayerShortPathInpAll, \
SoftmaxLayer, \
LastState,\
UnaryOp, \
Operator, \
Shift, \
GaussianNoise, \
SigmoidLayer
from groundhog.layers import maxpool, \
maxpool_ntimes, \
last, \
last_ntimes,\
tanh, \
sigmoid, \
rectifier,\
hard_sigmoid, \
hard_tanh
from groundhog.models import LM_Model
from theano.sandbox.scan import scan
import numpy
import theano
import theano.tensor as TT
linear = lambda x:x
theano.config.allow_gc = False
def get_text_data(state):
def out_format (x, y, r):
return {'x':x, 'y' :y, 'reset': r}
def out_format_valid (x, y, r):
return {'x':x, 'y' :y, 'reset': r}
train_data = LMIterator(
batch_size=state['bs'],
path = state['path'],
stop=-1,
seq_len = state['seqlen'],
mode="train",
chunks=state['chunks'],
shift = state['shift'],
output_format = out_format,
can_fit=True)
valid_data = LMIterator(
batch_size=state['bs'],
path=state['path'],
stop=-1,
use_infinite_loop=False,
allow_short_sequences = True,
seq_len= state['seqlen'],
mode="valid",
reset =state['reset'],
chunks=state['chunks'],
shift = state['shift'],
output_format = out_format_valid,
can_fit=True)
test_data = LMIterator(
batch_size=state['bs'],
path = state['path'],
stop=-1,
use_infinite_loop=False,
allow_short_sequences=True,
seq_len= state['seqlen'],
mode="test",
chunks=state['chunks'],
shift = state['shift'],
output_format = out_format_valid,
can_fit=True)
if 'wiki' in state['path']:
test_data = None
return train_data, valid_data, test_data
def jobman(state, channel):
# load dataset
rng = numpy.random.RandomState(state['seed'])
# declare the dimensionalities of the input and output
if state['chunks'] == 'words':
state['n_in'] = 10000
state['n_out'] = 10000
else:
state['n_in'] = 50
state['n_out'] = 50
train_data, valid_data, test_data = get_text_data(state)
## BEGIN Tutorial
### Define Theano Input Variables
x = TT.lvector('x')
y = TT.lvector('y')
h0 = theano.shared(numpy.zeros((eval(state['nhids'])[-1],), dtype='float32'))
### Neural Implementation of the Operators: \oplus
#### Word Embedding
emb_words = MultiLayer(
rng,
n_in=state['n_in'],
n_hids=eval(state['inp_nhids']),
activation=eval(state['inp_activ']),
init_fn='sample_weights_classic',
weight_noise=state['weight_noise'],
rank_n_approx = state['rank_n_approx'],
scale=state['inp_scale'],
sparsity=state['inp_sparse'],
learn_bias = True,
bias_scale=eval(state['inp_bias']),
name='emb_words')
#### Deep Transition Recurrent Layer
rec = eval(state['rec_layer'])(
rng,
eval(state['nhids']),
activation = eval(state['rec_activ']),
#activation = 'TT.nnet.sigmoid',
bias_scale = eval(state['rec_bias']),
scale=eval(state['rec_scale']),
sparsity=eval(state['rec_sparse']),
init_fn=eval(state['rec_init']),
weight_noise=state['weight_noise'],
name='rec')
#### Stitching them together
##### (1) Get the embedding of a word
x_emb = emb_words(x, no_noise_bias=state['no_noise_bias'])
##### (2) Embedding + Hidden State via DT Recurrent Layer
reset = TT.scalar('reset')
rec_layer = rec(x_emb, n_steps=x.shape[0],
init_state=h0*reset,
no_noise_bias=state['no_noise_bias'],
truncate_gradient=state['truncate_gradient'],
batch_size=1)
### Neural Implementation of the Operators: \lhd
#### Softmax Layer
output_layer = SoftmaxLayer(
rng,
eval(state['nhids'])[-1],
state['n_out'],
scale=state['out_scale'],
bias_scale=state['out_bias_scale'],
init_fn="sample_weights_classic",
weight_noise=state['weight_noise'],
sparsity=state['out_sparse'],
sum_over_time=True,
name='out')
### Few Optional Things
#### Direct shortcut from x to y
if state['shortcut_inpout']:
shortcut = MultiLayer(
rng,
n_in=state['n_in'],
n_hids=eval(state['inpout_nhids']),
activations=eval(state['inpout_activ']),
init_fn='sample_weights_classic',
weight_noise = state['weight_noise'],
scale=eval(state['inpout_scale']),
sparsity=eval(state['inpout_sparse']),
learn_bias=eval(state['inpout_learn_bias']),
bias_scale=eval(state['inpout_bias']),
name='shortcut')
#### Learning rate scheduling (1/(1+n/beta))
state['clr'] = state['lr']
def update_lr(obj, cost):
stp = obj.step
if isinstance(obj.state['lr_start'], int) and stp > obj.state['lr_start']:
time = float(stp - obj.state['lr_start'])
new_lr = obj.state['clr']/(1+time/obj.state['lr_beta'])
obj.lr = new_lr
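# Numeric illustration of the schedule above: with the defaults set further
# below (clr = lr = 1.0, lr_beta = 10*1900 = 19000) and lr_adapt enabled,
# the learning rate is clr/(1 + n/lr_beta), i.e. 1.0 at step lr_start,
# 0.5 after 19000 further steps and 0.25 after 57000.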
if state['lr_adapt']:
rec.add_schedule(update_lr)
### Neural Implementations of the Language Model
#### Training
if state['shortcut_inpout']:
train_model = output_layer(rec_layer,
no_noise_bias=state['no_noise_bias'],
additional_inputs=[shortcut(x)]).train(target=y,
scale=numpy.float32(1./state['seqlen']))
else:
train_model = output_layer(rec_layer,
no_noise_bias=state['no_noise_bias']).train(target=y,
scale=numpy.float32(1./state['seqlen']))
nw_h0 = rec_layer.out[rec_layer.out.shape[0]-1]
if state['carry_h0']:
train_model.updates += [(h0, nw_h0)]
#### Validation
h0val = theano.shared(numpy.zeros((eval(state['nhids'])[-1],), dtype='float32'))
rec_layer = rec(emb_words(x, use_noise=False),
n_steps = x.shape[0],
batch_size=1,
init_state=h0val*reset,
use_noise=False)
nw_h0 = rec_layer.out[rec_layer.out.shape[0]-1]
if not state['shortcut_inpout']:
valid_model = output_layer(rec_layer,
use_noise=False).validate(target=y, sum_over_time=True)
else:
valid_model = output_layer(rec_layer,
additional_inputs=[shortcut(x, use_noise=False)],
use_noise=False).validate(target=y, sum_over_time=True)
valid_updates = []
if state['carry_h0']:
valid_updates = [(h0val, nw_h0)]
valid_fn = theano.function([x,y, reset], valid_model.cost,
name='valid_fn', updates=valid_updates)
#### Sampling
##### single-step sampling
def sample_fn(word_tm1, h_tm1):
x_emb = emb_words(word_tm1, use_noise = False, one_step=True)
h0 = rec(x_emb, state_before=h_tm1, one_step=True, use_noise=False)[-1]
word = output_layer.get_sample(state_below=h0, temp=1.)
return word, h0
##### scan for iterating the single-step sampling multiple times
[samples, summaries], updates = scan(sample_fn,
states = [
TT.alloc(numpy.int64(0), state['sample_steps']),
TT.alloc(numpy.float32(0), 1, eval(state['nhids'])[-1])],
n_steps= state['sample_steps'],
name='sampler_scan')
##### define a Theano function
sample_fn = theano.function([], [samples],
updates=updates, profile=False, name='sample_fn')
##### Load a dictionary
dictionary = numpy.load(state['dictionary'])
if state['chunks'] == 'chars':
dictionary = dictionary['unique_chars']
else:
dictionary = dictionary['unique_words']
def hook_fn():
sample = sample_fn()[0]
print 'Sample:',
if state['chunks'] == 'chars':
print "".join(dictionary[sample])
else:
for si in sample:
print dictionary[si],
print
### Build and Train a Model
#### Define a model
model = LM_Model(
cost_layer = train_model,
weight_noise_amount=state['weight_noise_amount'],
valid_fn = valid_fn,
clean_before_noise_fn = False,
noise_fn = None,
rng = rng)
if state['reload']:
model.load(state['prefix']+'model.npz')
#### Define a trainer
##### Training algorithm (SGD)
if state['moment'] < 0:
algo = SGD(model, state, train_data)
else:
algo = SGD_m(model, state, train_data)
##### Main loop of the trainer
main = MainLoop(train_data,
valid_data,
test_data,
model,
algo,
state,
channel,
train_cost = False,
hooks = hook_fn,
validate_postprocess = eval(state['validate_postprocess']))
main.main()
## END Tutorial
if __name__=='__main__':
state = {}
# complete path to data (cluster specific)
state['seqlen'] = 100
state['path']= "/data/lisa/data/PennTreebankCorpus/pentree_char_and_word.npz"
state['dictionary']= "/data/lisa/data/PennTreebankCorpus/dictionaries.npz"
state['chunks'] = 'chars'
state['seed'] = 123
# flag .. don't need to change it. It says what to do if the cost becomes
# nan .. you could raise, though I would leave it at this
state['on_nan'] = 'warn'
# DATA
# For wikipedia the validation set is extremely large, which is very time
# wasteful. This value is only used for the validation set, and IMHO should
# be something like seqlen * 10000 (i.e. the validation should be only
# 10000 steps)
state['reset'] = -1
# For music/ word level I think 50 is a good idea. For character this
# should be at least 100 (I think there are problems with getting state
# of the art otherwise). Note most people use 200 !
# The job stops when learning rate declines to this value. It can be
# useful, because sometimes it is hopeless to wait for the validation error to
# get below minerr, or for the time to expire
state['minlr'] = float(5e-7)
# Layers
# Input
# Input weights are sampled from a gaussian with std=scale; this is the
# standard way to initialize
state['rank_n_approx'] = 0
state['inp_nhids'] = '[400]'
state['inp_activ'] = '[linear]'
state['inp_bias'] = '[0.]'
state['inp_sparse']= -1 # dense
state['inp_scale'] = .1
# This is for the output weights
state['out_scale'] = .1
state['out_bias_scale'] = -.5
state['out_sparse'] = -1
# HidLayer
# Hidden units for the internal layers of the DT-RNN. Having a single
# value results in a standard RNN
state['nhids'] = '[200, 200]'
# Activation of each layer
state['rec_activ'] = '"TT.nnet.sigmoid"'
state['rec_bias'] = '.0'
state['rec_sparse'] ='20'
state['rec_scale'] = '1.'
# sample_weights - you rescale the weights such that the largest
# singular value is scale
# sample_weights_classic : just sample weights from a gaussian with std
# equal to scale
state['rec_init'] = "'sample_weights'"
state['rec_layer'] = 'RecurrentMultiLayerShortPathInpAll'
# SGD params
state['bs'] = 1 # the size of the minibatch
state['lr'] = 1. # initial learning rate
state['cutoff'] = 1. # threshold for gradient rescaling
state['moment'] = 0.995 #-.1 # momentum
# Do not optimize these
state['weight_noise'] = True # white Gaussian noise in weights
state['weight_noise_amount'] = 0.075 # standard deviation
# maximal number of updates
state['loopIters'] = int(1e8)
# maximal number of minutes to wait until killing job
state['timeStop'] = 48*60 # 48 hours
# Construct linear connections from input to output. These are factored
# (like the rank_n) to deal with the possible high dimensionality of the
# input, but it is a linear projection that feeds into the softmax
state['shortcut_inpout'] = False
state['shortcut_rank'] = 200
# Main Loop
# Make this a decently large value. Otherwise you waste a lot of
# memory keeping track of the training error (and other things) at each
# step, and the stdout becomes extremely large.
state['trainFreq'] = 100
state['hookFreq'] = 5000
state['validFreq'] = 1000
state['saveFreq'] = 15 # save every 15 minutes
state['prefix'] = 'model_' # prefix of the save files
state['reload'] = False # reload
state['overwrite'] = 1
# Threshold should be 1.004 for PPL; for entropy (which is what
# everything returns) it should be much smaller. Running value is 1.0002.
# We should not hyperoptimize this.
state['divide_lr'] = 2.
state['cost_threshold'] = 1.0002
state['patience'] = 1
state['validate_postprocess'] = 'lambda x:10**(x/numpy.log(10))'
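# Editor's note (sketch, relying on the comment above that everything is
# reported as entropy in nats): 10**(x/log(10)) is mathematically equal to
# exp(x), so this postprocess converts per-step entropy to perplexity for
# reporting, e.g. x = log(100) maps to a perplexity of 100.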
state['truncate_gradient'] = 80 # truncated BPTT
state['lr_adapt'] = 0 # 1/(1 + n/n0) scheduling
state['lr_beta'] = 10*1900.
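# Editor's note (assumption about GroundHog's 1/(1 + n/n0) schedule, with
# n0 = lr_beta): the effective rate at update n would be roughly
# lr / (1 + n / lr_beta), e.g. 1.0 / (1 + 19000/19000) = 0.5 after 19000
# updates, when lr_adapt is enabled.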
state['lr_start'] = 'on_error'
state['no_noise_bias'] = True # do not use weight noise for biases
state['carry_h0'] = True # carry over h0 across updates
state['sample_steps'] = 80
# Do not change these
state['minerr'] = -1
state['shift'] = 1 # n-step forward prediction
state['cutoff_rescale_length'] = False
jobman(state, None)
|
vseledkin/LV_groundhog
|
tutorials/DT_RNN_Tut.py
|
Python
|
bsd-3-clause
| 14,392
|
[
"Gaussian"
] |
3d1659a7331977117caebb2ffd87749e9efee429bf035371d7125ddb1296478b
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
'''Pychemqt, Chemical Engineering Process simulator
Copyright (C) 2009-2017, Juan José Gómez Romera <jjgomera@gmail.com>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.'''
from math import exp
from unittest import TestCase
from scipy.constants import Boltzmann, Avogadro, pi
from lib import unidades
from lib.meos import MEoS
class Ethylene(MEoS):
"""Multiparameter equation of state for ehylene"""
name = "ethylene"
CASNumber = "74-85-1"
formula = "CH2=CH2"
synonym = "R-1150"
_refPropName = "ETHYLENE"
_coolPropName = "Ethylene"
rhoc = unidades.Density(214.24)
Tc = unidades.Temperature(282.35)
Pc = unidades.Pressure(5041.8, "kPa")
M = 28.05376 # g/mol
Tt = unidades.Temperature(103.989)
Tb = unidades.Temperature(169.379)
f_acent = 0.0866
momentoDipolar = unidades.DipoleMoment(0.0, "Debye")
id = 22
_Tr = unidades.Temperature(273.316763)
_rhor = unidades.Density(216.108926)
_w = 0.085703183
Fi1 = {"ao_log": [1, 3],
"pow": [0, 1],
# Using custom integration parameters for the reference state
# "ao_pow": [8.68815523, -4.47960564],
"ao_pow": [8.67604219, -4.46075924323],
"ao_exp": [2.49395851, 3.0027152, 2.5126584, 3.99064217],
"titao": [4.43266896, 5.74840149, 7.8027825, 15.5851154]}
CP1 = {"ao": 4.,
"ao_exp": [1]*12,
"exp": [4353.907145, 2335.2251475, 1930.913215, 1471.9256475,
4464.6972475, 1778.39697, 1365.4520425, 1356.8190475,
4469.013745, 1188.475645, 4300.6703425, 2077.67413]}
CP2 = {"ao": 3.554495281,
"an": [0.5603615762e6, -0.2141069802e5, 0.2532008897e3,
-0.9951927478e-2, 0.5108931070e-4, -0.1928667482e-7],
"pow": [-3, -2, -1, 1, 2, 3],
"ao_exp": [-0.2061703241e2], "exp": [3000]}
smukala = {
"__type__": "Helmholtz",
"__name__": "Helmholtz equation of state for ethylene of Smukala et "
"al. (2000)",
"__doi__": {"autor": "Smukala, J., Span, R., Wagner, W.",
"title": "New equation of state for ethylene covering the "
"fluid region from the melting line to 450 K at "
"pressures up to 300 MPa",
"ref": "J. Phys. Chem. Ref. Data 29(5) (2000) 1053-1121",
"doi": "10.1063/1.1329318"},
"R": 8.31451,
"cp": Fi1,
"ref": "OTO",
"Tmin": Tt, "Tmax": 550.0, "Pmax": 300000.0, "rhomax": 27.03,
"nr1": [1.861742910067, -3.0913708460844, -0.17384817095516,
0.08037098569284, 0.23682707317354, 0.021922786610247],
"d1": [1, 1, 1, 2, 2, 4],
"t1": [0.5, 1, 2.5, 0, 2, 0.5],
"nr2": [0.11827885813193, -0.021736384396776, 0.044007990661139,
0.12554058863881, -0.13167945577241, -0.0052116984575897,
0.00015236081265419, -2.4505335342756e-05, 0.28970524924022,
-0.18075836674288, 0.15057272878461, -0.14093151754458,
0.022755109070253, 0.014026070529061, 0.0061697454296214,
-0.00041286083451333, 0.012885388714785, -0.069128692157093,
0.10936225568483, -0.0081818875271794, -0.05641847211717,
0.0016517867750633, 0.0095904006517001, -0.0026236572984886],
"d2": [1, 1, 3, 4, 5, 7, 10, 11, 1, 1, 2, 2, 4, 4, 6, 7, 4, 5, 6, 6,
7, 8, 9, 10],
"t2": [1., 4., 1.25, 2.75, 2.25, 1., 0.75, 0.5, 2.5, 3.5, 4., 6., 1.5,
5., 4.5, 15., 20., 23., 22., 29., 19., 15., 13., 10.],
"c2": [1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 3, 4, 4, 4, 4, 4,
4, 4, 4],
"gamma2": [1]*24,
"nr3": [-50.242414011355, 7484.6420119299, -6873.4299232625,
-935.77982814338, 941.33024786113],
"d3": [2, 2, 2, 3, 3],
"t3": [1., 0., 1., 2., 3.],
"alfa3": [25.]*5,
"beta3": [325, 300, 300, 300, 300],
"gamma3": [1.16, 1.19, 1.19, 1.19, 1.19],
"epsilon3": [1.]*5}
jahangiri = {
"__type__": "Helmholtz",
"__name__": "Helmholtz equation of state for ethylene of Jahangiri "
"(1986)",
"__doi__": {"autor": "Jahangiri, M., Jacobsen, R.T, Stewart, R.B., "
"McCarty, R.D.",
"title": "Thermodynamic properties of ethylene from the "
"freezing line to 450 K at pressures to 260 MPa",
"ref": "J. Phys. Chem. Ref. Data 15(2) (1986) 293-734",
"doi": "10.1063/1.555753"},
"R": 8.31434,
"cp": CP1,
"ref": {"Tref": 300, "Pref": 101.325, "ho": 29645.9, "so": 219.39},
"M": 28.054, "Tc": 282.3452, "rhoc": 7.634,
"Tmin": Tt, "Tmax": 500.0, "Pmax": 260000.0, "rhomax": 26.67,
"nr1": [3.248937034, -10.17278862, 7.386604053, -1.568916359,
-0.08884514287, 0.06021068143, 0.1078324588, -0.02004025211,
0.001950491412, 0.06718006403, -0.04200451469, -0.001620507626,
0.0005555156795, 0.0007583671146, -0.0002878544021],
"d1": [1, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 4, 6, 6, 6],
"t1": [0.5, 1, 1.25, 1.75, 4, 2, 4, 5, 6, 0.25, 3, 0.25, 0.5, 2.5, 3],
"nr2": [0.06258987063, -0.06418431160, -0.1368693752, 0.5179207660,
-0.3026331319, 0.7757213872, -2.639890864, 2.927563554,
-1.066267599, -0.05380471540, 0.1277921080, -0.07450152310,
-0.01624304356, 0.1476032429, -0.2003910489, 0.2926905618,
-0.1389040901, 5.913513541, -38.00370130, 96.91940570,
-122.6256839, 77.02379476, -19.22684672, -0.003800045701,
0.01118003813, 0.002945841426],
"d2": [1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 8, 8, 8],
"t2": [0.5, 1, 0.5, 2, 4, 3, 4, 5, 6, 2, 3, 4, 1.5, 0.5, 1.5, 4, 5, 1,
2, 3, 4, 5, 6, 0.5, 1, 5],
"c2": [3, 3, 2, 2, 2, 4, 4, 4, 4, 6, 6, 6, 3, 2, 2, 2, 2, 4, 4, 4, 4,
4, 4, 2, 2, 2],
"gamma2": [1]*26}
shortSpan = {
"__type__": "Helmholtz",
"__name__": "short Helmholtz equation of state for ethylene of Span "
"and Wagner (2003)",
"__doi__": {"autor": "Span, R., Wagner, W.",
"title": "Equations of state for technical applications. "
"II. Results for nonpolar fluids.",
"ref": "Int. J. Thermophys. 24 (1) (2003) 41-109",
"doi": "10.1023/A:1022310214958"},
"R": 8.31451,
"cp": Fi1,
"ref": "OTO",
"Tmin": Tt, "Tmax": 750.0, "Pmax": 100000.0, "rhomax": 27.03,
"M": 28.054,
"nr1": [0.9096223, -0.24641015e1, 0.56175311, -0.19688013e-1,
0.78831145e-1, 0.21478776e-3],
"d1": [1, 1, 1, 2, 3, 7],
"t1": [0.25, 1.125, 1.5, 1.375, 0.25, 0.875],
"nr2": [0.23151337, -0.37804454e-1, -0.20122739, -0.44960157e-1,
-0.2834296e-1, 0.12652824e-1],
"d2": [2, 5, 1, 4, 3, 4],
"t2": [0.625, 1.75, 3.625, 3.625, 14.5, 12],
"c2": [1, 1, 2, 2, 3, 3],
"gamma2": [1]*6}
mccarty = {
# Also referenced in
# Younglove, B.A.,
# Thermophysical Properties of Fluids. I. Argon, Ethylene,
# Parahydrogen, Nitrogen, Nitrogen Trifluoride, and Oxygen
# J. Phys. Chem. Ref. Data, 11(Suppl. 1) (1982)
# Younglove uses a different rhoc, so the tabulated values may differ
"__type__": "MBWR",
"__name__": "MBWR equation of state for ethylene of McCarty (1981)",
"__doi__": {"autor": "McCarty, R.D., Jacobsen, R.T.",
"title": "An Equation of State for Fluid Ethylene",
"ref": "Natl. Bur. Stand., Tech. Note 1045, 1981.",
"doi": ""},
"R": 8.31434, "M": 28.054,
"Tt": 103.986, "Tc": 282.3428, "Pc": 5.0403, "rhoc": 7.634,
"cp": CP2,
"ref": {"Tref": 300, "Pref": 101.325, "so": 219.4, "ho": 29646.46},
"Tmin": Tt, "Tmax": 500.0, "Pmax": 40000.0, "rhomax": 23.343,
"gamma": -0.0172,
"b": [None, -0.2146684366683e-1, 0.1791433722534e1, -0.3675315603930e2,
0.3707178934669e4, -0.3198282566709e6, 0.5809379774732e-3,
-0.7895570824899, 0.1148620375835e3, 0.2713774629193e6,
-0.8647124319107e-4, 0.1617727266385, -0.2731527496271e2,
-0.2672283641459e-2, -0.4752381331990e-1, -0.6255637346217e2,
0.4576234964434e-2, -0.7534839269320e-4, 0.1638171982209,
-0.3563090740740e-2, -0.1833000783170e6, -0.1805074209985e8,
-0.4794587918874e4, 0.3531948274957e8, -0.2562571039155e2,
0.1044308253292e4, -0.1695303363659, -0.1710334224958e4,
-0.2054114462372e-3, 0.6727558766661e-1, -0.1557168403328e-5,
-0.1229814736077e-3, 0.4234325938573e-3]}
sun = {
"__type__": "Helmholtz",
"__name__": "Helmholtz equation of state for ethylene of Sun and Ely "
"(2004)",
"__doi__": {"autor": "Sun, L., Ely, J.F.",
"title": "Universal equation of state for engineering "
"application: Algorithm and application to "
"non-polar and polar fluids",
"ref": "Fluid Phase Equilib., 222-223 (2004) 107-118",
"doi": "10.1016/j.fluid.2004.06.028"},
"R": 8.31451,
"cp": CP1,
"ref": {"Tref": 298.15, "Pref": 101.325, "ho": 29610, "so": 219.225},
"Tmin": Tt, "Tmax": 620.0, "Pmax": 800000.0, "rhomax": 40.,
"nr1": [8.42278605e-1, 8.65139678e-1, -2.79801027, 6.74520156e-2,
2.42445468e-4, -2.74767618e-3],
"d1": [1, 1, 1, 3, 7, 2],
"t1": [1.5, 0.25, 1.25, 0.25, 0.875, 1.375],
"nr2": [-1.48602227e-2, 1.29307481e-1, 3.74759088e-1, -1.25336440e-2,
-2.33507187e-1, 1.38862785e-2, -4.88033330e-2, -2.38141707e-2],
"d2": [1, 1, 2, 5, 1, 1, 4, 2],
"t2": [0, 2.375, 2., 2.125, 3.5, 6.5, 4.75, 12.5],
"c2": [1, 1, 1, 1, 2, 2, 2, 3],
"gamma2": [1]*8}
eq = smukala, mccarty, jahangiri, shortSpan, sun
_PR = [-0.2301, -15.7070]
_surface = {"sigma": [0.0477], "exp": [1.17]}
_dielectric = {
"eq": 1,
"a": [10.725, 0], "b": [55.19, 49.5], "c": [-2045., -1154.],
"Au": 0, "D": 1.9}
_melting = {
"__doi__": smukala["__doi__"],
"Tmin": Tt, "Tmax": 550.0}
@classmethod
def _Melting_Pressure(cls, T):
"""Calculate the melting pressure using the fractional method of
Smukala"""
Tt2 = 110.369
if T < Tt2:
a = 2947001.84
t = 2.045
Tt = cls.Tt
Pt = 122.65
else:
a = 6.82693421
t = 1.089
Tt = Tt2
Pt = 46.8e6
Pr = 1 + a*((T/Tt)**t-1)
return unidades.Pressure(Pr*Pt)
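# Illustrative usage (editor's note, not part of the original file):
#     Ethylene._Melting_Pressure(150)   # returns a unidades.Pressure
# For T = 150 K > 110.369 K the second branch of the piecewise correlation
# is used, i.e. P = Pt*(1 + a*((T/Tt)**t - 1)) with Pt = 46.8 MPa.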
_vapor_Pressure = {
"eq": 3,
"n": [-6.3905741, 1.4060338, -1.6589923, 1.0278028, -2.5071716],
"t": [1.0, 1.5, 2.5, 3.0, 4.5]}
_liquid_Density = {
"eq": 2,
"n": [1.8673079, -0.61533892, -0.058973772, 0.10744720],
"t": [0.343, 3/6, 8/6, 12/6]}
_vapor_Density = {
"eq": 2,
"n": [-1.9034556, -0.75837929, -3.7717969, -8.7478586, -23.885296,
-54.197979],
"t": [0.349, 4/6, 1, 14/6, 29/6, 56/6]}
visco0 = {"__name__": "Holland (1983)",
"__doi__": {
"autor": "Holland, P.M., Eaton, B.E., Hanley, H.J.M.",
"title": "A Correlation of the Viscosity and Thermal "
"Conductivity Data of Gaseous and Liquid Ethylene",
"ref": "J. Phys. Chem. Ref. Data 12(4) (1983) 917-932",
"doi": "10.1063/1.555701"},
"eq": 1, "omega": 0,
"no": [-3.5098225018e5, 2.5008406184e5, -5.8365540744e4,
4.5549146583e2, 2.2881683403e3, -4.7318682077e2,
4.5022249258e1, -2.1490688088, 4.1649263233e-2],
"to": [-1, -2/3, -1/3, 0, 1/3, 2/3, 1, 4/3, 5/3],
"special": "_mur"}
def _mur(self, rho, T, fase):
"""Density correction for viscosity correlation"""
# η1 in Eq 3 is always 0
# Eq 4
tita = (rho-221)/221
j = [-4.8544486732, 1.3033585236e1, 2.7808928908e4, -1.8241971308e3,
1.5913024509, -2.0513573927e2, -3.9478454708e4]
mu2 = exp(j[0]+j[3]/T) * (exp(rho.gcc**0.1*(j[1]+j[2]/T**1.5) +
tita*rho.gcc**0.5*(j[4]+j[5]/T+j[6]/T**2))-1)
# The returned value is in microP; convert to μPa·s
return mu2/10
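# Editor's reading of the correlation (hedged): the framework is expected to
# add this density correction to the dilute-gas term built from the "no"/"to"
# polynomial above, i.e. eta(rho, T) ≈ eta_0(T) + eta_2(rho, T) with eta_1 == 0
# following Holland Eq 3; this method only supplies the eta_2 term in μPa·s.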
_viscosity = visco0,
thermo0 = {"__name__": "Assael (2016)",
"__doi__": {
"autor": "Assael, M.J., Koutian, A., Huber, M.L., Perkins, "
"R.A.",
"title": "Reference Correlations of the Thermal "
"Conductivity of Ethene and Propene",
"ref": "J. Phys. Chem. Ref. Data 45(3) (2016) 033104",
"doi": "10.1063/1.4958984"},
"eq": 1,
"Toref": 282.35, "koref": 1e-3,
"no_num": [-54.1761, 541.904, -656.108, 667.048, -109.992,
60.6511, -1.01377],
"to_num": [0, 1, 2, 3, 4, 5, 6],
"no_den": [26.5363, -20.1401, 19.4152, -2.92695, 1],
"to_den": [0, 1, 2, 3, 4],
"Tref_res": 282.35, "rhoref_res": 214.24, "kref_res": 1e-3,
"nr": [0.261453e2, -0.218619e2, 0.362068e2, -0.136642e2,
0.184752e1, -0.113225e2, 0.269282e2, -0.223164e2,
0.390241e1, 0.668286],
"tr": [0, 0, 0, 0, 0, -1, -1, -1, -1, -1],
"dr": [1, 2, 3, 4, 5, 1, 2, 3, 4, 5],
"critical": 3,
"gnu": 0.63, "gamma": 1.239, "R0": 1.02, "Xio": 0.181e-9,
"gam0": 0.058, "qd": 0.49e-9, "Tcref": 423.53}
thermo1 = {"__name__": "Holland (1983)",
"__doi__": {
"autor": "Holland, P.M., Eaton, B.E., Hanley, H.J.M.",
"title": "A Correlation of the Viscosity and Thermal "
"Conductivity Data of Gaseous and Liquid Ethylene",
"ref": "J. Phys. Chem. Ref. Data 12(4) (1983) 917-932",
"doi": "10.1063/1.555701"},
"eq": 0,
"method": "_thermo0"}
def _thermo0(self, rho, T, fase):
# λ1 in Eq 3 is always 0
GT = [-2.903423528e5, 4.680624952e5, -1.8954783215e5, -4.8262235392e3,
2.243409372e4, -6.6206354818e3, 8.9937717078e2, -6.0559143718e1,
1.6370306422]
lo = 0
for i in range(-3, 6):
lo += GT[i+3]*T**(i/3.)
l2, lc = 0, 0
if rho:
tita = (rho-221)/221
k = [-1.304503323e1, 1.8214616599e1, -9.903022496e3, 7.420521631e2,
-3.0083271933e-1, 9.6456068829e1, 1.350256962e4]
l2 = exp(k[0]+k[3]/T) * (
exp(rho.gcc**0.1*(k[1]+k[2]/T**1.5) +
tita*rho.gcc**0.5*(k[4]+k[5]/T+k[6]/T**2))-1)
# Critical enhancement
deltarho = (rho-221)/221
deltaT = (T-282.34)/282.34
xt = rho**2*fase.kappa*5.039/221**2
B = abs(deltarho)/abs(deltaT)**1.19 # Eq 11
Gamma = xt*abs(deltaT)**1.19 # Eq 12
xi = 0.69/(B**2*5.039/Gamma/Boltzmann/282.34)**0.5 # Eq 14
# Eq 19
F = exp(-18.66*deltaT**2) * exp(-4.25*deltarho**4)
# Eq 18
c = (self.M/rho.gcc/Avogadro/Boltzmann/T)**0.5
d = Boltzmann*T**2/6/pi/fase.mu.muPas/xi
lc = c*d*fase.dpdT_rho**2*fase.kappa**0.5*F
return unidades.ThermalConductivity(lo+l2+lc, "mWmK")
_thermal = thermo0, thermo1
class Test(TestCase):
def test_smukala(self):
# Zero enthalpy-entropy reference state
st = Ethylene(T=298.15, P=101325)
self.assertEqual(round(st.h.kJkg, 2), 0)
self.assertEqual(round(st.s.kJkgK, 3), 0)
# Selected values from Table 32, Pag 1093, saturation state
# Using custom parameters for the reference state, so the enthalpy and
# entropy values differ from the table
st = Ethylene(T=103.989, x=0.5)
self.assertEqual(round(st.P.MPa, 6), 0.000122)
self.assertEqual(round(st.Liquido.rho, 2), 654.60)
self.assertEqual(round(st.Liquido.cv.kJkgK, 4), 1.6220)
self.assertEqual(round(st.Liquido.cp.kJkgK, 4), 2.4294)
self.assertEqual(round(st.Liquido.w, 1), 1766.6)
self.assertEqual(round(st.Gas.rho, 5), 0.00396)
self.assertEqual(round(st.Gas.cv.kJkgK, 5), 0.89014)
self.assertEqual(round(st.Gas.cp.kJkgK, 4), 1.1868)
self.assertEqual(round(st.Gas.w, 2), 202.67)
st = Ethylene(T=150, x=0.5)
self.assertEqual(round(st.P.MPa, 6), 0.027377)
self.assertEqual(round(st.Liquido.rho, 2), 594.60)
self.assertEqual(round(st.Liquido.cv.kJkgK, 4), 1.4275)
self.assertEqual(round(st.Liquido.cp.kJkgK, 4), 2.4038)
self.assertEqual(round(st.Liquido.w, 1), 1449.4)
self.assertEqual(round(st.Gas.rho, 5), 0.62385)
self.assertEqual(round(st.Gas.cv.kJkgK, 5), 0.91795)
self.assertEqual(round(st.Gas.cp.kJkgK, 4), 1.2320)
self.assertEqual(round(st.Gas.w, 2), 241.10)
st = Ethylene(T=200, x=0.5)
self.assertEqual(round(st.P.MPa, 5), 0.45548)
self.assertEqual(round(st.Liquido.rho, 2), 521.22)
self.assertEqual(round(st.Liquido.cv.kJkgK, 4), 1.3214)
self.assertEqual(round(st.Liquido.cp.kJkgK, 4), 2.5287)
self.assertEqual(round(st.Liquido.w, 1), 1069.9)
self.assertEqual(round(st.Gas.rho, 4), 8.4936)
self.assertEqual(round(st.Gas.cv.kJkgK, 4), 1.0431)
self.assertEqual(round(st.Gas.cp.kJkgK, 4), 1.4920)
self.assertEqual(round(st.Gas.w, 2), 261.94)
st = Ethylene(T=250, x=0.5)
self.assertEqual(round(st.P.MPa, 4), 2.3295)
self.assertEqual(round(st.Liquido.rho, 2), 422.02)
self.assertEqual(round(st.Liquido.cv.kJkgK, 4), 1.3680)
self.assertEqual(round(st.Liquido.cp.kJkgK, 4), 3.3629)
self.assertEqual(round(st.Liquido.w, 1), 628.10)
self.assertEqual(round(st.Gas.rho, 3), 44.970)
self.assertEqual(round(st.Gas.cv.kJkgK, 5), 1.3344)
self.assertEqual(round(st.Gas.cp.kJkgK, 4), 2.6609)
self.assertEqual(round(st.Gas.w, 2), 248.80)
st = Ethylene(T=282, x=0.5)
self.assertEqual(round(st.P.MPa, 4), 5.0022)
self.assertEqual(round(st.Liquido.rho, 2), 253.12)
self.assertEqual(round(st.Liquido.cv.kJkgK, 4), 2.2089)
self.assertEqual(round(st.Liquido.cp.kJkgK, 2), 146.97)
self.assertEqual(round(st.Liquido.w, 2), 188.89)
self.assertEqual(round(st.Gas.rho, 2), 175.80)
self.assertEqual(round(st.Gas.cv.kJkgK, 4), 2.3981)
self.assertEqual(round(st.Gas.cp.kJkgK, 2), 225.24)
self.assertEqual(round(st.Gas.w, 2), 191.32)
# Selected values from Table 33, Pag 1097, single phase region
st = Ethylene(T=104.003, P=1e5)
self.assertEqual(round(st.rho, 2), 654.63)
self.assertEqual(round(st.cv.kJkgK, 4), 1.6219)
self.assertEqual(round(st.cp.kJkgK, 4), 2.4293)
self.assertEqual(round(st.w, 1), 1767.0)
st = Ethylene(T=205, P=5e5)
self.assertEqual(round(st.rho, 4), 9.1140)
self.assertEqual(round(st.cv.kJkgK, 4), 1.0511)
self.assertEqual(round(st.cp.kJkgK, 4), 1.5024)
self.assertEqual(round(st.w, 2), 264.57)
st = Ethylene(T=450, P=1e6)
self.assertEqual(round(st.rho, 4), 7.6018)
self.assertEqual(round(st.cv.kJkgK, 4), 1.7717)
self.assertEqual(round(st.cp.kJkgK, 4), 2.0933)
self.assertEqual(round(st.w, 2), 391.57)
st = Ethylene(T=180, P=2e6)
self.assertEqual(round(st.rho, 2), 554.35)
self.assertEqual(round(st.cv.kJkgK, 4), 1.3484)
self.assertEqual(round(st.cp.kJkgK, 4), 2.4263)
self.assertEqual(round(st.w, 1), 1244.3)
st = Ethylene(T=104.690, P=5e6)
self.assertEqual(round(st.rho, 2), 656.09)
self.assertEqual(round(st.cv.kJkgK, 4), 1.6202)
self.assertEqual(round(st.cp.kJkgK, 4), 2.4226)
self.assertEqual(round(st.w, 1), 1789.1)
st = Ethylene(T=255, P=1e7)
self.assertEqual(round(st.rho, 2), 444.77)
self.assertEqual(round(st.cv.kJkgK, 4), 1.3569)
self.assertEqual(round(st.cp.kJkgK, 4), 2.8116)
self.assertEqual(round(st.w, 2), 773.66)
st = Ethylene(T=450, P=2e7)
self.assertEqual(round(st.rho, 2), 176.61)
self.assertEqual(round(st.cv.kJkgK, 4), 1.8596)
self.assertEqual(round(st.cp.kJkgK, 4), 2.7794)
self.assertEqual(round(st.w, 2), 421.74)
st = Ethylene(T=340, P=5e7)
self.assertEqual(round(st.rho, 2), 430.74)
self.assertEqual(round(st.cv.kJkgK, 4), 1.5848)
self.assertEqual(round(st.cp.kJkgK, 4), 2.4990)
self.assertEqual(round(st.w, 2), 889.85)
st = Ethylene(T=127.136, P=1e8)
self.assertEqual(round(st.rho, 2), 670.20)
self.assertEqual(round(st.cv.kJkgK, 4), 1.6349)
self.assertEqual(round(st.cp.kJkgK, 4), 2.3202)
self.assertEqual(round(st.w, 1), 2003.4)
st = Ethylene(T=320, P=2e8)
self.assertEqual(round(st.rho, 2), 576.41)
self.assertEqual(round(st.cv.kJkgK, 4), 1.6686)
self.assertEqual(round(st.cp.kJkgK, 4), 2.2550)
self.assertEqual(round(st.w, 1), 1668.7)
def test_mccarty(self):
# Selected values from Table B-1, Pag 74, saturation state
st = Ethylene(T=104, x=0.5, eq="mccarty")
self.assertEqual(round(st.P.MPa, 5), 0.00012)
self.assertEqual(round(st.Liquido.rhoM, 3), 23.389)
self.assertEqual(round(st.Liquido.dpdrho_T.MPakgm3*st.M, 3), 46.027)
self.assertEqual(round(st.Liquido.dpdT_rho.MPaK, 3), 2.907)
self.assertEqual(round(st.Liquido.uM.Jmol, 2), 6615.72)
self.assertEqual(round(st.Liquido.hM.Jmol, 2), 6615.73)
self.assertEqual(round(st.Liquido.sM.JmolK, 1), 84.4)
self.assertEqual(round(st.Liquido.cvM.JmolK, 2), 39.49)
self.assertEqual(round(st.Liquido.cpM.JmolK, 2), 74.39)
self.assertEqual(round(st.Liquido.w, 0), 1758)
self.assertEqual(round(st.Gas.rhoM, 6), 0.000141)
self.assertEqual(round(st.Gas.dpdrho_T.MPakgm3*st.M, 3), 0.864)
self.assertEqual(round(st.Gas.dpdT_rho.MPaK, 3), 0.000)
self.assertEqual(round(st.Gas.uM.Jmol, 2), 21685.99)
self.assertEqual(round(st.Gas.hM.Jmol, 2), 22550.47)
self.assertEqual(round(st.Gas.sM.JmolK, 1), 237.6)
self.assertEqual(round(st.Gas.cvM.JmolK, 2), 24.99)
self.assertEqual(round(st.Gas.cpM.JmolK, 2), 33.31)
self.assertEqual(round(st.Gas.w, 0), 203)
st = Ethylene(T=125, x=0.5, eq="mccarty")
self.assertEqual(round(st.P.MPa, 5), 0.00252)
self.assertEqual(round(st.Liquido.rhoM, 3), 22.345)
self.assertEqual(round(st.Liquido.dpdrho_T.MPakgm3*st.M, 3), 47.453)
self.assertEqual(round(st.Liquido.dpdT_rho.MPaK, 3), 2.136)
self.assertEqual(round(st.Liquido.uM.Jmol, 2), 8067.58)
self.assertEqual(round(st.Liquido.hM.Jmol, 2), 8067.69)
self.assertEqual(round(st.Liquido.sM.JmolK, 1), 97.1)
self.assertEqual(round(st.Liquido.cvM.JmolK, 2), 43.19)
self.assertEqual(round(st.Liquido.cpM.JmolK, 2), 67.25)
self.assertEqual(round(st.Liquido.w, 0), 1623)
self.assertEqual(round(st.Gas.rhoM, 6), 0.002432)
self.assertEqual(round(st.Gas.dpdrho_T.MPakgm3*st.M, 3), 1.034)
self.assertEqual(round(st.Gas.dpdT_rho.MPaK, 3), 0.000)
self.assertEqual(round(st.Gas.uM.Jmol, 2), 22204.76)
self.assertEqual(round(st.Gas.hM.Jmol, 2), 23241.57)
self.assertEqual(round(st.Gas.sM.JmolK, 1), 218.5)
self.assertEqual(round(st.Gas.cvM.JmolK, 2), 25.19)
self.assertEqual(round(st.Gas.cpM.JmolK, 2), 33.61)
self.assertEqual(round(st.Gas.w, 0), 222)
st = Ethylene(T=150, x=0.5, eq="mccarty")
self.assertEqual(round(st.P.MPa, 5), 0.02735)
self.assertEqual(round(st.Liquido.rhoM, 3), 21.202)
self.assertEqual(round(st.Liquido.dpdrho_T.MPakgm3*st.M, 3), 35.511)
self.assertEqual(round(st.Liquido.dpdT_rho.MPaK, 3), 1.687)
self.assertEqual(round(st.Liquido.uM.Jmol, 2), 9738.07)
self.assertEqual(round(st.Liquido.hM.Jmol, 2), 9739.36)
self.assertEqual(round(st.Liquido.sM.JmolK, 1), 109.3)
self.assertEqual(round(st.Liquido.cvM.JmolK, 2), 40.14)
self.assertEqual(round(st.Liquido.cpM.JmolK, 2), 66.88)
self.assertEqual(round(st.Liquido.w, 0), 1452)
self.assertEqual(round(st.Gas.rhoM, 4), 0.0222)
self.assertEqual(round(st.Gas.dpdrho_T.MPakgm3*st.M, 3), 1.213)
self.assertEqual(round(st.Gas.dpdT_rho.MPaK, 3), 0.000)
self.assertEqual(round(st.Gas.uM.Jmol, 2), 22798.71)
self.assertEqual(round(st.Gas.hM.Jmol, 2), 24029.05)
self.assertEqual(round(st.Gas.sM.JmolK, 1), 204.6)
self.assertEqual(round(st.Gas.cvM.JmolK, 2), 25.97)
self.assertEqual(round(st.Gas.cpM.JmolK, 2), 34.87)
self.assertEqual(round(st.Gas.w, 0), 241)
st = Ethylene(T=175, x=0.5, eq="mccarty")
self.assertEqual(round(st.P.MPa, 5), 0.13956)
self.assertEqual(round(st.Liquido.rhoM, 3), 19.955)
self.assertEqual(round(st.Liquido.dpdrho_T.MPakgm3*st.M, 3), 24.970)
self.assertEqual(round(st.Liquido.dpdT_rho.MPaK, 3), 1.317)
self.assertEqual(round(st.Liquido.uM.Jmol, 2), 11423.46)
self.assertEqual(round(st.Liquido.hM.Jmol, 2), 11430.45)
self.assertEqual(round(st.Liquido.sM.JmolK, 1), 119.7)
self.assertEqual(round(st.Liquido.cvM.JmolK, 2), 37.83)
self.assertEqual(round(st.Liquido.cpM.JmolK, 2), 68.34)
self.assertEqual(round(st.Liquido.w, 0), 1268)
self.assertEqual(round(st.Gas.rhoM, 6), 0.100137)
self.assertEqual(round(st.Gas.dpdrho_T.MPakgm3*st.M, 3), 1.332)
self.assertEqual(round(st.Gas.dpdT_rho.MPaK, 3), 0.001)
self.assertEqual(round(st.Gas.uM.Jmol, 2), 23341.06)
self.assertEqual(round(st.Gas.hM.Jmol, 2), 24734.75)
self.assertEqual(round(st.Gas.sM.JmolK, 1), 195.7)
self.assertEqual(round(st.Gas.cvM.JmolK, 2), 27.56)
self.assertEqual(round(st.Gas.cpM.JmolK, 2), 37.69)
self.assertEqual(round(st.Gas.w, 0), 255)
st = Ethylene(T=225, x=0.5, eq="mccarty")
self.assertEqual(round(st.P.MPa, 4), 1.1279)
self.assertEqual(round(st.Liquido.rhoM, 3), 17.006)
self.assertEqual(round(st.Liquido.dpdrho_T.MPakgm3*st.M, 3), 10.002)
self.assertEqual(round(st.Liquido.dpdT_rho.MPaK, 3), 0.721)
self.assertEqual(round(st.Liquido.uM.Jmol, 2), 14966.73)
self.assertEqual(round(st.Liquido.hM.Jmol, 2), 15033.06)
self.assertEqual(round(st.Liquido.sM.JmolK, 1), 137.5)
self.assertEqual(round(st.Liquido.cvM.JmolK, 2), 37.13)
self.assertEqual(round(st.Liquido.cpM.JmolK, 2), 77.58)
self.assertEqual(round(st.Liquido.w, 0), 863)
self.assertEqual(round(st.Gas.rhoM, 5), 0.73325)
self.assertEqual(round(st.Gas.dpdrho_T.MPakgm3*st.M, 3), 1.208)
self.assertEqual(round(st.Gas.dpdT_rho.MPaK, 3), 0.007)
self.assertEqual(round(st.Gas.uM.Jmol, 2), 24130.08)
self.assertEqual(round(st.Gas.hM.Jmol, 2), 25668.30)
self.assertEqual(round(st.Gas.sM.JmolK, 1), 184.8)
self.assertEqual(round(st.Gas.cvM.JmolK, 2), 33.21)
self.assertEqual(round(st.Gas.cpM.JmolK, 2), 52.17)
self.assertEqual(round(st.Gas.w, 0), 260)
# Table B-2, pag 81, single phase region
st = Ethylene(T=150, P=5e4, eq="mccarty")
self.assertEqual(round(st.rhoM, 3), 21.203)
self.assertEqual(round(st.dpdrho_T.MPakgm3*st.M, 3), 35.52)
self.assertEqual(round(st.dpdT_rho.MPaK, 3), 1.687)
self.assertEqual(round(st.uM.Jmol, 2), 9737.71)
self.assertEqual(round(st.hM.Jmol, 2), 9740.07)
self.assertEqual(round(st.sM.JmolK, 1), 109.3)
self.assertEqual(round(st.cvM.JmolK, 2), 40.14)
self.assertEqual(round(st.cpM.JmolK, 2), 66.87)
self.assertEqual(round(st.w, 0), 1452)
st = Ethylene(T=300, P=101325, eq="mccarty")
self.assertEqual(round(st.rhoM, 6), 0.040847)
self.assertEqual(round(st.dpdrho_T.MPakgm3*st.M, 3), 2.467)
self.assertEqual(round(st.dpdT_rho.MPaK, 6), 0.000342)
self.assertEqual(round(st.uM.Jmol, 2), 27165.87)
self.assertEqual(round(st.hM.Jmol, 2), 29646.46)
self.assertEqual(round(st.sM.JmolK, 1), 219.4)
self.assertEqual(round(st.cvM.JmolK, 2), 34.81)
self.assertEqual(round(st.cpM.JmolK, 2), 43.32)
self.assertEqual(round(st.w, 0), 331)
st = Ethylene(T=450, P=1e6, eq="mccarty")
self.assertEqual(round(st.rhoM, 5), 0.27072)
self.assertEqual(round(st.dpdrho_T.MPakgm3*st.M, 3), 3.646)
self.assertEqual(round(st.dpdT_rho.MPaK, 5), 0.00232)
self.assertEqual(round(st.uM.Jmol, 2), 33363.38)
self.assertEqual(round(st.hM.Jmol, 2), 37057.24)
self.assertEqual(round(st.sM.JmolK, 1), 220.4)
self.assertEqual(round(st.cvM.JmolK, 2), 49.68)
self.assertEqual(round(st.cpM.JmolK, 2), 58.74)
self.assertEqual(round(st.w, 0), 392)
st = Ethylene(T=110, P=1e7, eq="mccarty")
self.assertEqual(round(st.rhoM, 3), 23.252)
self.assertEqual(round(st.dpdrho_T.MPakgm3*st.M, 3), 49.352)
self.assertEqual(round(st.dpdT_rho.MPaK, 2), 2.63)
self.assertEqual(round(st.uM.Jmol, 2), 6939.63)
self.assertEqual(round(st.hM.Jmol, 2), 7369.71)
self.assertEqual(round(st.sM.JmolK, 1), 87.4)
self.assertEqual(round(st.cvM.JmolK, 2), 43.54)
self.assertEqual(round(st.cpM.JmolK, 2), 72.05)
self.assertEqual(round(st.w, 0), 1706)
st = Ethylene(T=450, P=4e7, eq="mccarty")
self.assertEqual(round(st.rhoM, 3), 10.569)
self.assertEqual(round(st.dpdrho_T.MPakgm3*st.M, 3), 6.793)
self.assertEqual(round(st.dpdT_rho.MPaK, 3), 0.202)
self.assertEqual(round(st.uM.Jmol, 2), 28192.71)
self.assertEqual(round(st.hM.Jmol, 2), 31977.53)
self.assertEqual(round(st.sM.JmolK, 1), 180.7)
self.assertEqual(round(st.cvM.JmolK, 2), 53.35)
self.assertEqual(round(st.cpM.JmolK, 2), 77.55)
self.assertEqual(round(st.w, 0), 593)
def test_jahangiri(self):
# Table 24, Pag 635, Second virial coefficients
st = Ethylene(T=200.15, P=101325, eq="jahangiri")
self.assertEqual(round(st.virialB.ccg*st.M, 3), -310.248)
st = Ethylene(T=250.15, P=101325, eq="jahangiri")
self.assertEqual(round(st.virialB.ccg*st.M, 3), -199.921)
st = Ethylene(T=300.15, P=101325, eq="jahangiri")
self.assertEqual(round(st.virialB.ccg*st.M, 3), -138.087)
st = Ethylene(T=350.15, P=101325, eq="jahangiri")
self.assertEqual(round(st.virialB.ccg*st.M, 3), -98.356)
st = Ethylene(T=400.15, P=101325, eq="jahangiri")
self.assertEqual(round(st.virialB.ccg*st.M, 3), -70.599)
st = Ethylene(T=450.15, P=101325, eq="jahangiri")
self.assertEqual(round(st.virialB.ccg*st.M, 3), -50.099)
# Table 27, Pag 646, saturation states
st = Ethylene(T=104, x=0.5, eq="jahangiri")
self.assertEqual(round(st.P.MPa, 5), 0.00012)
self.assertEqual(round(st.Liquido.rhoM, 3), 23.347)
self.assertEqual(round(st.Liquido.hM.Jmol, 1), 6613.4)
self.assertEqual(round(st.Liquido.sM.JmolK, 2), 84.32)
self.assertEqual(round(st.Liquido.cvM.JmolK, 2), 39.00)
self.assertEqual(round(st.Liquido.cpM.JmolK, 2), 61.67)
self.assertEqual(round(st.Liquido.w, 0), 1822)
self.assertEqual(round(st.Gas.rhoM, 5), 0.00014)
self.assertEqual(round(st.Gas.hM.Jmol, 0), 22552)
self.assertEqual(round(st.Gas.sM.JmolK, 2), 237.57)
self.assertEqual(round(st.Gas.cvM.JmolK, 2), 24.97)
self.assertEqual(round(st.Gas.cpM.JmolK, 2), 33.29)
self.assertEqual(round(st.Gas.w, 0), 203)
st = Ethylene(T=150, x=0.5, eq="jahangiri")
self.assertEqual(round(st.P.MPa, 5), 0.02747)
self.assertEqual(round(st.Liquido.rhoM, 3), 21.206)
self.assertEqual(round(st.Liquido.hM.Jmol, 1), 9780.0)
self.assertEqual(round(st.Liquido.sM.JmolK, 2), 109.54)
self.assertEqual(round(st.Liquido.cvM.JmolK, 2), 39.29)
self.assertEqual(round(st.Liquido.cpM.JmolK, 2), 66.68)
self.assertEqual(round(st.Liquido.w, 0), 1457)
self.assertEqual(round(st.Gas.rhoM, 5), 0.02231)
self.assertEqual(round(st.Gas.hM.Jmol, 0), 24039)
self.assertEqual(round(st.Gas.sM.JmolK, 2), 204.60)
self.assertEqual(round(st.Gas.cvM.JmolK, 2), 25.76)
self.assertEqual(round(st.Gas.cpM.JmolK, 2), 34.56)
self.assertEqual(round(st.Gas.w, 0), 241)
st = Ethylene(T=200, x=0.5, eq="jahangiri")
self.assertEqual(round(st.P.MPa, 5), 0.45596)
self.assertEqual(round(st.Liquido.rhoM, 3), 18.589)
self.assertEqual(round(st.Liquido.hM.Jmol, 0), 13190)
self.assertEqual(round(st.Liquido.sM.JmolK, 2), 129.01)
self.assertEqual(round(st.Liquido.cvM.JmolK, 2), 37.13)
self.assertEqual(round(st.Liquido.cpM.JmolK, 2), 71.01)
self.assertEqual(round(st.Liquido.w, 0), 1070)
self.assertEqual(round(st.Gas.rhoM, 5), 0.30297)
self.assertEqual(round(st.Gas.hM.Jmol, 0), 25325)
self.assertEqual(round(st.Gas.sM.JmolK, 2), 189.68)
self.assertEqual(round(st.Gas.cvM.JmolK, 2), 29.46)
self.assertEqual(round(st.Gas.cpM.JmolK, 2), 42.12)
self.assertEqual(round(st.Gas.w, 0), 262)
st = Ethylene(T=250, x=0.5, eq="jahangiri")
self.assertEqual(round(st.P.MPa, 4), 2.3307)
self.assertEqual(round(st.Liquido.rhoM, 3), 15.048)
self.assertEqual(round(st.Liquido.hM.Jmol, 0), 17143)
self.assertEqual(round(st.Liquido.sM.JmolK, 2), 146.08)
self.assertEqual(round(st.Liquido.cvM.JmolK, 2), 38.57)
self.assertEqual(round(st.Liquido.cpM.JmolK, 2), 94.60)
self.assertEqual(round(st.Liquido.w, 0), 627)
self.assertEqual(round(st.Gas.rhoM, 4), 1.6042)
self.assertEqual(round(st.Gas.hM.Jmol, 0), 25672)
self.assertEqual(round(st.Gas.sM.JmolK, 2), 180.19)
self.assertEqual(round(st.Gas.cvM.JmolK, 2), 37.48)
self.assertEqual(round(st.Gas.cpM.JmolK, 2), 74.66)
self.assertEqual(round(st.Gas.w, 0), 249)
# Table 28, pag 656, single phase values
st = Ethylene(T=105, P=1e4, eq="jahangiri")
self.assertEqual(round(st.rhoM, 3), 23.302)
self.assertEqual(round(st.uM.Jmol, 1), 6676.1)
self.assertEqual(round(st.hM.Jmol, 1), 6676.5)
self.assertEqual(round(st.sM.JmolK, 2), 84.92)
self.assertEqual(round(st.cvM.JmolK, 2), 40.86)
self.assertEqual(round(st.cpM.JmolK, 2), 63.76)
self.assertEqual(round(st.w, 0), 1801)
st = Ethylene(T=160, P=5e4, eq="jahangiri")
self.assertEqual(round(st.rhoM, 5), 0.03831)
self.assertEqual(round(st.uM.Jmol, 0), 23043)
self.assertEqual(round(st.hM.Jmol, 0), 24348)
self.assertEqual(round(st.sM.JmolK, 2), 201.69)
self.assertEqual(round(st.cvM.JmolK, 2), 26.13)
self.assertEqual(round(st.cpM.JmolK, 2), 35.17)
self.assertEqual(round(st.w, 0), 248)
st = Ethylene(T=450, P=1e5, eq="jahangiri")
self.assertEqual(round(st.rhoM, 5), 0.02676)
self.assertEqual(round(st.uM.Jmol, 0), 33502)
self.assertEqual(round(st.hM.Jmol, 0), 37238)
self.assertEqual(round(st.sM.JmolK, 2), 239.82)
self.assertEqual(round(st.cvM.JmolK, 2), 49.54)
self.assertEqual(round(st.cpM.JmolK, 2), 57.93)
self.assertEqual(round(st.w, 0), 394)
st = Ethylene(T=300, P=101325, eq="jahangiri")
self.assertEqual(round(st.rhoM, 5), 0.04085)
self.assertEqual(round(st.uM.Jmol, 0), 27166)
self.assertEqual(round(st.hM.Jmol, 0), 29646)
self.assertEqual(round(st.sM.JmolK, 2), 219.39)
self.assertEqual(round(st.cvM.JmolK, 2), 34.77)
self.assertEqual(round(st.cpM.JmolK, 2), 43.29)
self.assertEqual(round(st.w, 0), 331)
st = Ethylene(T=180, P=2e5, eq="jahangiri")
self.assertEqual(round(st.rhoM, 3), 19.694)
self.assertEqual(round(st.uM.Jmol, 0), 11786)
self.assertEqual(round(st.hM.Jmol, 0), 11796)
self.assertEqual(round(st.sM.JmolK, 2), 121.74)
self.assertEqual(round(st.cvM.JmolK, 2), 37.35)
self.assertEqual(round(st.cpM.JmolK, 2), 68.10)
self.assertEqual(round(st.w, 0), 1231)
st = Ethylene(T=195, P=3e5, eq="jahangiri")
self.assertEqual(round(st.rhoM, 5), 0.19795)
self.assertEqual(round(st.uM.Jmol, 0), 23794)
self.assertEqual(round(st.hM.Jmol, 0), 25309)
self.assertEqual(round(st.sM.JmolK, 2), 192.81)
self.assertEqual(round(st.cvM.JmolK, 2), 28.41)
self.assertEqual(round(st.cpM.JmolK, 2), 39.43)
self.assertEqual(round(st.w, 0), 264)
st = Ethylene(T=160, P=5e5, eq="jahangiri")
self.assertEqual(round(st.rhoM, 3), 20.731)
self.assertEqual(round(st.uM.Jmol, 0), 10436)
self.assertEqual(round(st.hM.Jmol, 0), 10460)
self.assertEqual(round(st.sM.JmolK, 2), 113.78)
self.assertEqual(round(st.cvM.JmolK, 2), 38.28)
self.assertEqual(round(st.cpM.JmolK, 2), 66.60)
self.assertEqual(round(st.w, 0), 1387)
st = Ethylene(T=288, P=8e5, eq="jahangiri")
self.assertEqual(round(st.rhoM, 5), 0.35246)
self.assertEqual(round(st.uM.Jmol, 0), 26525)
self.assertEqual(round(st.hM.Jmol, 0), 28794)
self.assertEqual(round(st.sM.JmolK, 2), 199.66)
self.assertEqual(round(st.cvM.JmolK, 2), 34.21)
self.assertEqual(round(st.cpM.JmolK, 2), 44.56)
self.assertEqual(round(st.w, 0), 316)
st = Ethylene(T=220, P=1e6, eq="jahangiri")
self.assertEqual(round(st.rhoM, 3), 17.360)
self.assertEqual(round(st.uM.Jmol, 0), 14606)
self.assertEqual(round(st.hM.Jmol, 0), 14663)
self.assertEqual(round(st.sM.JmolK, 2), 135.88)
self.assertEqual(round(st.cvM.JmolK, 2), 37.32)
self.assertEqual(round(st.cpM.JmolK, 2), 76.00)
self.assertEqual(round(st.w, 0), 902)
st = Ethylene(T=244, P=2e6, eq="jahangiri")
self.assertEqual(round(st.rhoM, 3), 15.582)
self.assertEqual(round(st.uM.Jmol, 0), 16479)
self.assertEqual(round(st.hM.Jmol, 0), 16607)
self.assertEqual(round(st.sM.JmolK, 2), 143.99)
self.assertEqual(round(st.cvM.JmolK, 2), 38.18)
self.assertEqual(round(st.cpM.JmolK, 2), 88.69)
self.assertEqual(round(st.w, 0), 685)
st = Ethylene(T=450, P=3e6, eq="jahangiri")
self.assertEqual(round(st.rhoM, 5), 0.83490)
self.assertEqual(round(st.uM.Jmol, 0), 33027)
self.assertEqual(round(st.hM.Jmol, 0), 36621)
self.assertEqual(round(st.sM.JmolK, 2), 210.49)
self.assertEqual(round(st.cvM.JmolK, 2), 49.92)
self.assertEqual(round(st.cpM.JmolK, 2), 60.60)
self.assertEqual(round(st.w, 0), 387)
st = Ethylene(T=280, P=5e6, eq="jahangiri")
self.assertEqual(round(st.rhoM, 3), 11.098)
self.assertEqual(round(st.uM.Jmol, 0), 20068)
self.assertEqual(round(st.hM.Jmol, 0), 20519)
self.assertEqual(round(st.sM.JmolK, 2), 158.00)
self.assertEqual(round(st.cvM.JmolK, 2), 43.83)
self.assertEqual(round(st.cpM.JmolK, 1), 254.2)
self.assertEqual(round(st.w, 0), 311)
st = Ethylene(T=385, P=8e6, eq="jahangiri")
self.assertEqual(round(st.rhoM, 4), 3.1028)
self.assertEqual(round(st.uM.Jmol, 0), 28666)
self.assertEqual(round(st.hM.Jmol, 0), 31244)
self.assertEqual(round(st.sM.JmolK, 2), 190.15)
self.assertEqual(round(st.cvM.JmolK, 2), 45.30)
self.assertEqual(round(st.cpM.JmolK, 2), 67.89)
self.assertEqual(round(st.w, 0), 336)
st = Ethylene(T=110, P=1e7, eq="jahangiri")
self.assertEqual(round(st.rhoM, 3), 23.250)
self.assertEqual(round(st.uM.Jmol, 1), 6922.6)
self.assertEqual(round(st.hM.Jmol, 1), 7352.8)
self.assertEqual(round(st.sM.JmolK, 2), 87.21)
self.assertEqual(round(st.cvM.JmolK, 2), 46.18)
self.assertEqual(round(st.cpM.JmolK, 2), 69.34)
self.assertEqual(round(st.w, 0), 1779)
st = Ethylene(T=244, P=2e7, eq="jahangiri")
self.assertEqual(round(st.rhoM, 3), 17.396)
self.assertEqual(round(st.uM.Jmol, 0), 15489)
self.assertEqual(round(st.hM.Jmol, 0), 16638)
self.assertEqual(round(st.sM.JmolK, 2), 139.68)
self.assertEqual(round(st.cvM.JmolK, 2), 38.06)
self.assertEqual(round(st.cpM.JmolK, 2), 69.84)
self.assertEqual(round(st.w, 0), 992)
st = Ethylene(T=390, P=3e7, eq="jahangiri")
self.assertEqual(round(st.rhoM, 3), 11.067)
self.assertEqual(round(st.uM.Jmol, 0), 24895)
self.assertEqual(round(st.hM.Jmol, 0), 27606)
self.assertEqual(round(st.sM.JmolK, 2), 172.52)
self.assertEqual(round(st.cvM.JmolK, 2), 47.06)
self.assertEqual(round(st.cpM.JmolK, 2), 79.47)
self.assertEqual(round(st.w, 0), 565)
st = Ethylene(T=130, P=4e7, eq="jahangiri")
self.assertEqual(round(st.rhoM, 3), 22.926)
self.assertEqual(round(st.uM.Jmol, 1), 8020.7)
self.assertEqual(round(st.hM.Jmol, 1), 9765.5)
self.assertEqual(round(st.sM.JmolK, 2), 96.52)
self.assertEqual(round(st.cvM.JmolK, 2), 44.99)
self.assertEqual(round(st.cpM.JmolK, 2), 67.44)
self.assertEqual(round(st.w, 0), 1779)
st = Ethylene(T=350, P=5e7, eq="jahangiri")
self.assertEqual(round(st.rhoM, 3), 15.003)
self.assertEqual(round(st.uM.Jmol, 0), 21092)
self.assertEqual(round(st.hM.Jmol, 0), 24425)
self.assertEqual(round(st.sM.JmolK, 2), 159.84)
self.assertEqual(round(st.cvM.JmolK, 2), 44.46)
self.assertEqual(round(st.cpM.JmolK, 2), 70.13)
self.assertEqual(round(st.w, 0), 867)
st = Ethylene(T=240, P=8e7, eq="jahangiri")
self.assertEqual(round(st.rhoM, 3), 19.935)
self.assertEqual(round(st.uM.Jmol, 0), 13969)
self.assertEqual(round(st.hM.Jmol, 0), 17982)
self.assertEqual(round(st.sM.JmolK, 2), 132.07)
self.assertEqual(round(st.cvM.JmolK, 2), 40.20)
self.assertEqual(round(st.cpM.JmolK, 2), 62.12)
self.assertEqual(round(st.w, 0), 1434)
st = Ethylene(T=350, P=1e8, eq="jahangiri")
self.assertEqual(round(st.rhoM, 3), 17.490)
self.assertEqual(round(st.uM.Jmol, 0), 19860)
self.assertEqual(round(st.hM.Jmol, 0), 25578)
self.assertEqual(round(st.sM.JmolK, 2), 154.41)
self.assertEqual(round(st.cvM.JmolK, 2), 46.02)
self.assertEqual(round(st.cpM.JmolK, 2), 65.80)
self.assertEqual(round(st.w, 0), 1203)
st = Ethylene(T=350, P=2.6e8, eq="jahangiri")
self.assertEqual(round(st.rhoM, 3), 20.953)
self.assertEqual(round(st.uM.Jmol, 0), 18436)
self.assertEqual(round(st.hM.Jmol, 0), 30845)
self.assertEqual(round(st.sM.JmolK, 2), 145.92)
self.assertEqual(round(st.cvM.JmolK, 2), 48.98)
self.assertEqual(round(st.cpM.JmolK, 2), 64.39)
self.assertEqual(round(st.w, 0), 1792)
def test_shortSpan(self):
# Table III, Pag 46
st = Ethylene(T=700, rho=200, eq="shortSpan")
self.assertEqual(round(st.cp0.kJkgK, 4), 2.7682)
self.assertEqual(round(st.P.MPa, 3), 48.416)
self.assertEqual(round(st.cp.kJkgK, 4), 3.065)
st2 = Ethylene(T=750, rho=100, eq="shortSpan")
self.assertEqual(round(st2.h.kJkg-st.h.kJkg, 2), 174.10)
self.assertEqual(round(st2.s.kJkgK-st.s.kJkgK, 5), 0.47681)
def test_Assael(self):
# Table 5, pag 8
self.assertEqual(round(Ethylene(T=200, rho=0).k.mWmK, 2), 10.39)
self.assertEqual(round(Ethylene(T=300, rho=0).k.mWmK, 2), 21.01)
self.assertEqual(round(Ethylene(T=400, rho=0).k.mWmK, 2), 36.36)
self.assertEqual(round(Ethylene(T=500, rho=0).k.mWmK, 2), 55.05)
self.assertEqual(round(Ethylene(T=200, P=1e5).k.mWmK, 2), 10.54)
self.assertEqual(round(Ethylene(T=300, P=1e5).k.mWmK, 2), 21.09)
self.assertEqual(round(Ethylene(T=400, P=1e5).k.mWmK, 2), 36.40)
self.assertEqual(round(Ethylene(T=500, P=1e5).k.mWmK, 2), 55.07)
self.assertEqual(round(Ethylene(T=200, P=5e7).k.mWmK, 1), 190.4)
self.assertEqual(round(Ethylene(T=300, P=5e7).k.mWmK, 1), 126.9)
self.assertEqual(round(Ethylene(T=400, P=5e7).k.mWmK, 2), 98.08)
self.assertEqual(round(Ethylene(T=500, P=5e7).k.mWmK, 2), 94.57)
self.assertEqual(round(Ethylene(T=200, P=1e8).k.mWmK, 1), 223.5)
self.assertEqual(round(Ethylene(T=300, P=1e8).k.mWmK, 1), 164.3)
self.assertEqual(round(Ethylene(T=400, P=1e8).k.mWmK, 1), 132.0)
self.assertEqual(round(Ethylene(T=500, P=1e8).k.mWmK, 1), 121.3)
self.assertEqual(round(Ethylene(T=200, P=1.5e8).k.mWmK, 1), 252.9)
self.assertEqual(round(Ethylene(T=300, P=1.5e8).k.mWmK, 1), 196.4)
self.assertEqual(round(Ethylene(T=400, P=1.5e8).k.mWmK, 1), 161.7)
self.assertEqual(round(Ethylene(T=500, P=1.5e8).k.mWmK, 1), 145.8)
self.assertEqual(round(Ethylene(T=200, P=2e8).k.mWmK, 1), 280.5)
self.assertEqual(round(Ethylene(T=300, P=2e8).k.mWmK, 1), 226.5)
self.assertEqual(round(Ethylene(T=400, P=2e8).k.mWmK, 1), 190.3)
self.assertEqual(round(Ethylene(T=500, P=2e8).k.mWmK, 1), 170.6)
# Critical enhancement point, section 3.1.4, pag 8
self.assertEqual(round(Ethylene(T=300, rho=300).k.mWmK, 2), 69.62)
def test_Holland(self):
# Single phase selected point
# Viscosity, Table 5, pag 924
# Thermal Conductivity, Table 6, pag 927
st = Ethylene(T=110, P=1e5, eq="mccarty", thermal=1)
self.assertEqual(round(st.mu.microP, 1), 5660.5)
self.assertEqual(round(st.k.mWmK, 2), 261.77)
st = Ethylene(T=140, P=1e6, eq="mccarty", thermal=1)
self.assertEqual(round(st.mu.microP, 1), 2769.8)
self.assertEqual(round(st.k.mWmK, 2), 223.14)
st = Ethylene(T=200, P=5e6, eq="mccarty", thermal=1)
self.assertEqual(round(st.mu.microP, 1), 1223.7)
self.assertEqual(round(st.k.mWmK, 1), 158.5)
st = Ethylene(T=300, P=1e5, eq="mccarty", thermal=1)
self.assertEqual(round(st.mu.microP, 1), 103.8)
self.assertEqual(round(st.k.mWmK, 2), 20.58)
st = Ethylene(T=130, P=1e7, eq="mccarty", thermal=1)
self.assertEqual(round(st.mu.microP, 1), 3278.5)
self.assertEqual(round(st.k.mWmK, 2), 244.97)
st = Ethylene(T=300, P=5e7, eq="mccarty", thermal=1)
self.assertEqual(round(st.mu.microP, 1), 759.0)
self.assertEqual(round(st.k.mWmK, 2), 129.40)
st = Ethylene(T=500, P=1e5, eq="mccarty", thermal=1)
self.assertEqual(round(st.mu.microP, 1), 165.1)
self.assertEqual(round(st.k.mWmK, 2), 49.95)
st = Ethylene(T=500, P=5e7, eq="mccarty", thermal=1)
self.assertEqual(round(st.mu.microP, 1), 394.1)
self.assertEqual(round(st.k.mWmK, 2), 93.57)
# Saturated liquid values, Table 7, pag 930
# The density values differ, so the calculated transport properties
# differ in those cases; the paper probably uses the ancillary equation
# for the saturated liquid density
st = Ethylene(T=200, x=0, eq="mccarty", thermal=1)
self.assertEqual(round(st.rhoM, 3), 18.575)
self.assertEqual(round(st.mu.microP, 1), 1204.9)
self.assertEqual(round(st.k.mWmK, 1), 153.5)
# Dilute gas values, Table 8, pag 930
st = Ethylene(T=180, rho=0, thermal=1)
self.assertEqual(round(st.mu.microP, 1), 63.6)
self.assertEqual(round(st.k.mWmK, 1), 10.0)
st = Ethylene(T=250, rho=0, thermal=1)
self.assertEqual(round(st.mu.microP, 1), 86.6)
self.assertEqual(round(st.k.mWmK, 1), 14.9)
st = Ethylene(T=400, rho=0, thermal=1)
self.assertEqual(round(st.mu.microP, 1), 135.7)
self.assertEqual(round(st.k.mWmK, 1), 34.6)
st = Ethylene(T=500, rho=0, thermal=1)
self.assertEqual(round(st.mu.microP, 1), 164.8)
self.assertEqual(round(st.k.mWmK, 1), 49.9)
st = Ethylene(T=680, rho=0, thermal=1)
self.assertEqual(round(st.mu.microP, 1), 211.6)
self.assertEqual(round(st.k.mWmK, 1), 91.2)
|
jjgomera/pychemqt
|
lib/mEoS/Ethylene.py
|
Python
|
gpl-3.0
| 50,303
|
[
"Avogadro",
"Jmol"
] |
18f9ef83c6af04f6e9d6b81fdbd0824515ae0cf5d445d700bcddbb53d2991984
|
#!/usr/bin/env python
#pylint: disable=missing-docstring
#################################################################
# DO NOT MODIFY THIS HEADER #
# MOOSE - Multiphysics Object Oriented Simulation Environment #
# #
# (c) 2010 Battelle Energy Alliance, LLC #
# ALL RIGHTS RESERVED #
# #
# Prepared by Battelle Energy Alliance, LLC #
# Under Contract No. DE-AC07-05ID14517 #
# With the U. S. Department of Energy #
# #
# See COPYRIGHT for full restrictions #
#################################################################
import vtk
import chigger
camera = vtk.vtkCamera()
camera.SetViewUp(0.0105, 0.1507, 0.9885)
camera.SetPosition(15.6131, -0.3930, 0.0186)
camera.SetFocalPoint(0.0000, 0.0000, 0.1250)
reader = chigger.exodus.ExodusReader('../input/mug_blocks_out.e')
mug = chigger.exodus.ExodusResult(reader, block=[76], representation='points', camera=camera, color=[0,1,0])
window = chigger.RenderWindow(mug, size=[300,300], test=True)
window.update();window.resetCamera() #TODO: This is needed to make results render correctly, not sure why
window.write('points.png')
window.start()
|
backmari/moose
|
python/chigger/tests/wireframe/points.py
|
Python
|
lgpl-2.1
| 1,484
|
[
"MOOSE",
"VTK"
] |
1c2904800d01985384e37984b79bcb84227a88e88e02b74cd5206accadd61afd
|
# -*- encoding: utf-8 -*-
#
# Copyright © 2012 Robert Weidlich. All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE LICENSOR "AS IS" AND ANY EXPRESS OR IMPLIED
# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
# OF SUCH DAMAGE.
#
# -*- coding: utf-8 -*-
from django.db import models
from django.db.models.signals import post_delete, post_save
from django.contrib.auth.models import User
from django.dispatch import receiver
from django.utils.translation import ugettext as _
from django.db.models import Min, Max
from django.conf import settings
from autoslug import AutoSlugField
from django_hosts.resolvers import reverse
from polymorphic import PolymorphicModel
from easy_thumbnails.fields import ThumbnailerImageField
import jsonfield
import os.path
import uuid
from radioportal import util
class Show(models.Model):
name = models.CharField(max_length=50, unique=True,
verbose_name=_('Name of the show'),
)
slug = AutoSlugField(populate_from='name', always_update=True)
uuid = models.UUIDField(default=uuid.uuid4, editable=False)
url = models.URLField(blank=True, default='',
verbose_name=_('Homepage of the Show'))
twitter = models.CharField(max_length=100, blank=True, default='',
help_text='Name of the associated Twitter account')
chat = models.CharField(max_length=100, blank=True, default='',
help_text='Location for listeners to chat with each other. Examples: irc://irc.freenode.net/#xsn, https://slackin.community.metaebene.me/')
description = models.CharField(max_length=200, blank=True, default='',
verbose_name=_("Description"))
abstract = models.TextField(blank=True, default='',
verbose_name=_('Longer description of the show'))
shownotes_id = models.CharField(max_length=100, blank=True, default='',
verbose_name=_('ID of this show on shownot.es'))
LICENCES = (
('none', _('none')),
('cc-by', _('cc-by')),
('cc-by-sa', _('cc-by-sa')),
('cc-by-nd', _('cc-by-nd')),
('cc-by-nc', _('cc-by-nc')),
('cc-by-nc-sa', _('cc-by-nc-sa')),
('cc-by-nc-nd', _('cc-by-nc-nd')),
)
licence = models.CharField(max_length=100,
choices=LICENCES, default=LICENCES[0][0], blank=True,
verbose_name=_("Licence"))
defaultShortName = models.SlugField(default='',
help_text=_('Used to construct the episode' +
' identifier.'),
verbose_name=_("Abbreviation of the show"))
nextEpisodeNumber = models.PositiveIntegerField(default=1,
help_text=_('The number of the next episode to be aired. Used to construct the episode identifier'),
verbose_name=_("Number of the next episode"))
icon = ThumbnailerImageField(upload_to="show-icons/", blank=True)
icon_url = models.URLField(blank=True, max_length=255)
icon_etag = models.CharField(blank=True, max_length=255)
public_email = models.EmailField(default="", blank=True)
donation_url = models.URLField(blank=True, default='', max_length=512,
verbose_name=_('URL for donations (flattr, paypal.me) for this show'))
def __unicode__(self):
return self.name
def get_absolute_url(self):
return reverse("show", kwargs={'show_name': self.slug}, host='www')
class Meta:
permissions = (
('change_episodes', _('Delete Episodes')),
)
from easy_thumbnails.signals import saved_file
from easy_thumbnails.signal_handlers import generate_aliases_global
saved_file.connect(generate_aliases_global)
class PodcastFeed(models.Model):
PODCASTFIELDS = (
("//channel/title/text()", "//channel/title/text()"),
("//channel/link/text()", "//channel/link/text()"),
("//channel/description/text()", "//channel/description/text()"),
("//channel/copyright/text()", "//channel/copyright/text()"),
("//channel/image/url/text()", "//channel/image/url/text()"),
("//channel/atom:link[@rel='payment']/@href", "//channel/atom:link[@rel='payment']/@href"),
("//channel/itunes:summary/text()", "//channel/itunes:summary/text()"),
("//channel/itunes:owner/itunes:email/text()", "//channel/itunes:owner/itunes:email/text()"),
("//channel/itunes:image/@href", "//channel/itunes:image/@href"),
("//channel/itunes:subtitle/text()", "//channel/itunes:subtitle/text()"),
)
show = models.OneToOneField(Show)
enabled = models.BooleanField(verbose_name=_("Enable"), default=False)
feed_url = models.URLField(max_length=240,
blank=True,
verbose_name=_("Feed of the podcast"),)
feed_url_enabled = models.BooleanField(default=False)
name_enabled = models.BooleanField(default=False)
name_xpath = models.CharField(choices=PODCASTFIELDS,
default=PODCASTFIELDS[0][0],
max_length=127)
name_regex = models.CharField(default="(?P<value>.*)",
max_length=127)
url_enabled = models.BooleanField(default=False)
url_xpath = models.CharField(choices=PODCASTFIELDS,
default=PODCASTFIELDS[1][0],
max_length=127)
url_regex = models.CharField(default="(?P<value>.*)",
max_length=127)
description_enabled = models.BooleanField(default=False)
description_xpath = models.CharField(choices=PODCASTFIELDS,
default=PODCASTFIELDS[2][0],
max_length=127)
description_regex = models.CharField(default="(?P<value>.*)",
max_length=127)
abstract_enabled = models.BooleanField(default=False)
abstract_xpath = models.CharField(choices=PODCASTFIELDS,
default=PODCASTFIELDS[6][0],
max_length=127)
abstract_regex = models.CharField(default="(?P<value>.*)",
max_length=127)
icon_enabled = models.BooleanField(default=False)
icon_xpath = models.CharField(choices=PODCASTFIELDS,
default=PODCASTFIELDS[4][0],
max_length=127)
icon_regex = models.CharField(default="(?P<value>.*)",
max_length=127)
public_email_enabled = models.BooleanField(default=False)
public_email_xpath = models.CharField(choices=PODCASTFIELDS,
default=PODCASTFIELDS[7][0],
max_length=127)
public_email_regex = models.CharField(default="(?P<value>.*)",
max_length=127)
donation_url_enabled = models.BooleanField(default=False)
donation_url_xpath = models.CharField(choices=PODCASTFIELDS,
default=PODCASTFIELDS[5][0],
max_length=127)
donation_url_regex = models.CharField(default="(?P<value>.*)",
max_length=127)
licence_enabled = models.BooleanField(default=False)
licence_xpath = models.CharField(choices=PODCASTFIELDS,
default=PODCASTFIELDS[3][0],
max_length=127)
licence_regex = models.CharField(default="(?P<value>.*)",
max_length=127)
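# Editor's note (assumption about how the feed importer consumes these fields):
# for each enabled attribute, the *_xpath expression is presumably evaluated
# against the podcast feed and the named group "value" of *_regex is extracted
# from the match, so the default "(?P<value>.*)" keeps the XPath result as-is.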
class ICalFeed(models.Model):
ICALFIELDS = (
('SUMMARY', 'SUMMARY'),
('DESCRIPTION', 'DESCRIPTION'),
('LOCATION', 'LOCATION'),
)
show = models.OneToOneField(Show)
enabled = models.BooleanField(verbose_name=_("Enable"), default=False)
url = models.URLField(max_length=255, blank=True, verbose_name=_("iCal feed for upcoming shows"))
slug_field = models.CharField(choices=ICALFIELDS, default="SUMMARY", max_length=50)
slug_regex = models.CharField(max_length=255, default="(?P<value>{show.defaultShortName}[0-9]+)")
title_field = models.CharField(choices=ICALFIELDS, default="SUMMARY", max_length=50)
title_regex = models.CharField(max_length=255, default="{show.defaultShortName}[0-9]+ (?P<value>.+)")
description_field = models.CharField(choices=ICALFIELDS, default="DESCRIPTION", max_length=50)
description_regex = models.CharField(max_length=255, default="(?P<value>.+)")
url_field = models.CharField(choices=ICALFIELDS, default="LOCATION", max_length=50)
url_regex = models.CharField(max_length=255, default="(?P<value>http[^ ]+)")
filter_field = models.CharField(choices=ICALFIELDS, default="DESCRIPTION", max_length=50)
filter_regex = models.CharField(max_length=255, default="#noshow")
delete_missing = models.BooleanField(default=True)
@receiver(post_save, sender=Show)
def create_default_icalfeed(sender, instance, created, raw, *args, **kwargs):
if not created or raw:
return
feed = ICalFeed(show=instance)
feed.save()
podcast = PodcastFeed(show=instance)
podcast.save()
class EpisodeSource(PolymorphicModel):
pass
class ICalEpisodeSource(EpisodeSource):
source = models.ForeignKey(ICalFeed)
identifier = models.CharField(max_length=128)
class Episode(models.Model):
"""
A single Episode of a show, which should be relayed or was relayed in
the past.
"""
#available options for dashboard users
PUBLIC_STATUS = (
('ARCHIVED', _("Archived Episode")),
('UPCOMING', _("Upcoming Episode")),
)
STATUS = (
('ARCHIVED', _("Archived Episode")),
('RUNNING', _("Running Episode")),
('UPCOMING', _("Upcoming Episode")),
)
show = models.ForeignKey(Show, verbose_name=_("Show"))
slug = models.SlugField(max_length=30, default='',
verbose_name=_("Short Name"))
uuid = models.UUIDField(default=uuid.uuid4, editable=False)
source = models.OneToOneField(EpisodeSource, null=True)
def begin(self):
return self.parts.aggregate(Min('begin'))['begin__min']
def end(self):
return self.parts.aggregate(Max('end'))['end__max']
def title(self):
if len(self.parts.all()) > 0:
return self.parts.all()[0].title
return None
def description(self):
if len(self.parts.all()) > 0:
return self.parts.all()[0].description
return None
def url(self):
if len(self.parts.all()) > 0:
return self.parts.all()[0].url
return None
def get_id(self):
return unicode("%s-%s") % (self.show.slug, self.slug)
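# Editor's note (illustrative, with hypothetical slugs): a show with slug
# "myshow" and an episode slug "myshow042" would yield the id "myshow-myshow042".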
status = models.CharField(max_length=10,
choices=STATUS, default=STATUS[2][0])
current_part = models.ForeignKey('EpisodePart', blank=True, null=True, related_name="current_episode")
def __unicode__(self):
if len(self.parts.all()) > 0:
return self.parts.all()[0].__unicode__()
else:
return u'%s' % self.slug
# def save(self, force_insert=False, force_update=False):
# #if self.title == '':
# # self.title = _("Episode %(number)s of %(show)s") % \
# # {'number': self.slug, 'show': self.show.name}
# self.slug = self.slug.lower()
# if self.slug == re.sub("\W", "", self.title):
# self.title = ""
# super(Episode, self).save(force_insert, force_update)
class Meta:
unique_together = (('show', 'slug'),)
def get_absolute_url(self):
return reverse("episode", kwargs={'show_name': self.show.slug, 'slug': self.slug}, host='www')
class EpisodePart(models.Model):
"""
A part of an episode, e.g. 'intro' or 'interview with first caller';
should be used for timelines.
"""
episode = models.ForeignKey(Episode, related_name='parts')
title = models.CharField(max_length=200, blank=True, default='', verbose_name=_("Topic"))
description = models.CharField(max_length=200, blank=True,
default='', verbose_name=_("Description"))
begin = models.DateTimeField(verbose_name=_("Begin"))
end = models.DateTimeField(blank=True, null=True, verbose_name=_("End"))
url = models.URLField(blank=True,
help_text=_('Page of the Episode'),
verbose_name=_("URL"))
shownotes_id = models.CharField(max_length=100, blank=True, default='',
verbose_name=_('ID of this episode on shownot.es'))
def __unicode__(self):
return u'%s%s%s' % (self.episode.slug, " " if self.title else "", self.title)
class Meta:
ordering = ['-id']
class Marker(models.Model):
episode = models.ForeignKey(EpisodePart)
pointoftime = models.DateTimeField(auto_now_add=True)
name = models.CharField(max_length=255)
link = models.URLField(blank=True)
delete = models.BooleanField(default=True)
GTYPES = (
('server', _("Listener Statistics Grouped by Server")),
('mount', _("Listener Statistics Grouped by Mount Point")),
)
def get_graphic_path(instance, filename):
dn = "/".join(["graphics",
instance.episode.episode.show.slug])
fn = "%s.png" % uuid.uuid4()
fdn = os.path.join(settings.MEDIA_ROOT, dn)
if not os.path.exists(fdn):
os.makedirs(fdn)
return "/".join([dn, fn])
class Graphic(models.Model):
file = models.ImageField(upload_to=get_graphic_path, blank=True)
type = models.CharField(max_length=10, choices=GTYPES, default='')
uuid = models.UUIDField(default=uuid.uuid4, editable=False)
data = jsonfield.JSONField()
episode = models.ForeignKey('EpisodePart', related_name='graphics')
def __unicode__(self):
return self.file.name + unicode(": ") + unicode(self.episode)
class Recording(models.Model):
episode = models.ForeignKey('EpisodePart', related_name='recordings')
path = models.CharField(max_length=250, unique=True)
format = models.CharField(max_length=50)
bitrate = models.CharField(max_length=50)
publicURL = models.URLField(default='')
isPublic = models.BooleanField(default=False)
size = models.PositiveIntegerField()
running = models.BooleanField(default=False)
def __unicode__(self):
return self.path
class Channel(models.Model):
# Status
#running = models.BooleanField(default=False,
# help_text=_("A Stream of this channel is running"))
def running(self):
return self.stream_set.filter(running=True).count() > 0
# Meta data from stream
cluster = models.CharField(max_length=40, unique=True,
help_text=_("Stream identifier from backend"))
streamCurrentSong = models.CharField(max_length=255,
blank=True, default='',
help_text=_(u"Property »current_song« from stream meta data"))
streamGenre = models.CharField(max_length=250, blank=True, default='',
help_text=_(u"Property »genre« from stream meta data"))
streamShow = models.CharField(max_length=250, blank=True, default='',
help_text=_(u"Property »show« from stream meta data"))
streamDescription = models.CharField(max_length=250,
blank=True, default='',
help_text=_(u"Property »description« from stream meta data"))
streamURL = models.URLField(blank=True, default='',
help_text=_(u"Property »url« from stream meta data"))
show = models.ManyToManyField(Show, blank=True,
help_text=_('show which is assigned to this channel'),
verbose_name=_('Associated shows'))
mapping_method = jsonfield.JSONField(
verbose_name=_('Method for mapping between streams and episodes'),
help_text=_('Configure order for mapping methods. Use drop-down box for adding new items, [x] for removing items and drag&drop for changing order.'),
)
currentEpisode = models.OneToOneField(Episode, blank=True, null=True)
listener = models.IntegerField(default=0)
recording = models.BooleanField(default=True, editable=False)
public_recording = models.BooleanField(default=True, editable=False)
# agb_accepted = models.BooleanField(default=False, editable=False)
# agb_accepted_date = models.DateTimeField(auto_now_add=True, editable=False)
graphic_differ_by = models.CharField(max_length=255, blank=True)
graphic_title = models.CharField(max_length=255, blank=True)
def running_streams(self):
return self.stream_set.filter(running=True).exclude(format="aac")
def updateRunning(self):
self.running = False
for stream in self.stream_set.all():
if stream.running:
self.running = True
break
self.save()
def __unicode__(self):
return _("Channel for %(cluster)s") % {'cluster': self.cluster}
class Meta:
permissions = (
('change_stream', 'Change Stream'),
)
class Stream(models.Model):
"""
a single stream for a episode of a show, which is relayed by different
Relays
"""
channel = models.ForeignKey(Channel)
mount = models.CharField(max_length=80, unique=True)
running = models.BooleanField(default=False)
CODECS = (
('mp3', _('MP3')),
('aac', _('AAC')),
('vorbis', _('Vorbis')),
('theora', _('Theora')),
('opus', _('Opus')),
)
codec = models.CharField(max_length=100,
choices=CODECS, default=CODECS[0][0])
CONTAINERS = (
('none', _('None')),
('ogg', _('Ogg')),
('mpegts', _('MPEG/TS')),
)
container = models.CharField(max_length=100,
choices=CONTAINERS, default=CONTAINERS[0][0])
TRANSPORTS = (
('http', _('HTTP (Icecast)')),
('hls', _('Apple HTTP Live Streaming')),
)
transport = models.CharField(max_length=100,
choices=TRANSPORTS, default=TRANSPORTS[0][0])
def get_format(self):
return "%s-%s-%s" % (self.codec, self.container, self.transport)
def set_format(self, format):
self.codec, self.container, self.transport = format.split("-")
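# Illustrative note (not part of the original source): get_format()/set_format()
# round-trip codec, container and transport through a "codec-container-transport"
# string, e.g. a default Stream yields "mp3-none-http", and set_format("opus-ogg-hls")
# splits that string back into the three fields.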
FORMATS = (
('mp3', _('MP3')),
('aac', _('AAC')),
('ogg', _('Ogg/Vorbis')),
('ogm', _('Ogg/Theora')),
)
format = models.CharField(max_length=100,
choices=FORMATS, default=FORMATS[0][0])
BITRATES = (
('32', '~32 KBit/s'),
('40', '~40 KBit/s'),
('48', '~48 KBit/s'),
('56', '~56 KBit/s'),
('64', '~64 KBit/s'),
('72', '~72 KBit/s'),
('80', '~80 KBit/s'),
('88', '~88 KBit/s'),
('96', '~96 KBit/s'),
('104', '~104 KBit/s'),
('112', '~112 KBit/s'),
('120', '~120 KBit/s'),
('128', '~128 KBit/s'),
('136', '~136 KBit/s'),
('144', '~144 KBit/s'),
('152', '~152 KBit/s'),
('160', '~160 KBit/s'),
('168', '~168 KBit/s'),
('176', '~176 KBit/s'),
('184', '~184 KBit/s'),
('192', '~192 KBit/s'),
('200', '~200 KBit/s'),
('208', '~208 KBit/s'),
('216', '~216 KBit/s'),
('224', '~224 KBit/s'),
('232', '~232 KBit/s'),
('240', '~240 KBit/s'),
('248', '~248 KBit/s'),
('256', '~256 KBit/s'),
)
bitrate = models.CharField(max_length=100, choices=BITRATES, default=BITRATES[12][0])
ENCODINGS = (
('UTF-8', _('UTF-8')),
('ISO8859-15', _('ISO8859-15')),
)
encoding = models.CharField(max_length=255,
choices=ENCODINGS, default=ENCODINGS[0][0])
def displayFormat(self):
f = ""
if self.transport == "hls":
f += self.get_transport_display()
f += " "
if self.container == "ogg":
f += self.get_container_display()
f += "/"
f += self.get_codec_display()
if self.bitrate and "k" in self.bitrate:
f += " %sKBit/s" % self.bitrate[:-1]
if "q" in self.bitrate:
f += " Quality %s" % self.bitrate[1:]
return f
def mimetype(self):
if self.transport == "http":
return "audio/%s" % self.codec
elif self.transport == "hls":
return "application/vnd.apple.mpegurl"
def updateRunning(self):
self.running = False
self.save()
if self.channel_id:
self.channel.updateRunning()
def __unicode__(self):
return unicode("%s at %s at %s" % \
(self.format, self.bitrate, self.mount))
WAVE = (
('none', _("No Fallback")),
('sine', _("Sine")),
('square', _("Square")),
('saw', _("Saw")),
('triangle', _("Triangle")),
('silence', _("Silence")),
('white-noise', _("White uniform noise")),
('pink-noise', _("Pink noise")),
('sine-table', _("Sine table")),
('ticks', _("Periodic Ticks")),
('gaussian-noise', _("White Gaussian noise")),
)
fallback = models.CharField(max_length=255,
choices=WAVE, default=WAVE[0][0])
def get_absolute_url(self):
return reverse("mount", kwargs={'stream': self.mount}, host='www')
# def save(self, force_insert=False, force_update=False):
# super(Stream, self).save(force_insert, force_update)
class SourcedStream(Stream):
user = models.CharField(max_length=255, blank=True, default="source")
password = models.CharField(max_length=255, blank=True)
class RecodedStream(Stream):
source = models.ForeignKey(SourcedStream, related_name='recoded')
class HLSStream(RecodedStream):
pass
class ShowRequest(models.Model):
name = models.CharField(max_length=100, verbose_name=_("Name of the Podcast"))
STATUS = (
('NEW', _('New')),
('UNCONFIR', _('Unconfirmed')),
('ACCEPTED', _('Accepted')),
('DECLINED', _('Declined'))
)
status = models.CharField(choices=STATUS, default=STATUS[0][0], max_length=8)
feed = models.URLField(blank=True, verbose_name=_("Feed of the Podcast, used for automatic extraction of master data"))
ical = models.URLField(blank=True, verbose_name=_("HTTP-Adress of ICal File with announcements of live broadcasts"))
user = models.ForeignKey(User, related_name='show_requests')
show = models.ForeignKey(Show, related_name='request', blank=True, null=True)
reviewer = models.ForeignKey(User, related_name='reviews', blank=True, null=True)
create_time = models.DateTimeField(auto_now_add=True)
review_time = models.DateTimeField(blank=True, null=True)
review_note = models.CharField(default='', blank=True, max_length=400)
def __unicode__(self):
return u"<Request for %s by %s>" % (self.name, self.user)
import allauth.account.signals
import radioportal.dashboard.signup.handlers
allauth.account.signals.email_confirmed.connect(radioportal.dashboard.signup.handlers.email_confirmed_)
post_save.connect(radioportal.dashboard.signup.handlers.handle_request, sender=ShowRequest)
class Status(models.Model):
name = models.CharField(max_length=100)
status = models.PositiveSmallIntegerField()
verbose_status = models.CharField(max_length=100)
timestamp = models.DateTimeField()
category = models.CharField(max_length=100)
step = models.PositiveIntegerField()
def value(self):
import datetime
now = datetime.datetime.now()
diff = now - self.timestamp
return ((diff.seconds - self.step) * 1.0 / self.step)
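# Illustrative note (not part of the original source): this is roughly the number of
# expected update intervals ("step" seconds) by which the last status update is
# overdue: 0 when it is exactly one step old, 1 when it is two steps old, and so on.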
def __unicode__(self):
return u"<Status for %s>" % self.name
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes import generic
class Message(models.Model):
content_type = models.ForeignKey(ContentType)
object_id = models.PositiveIntegerField()
message_object = generic.GenericForeignKey('content_type', 'object_id')
timestamp = models.DateTimeField(auto_now_add=True)
origin = models.CharField(max_length=50)
message = models.CharField(max_length=255)
severity = models.PositiveIntegerField(default=3)
read = models.BooleanField(default=False)
class Meta:
ordering = ['read','-timestamp']
import reversion
reversion.register(Show)
reversion.register(PodcastFeed)
reversion.register(Episode)
reversion.register(EpisodePart)
reversion.register(Marker)
# reversion.register(Graphic)
# reversion.register(Recording)
reversion.register(Channel)
reversion.register(Stream)
reversion.register(SourcedStream)
reversion.register(RecodedStream)
reversion.register(Status)
# from django.db.models.signals import post_save
# def saved(sender, instance, created, **kwargs):
# print "Saved (models.py:314): ", sender, repr(instance)
#
#
# post_save.connect(saved, Show)
# post_save.connect(saved, PodcastFeed)
# post_save.connect(saved, Episode)
# post_save.connect(saved, EpisodePart)
# post_save.connect(saved, Graphic)
# post_save.connect(saved, Recording)
# post_save.connect(saved, Channel)
# post_save.connect(saved, Stream)
# post_save.connect(saved, SourcedStream)
# post_save.connect(saved, RecodedStream)
# post_save.connect(saved, Status)
class NotificationPath(models.Model):
def get(self):
if hasattr(self, "twitteraccount"):
return self.twitteraccount
elif hasattr(self, "httpcallback"):
return self.httpcallback
elif hasattr(self, "ircchannel"):
return self.ircchannel
elif hasattr(self, "auphonicaccount"):
return self.auphonicaccount
def name(self):
return self.get().name()
def __unicode__(self):
return self.get().__unicode__()
class HTTPCallback(NotificationPath):
url = models.URLField()
def name(self):
return u"http"
def __unicode__(self):
return _(u"HTTP Callback %s" % self.url)
class HTTPCallbackHeader(models.Model):
name = models.CharField(max_length=250)
value = models.CharField(max_length=250)
callback = models.ForeignKey(HTTPCallback)
class IRCChannel(NotificationPath):
url = models.CharField(max_length=250)
def name(self):
return u"irc"
def __unicode__(self):
return _(u"IRC Channel %s" % self.url)
class TwitterAccount(NotificationPath):
screen_name = models.CharField(max_length=250)
oauth_token = models.CharField(max_length=250)
oauth_secret = models.CharField(max_length=250)
def name(self):
return u"twitter"
def __unicode__(self):
return _(u"Twitter Account @%s" % self.screen_name)
class AuphonicAccount(NotificationPath):
access_token = models.CharField(max_length=250)
username = models.CharField(max_length=250)
userid = models.CharField(max_length=250)
preset = models.CharField(max_length=250, blank=True)
start_production = models.BooleanField(default=False)
def name(self):
return u"auphonic"
def __unicode__(self):
return _(u"Auphonic Account %s" % self.username)
class NotificationTemplate(models.Model):
text = models.CharField(max_length=250, blank=True)
def __unicode__(self):
return self.text
class PrimaryNotification(models.Model):
path = models.ForeignKey(NotificationPath)
show = models.ForeignKey(Show)
start = models.OneToOneField(NotificationTemplate, related_name="start")
stop = models.OneToOneField(NotificationTemplate, related_name="stop")
rollover = models.OneToOneField(NotificationTemplate, related_name="rollover")
system = models.BooleanField(default=False)
def __unicode__(self):
return _(u"Notification for %(show)s on %(path)s" % {'show': unicode(self.show), 'path': unicode(self.path)})
@receiver(post_delete, sender=PrimaryNotification)
def post_delete_primarynotification(sender, instance, *args, **kwargs):
if instance.start:
instance.start.delete()
if instance.stop:
instance.stop.delete()
if instance.rollover:
instance.rollover.delete()
if instance.path and not instance.path.primarynotification_set.all():
instance.path.delete()
class SecondaryNotification(models.Model):
path = models.ForeignKey(NotificationPath)
show = models.ForeignKey(Show)
primary = models.ForeignKey(PrimaryNotification, blank=True, null=True)
def __unicode__(self):
ret = _(u"Retweet Notification on %s" % unicode(self.path))
if self.primary:
ret += _(" from %s" % unicode(self.primary.path))
return ret
@receiver(post_delete, sender=SecondaryNotification)
def post_delete_secondarynotification(sender, instance, *args, **kwargs):
if instance.path and not instance.path.secondarynotification_set.all():
instance.path.delete()
@receiver(post_save, sender=Show)
def create_default_notifications(sender, instance, created, raw, *args, **kwargs):
if raw or not created:
return
# Twitter
twitter = TwitterAccount(screen_name="xenim", oauth_token=settings.TWITTER_ACCOUNT_TOKEN, oauth_secret=settings.TWITTER_ACCOUNT_SECRET)
twitter.save()
start_tw = NotificationTemplate(text="Sendung {name} ({channel}) hat angefangen {streams}")
start_tw.save()
stop_tw = NotificationTemplate(text="")
stop_tw.save()
rollover_tw = NotificationTemplate(text="")
rollover_tw.save()
twitter_noti = PrimaryNotification(path=twitter, show=instance, start=start_tw, stop=stop_tw, rollover=rollover_tw, system=True)
twitter_noti.save()
# IRC intern
irc = IRCChannel(url="irc://irc.freenode.net/#xsn-intern")
irc.save()
start_irc = NotificationTemplate(text="Sendung {name} ({channel}) hat angefangen {streams}")
start_irc.save()
stop_irc = NotificationTemplate(text="Sendung {channel} ist beendet")
stop_irc.save()
rollover_irc = NotificationTemplate(text="")
rollover_irc.save()
irc_noti = PrimaryNotification(path=irc, show=instance, start=start_irc, stop=stop_irc, rollover=rollover_irc, system=True)
irc_noti.save()
# IRC public
irc2 = IRCChannel(url="irc://irc.freenode.net/#xsn")
irc2.save()
start_irc2 = NotificationTemplate(text="Sendung {name} ({channel}) hat angefangen {streams}")
start_irc2.save()
stop_irc2 = NotificationTemplate(text="Sendung {channel} ist beendet")
stop_irc2.save()
rollover_irc2 = NotificationTemplate(text="")
rollover_irc2.save()
irc2_noti = PrimaryNotification(path=irc2, show=instance, start=start_irc2, stop=stop_irc2, rollover=rollover_irc2, system=True)
irc2_noti.save()
| funkenstrahlen/django-radioportal | radioportal/models.py | Python | bsd-3-clause | 31,854 | ["Gaussian"] | c6a46923459faa2d147231ff13578abae5d51945840fc94d124f83dd2637f88d |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import re
try:
import simplejson as json
except:
import json
from collections import Counter
import unicodedata
from time import strftime, gmtime
"""
12 December 2014
for each of {body, title}:
the unicodeSignature is the sequence of >ascii codepoints, in order, space-separated
the unicodeCatalog is the bag of >ascii codepoints, sorted/agglomerated using space, comma-separated
the unicodeHistogram is a json-encoded python dict/json object mapping codepoint to count
the unicodeBlockSignature is the sequence of block descriptors (of all >ascii), in order, space-separated
the unicodeBlockCatalog is the bag of block descriptors, sorted/agglomerated using space, comma-separated
the unicodeBlockHistogram is a json-encoded python dict/json object mapping block descriptor to count
the unicodeCategorySignature is the sequence of category descriptors (of all >ascii), in order, space-separated
the unicodeCategoryCatalog is the bag of category descriptors, sorted/agglomerated using space, comma-separated
the unicodeCategoryHistogram is a json-encoded python dict/json object mapping category descriptor to count
where block and category descriptors are defined via
# From http://stackoverflow.com/a/245072
# retrieved from http://unicode.org/Public/UNIDATA/Blocks.txt
# Blocks-5.1.0.txt
# Date: 2008-03-20, 17:41:00 PDT [KW]
and are formatted using _ rather than ,/space/-
"""
def isAscii(c):
try:
return ord(c) <= 127
except:
return False
def gentime():
return strftime("%Y-%m-%d %H:%M:%S", gmtime())
def fmtCodepoint(codepoint, style):
return codepoint
def fmtMetadatum(metadatum, style):
def fmtValue(s):
return re.sub("[ -]", "_", re.sub(",", "", unicode(s)))
if style=="category":
category = categoryCodeDescription(unicodedata.category(metadatum))
# return "category:" + fmtValue(category)
return fmtValue(category)
elif style=="block":
# return "block:" + fmtValue(block(metadatum))
return fmtValue(block(metadatum))
else:
return None
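# For example (illustrative, not part of the original source), using the tables
# defined below: fmtMetadatum(u'é', 'category') yields "Letter_Lowercase" and
# fmtMetadatum(u'é', 'block') yields "Latin_1_Supplement", since fmtValue() drops
# commas and replaces spaces/hyphens with underscores.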
# From http://stackoverflow.com/a/245072
_blocks = []
def _initBlocks(text):
pattern = re.compile(r'([0-9A-F]+)\.\.([0-9A-F]+);\ (\S.*\S)')
for line in text.splitlines():
m = pattern.match(line)
if m:
start, end, name = m.groups()
_blocks.append((int(start, 16), int(end, 16), name))
# retrieved from http://unicode.org/Public/UNIDATA/Blocks.txt
_initBlocks('''
# Blocks-5.1.0.txt
# Date: 2008-03-20, 17:41:00 PDT [KW]
#
# Unicode Character Database
# Copyright (c) 1991-2008 Unicode, Inc.
# For terms of use, see http://www.unicode.org/terms_of_use.html
# For documentation, see UCD.html
#
# Note: The casing of block names is not normative.
# For example, "Basic Latin" and "BASIC LATIN" are equivalent.
#
# Format:
# Start Code..End Code; Block Name
# ================================================
# Note: When comparing block names, casing, whitespace, hyphens,
# and underbars are ignored.
# For example, "Latin Extended-A" and "latin extended a" are equivalent.
# For more information on the comparison of property values,
# see UCD.html.
#
# All code points not explicitly listed for Block
# have the value No_Block.
# Property: Block
#
# @missing: 0000..10FFFF; No_Block
0000..007F; Basic Latin
0080..00FF; Latin-1 Supplement
0100..017F; Latin Extended-A
0180..024F; Latin Extended-B
0250..02AF; IPA Extensions
02B0..02FF; Spacing Modifier Letters
0300..036F; Combining Diacritical Marks
0370..03FF; Greek and Coptic
0400..04FF; Cyrillic
0500..052F; Cyrillic Supplement
0530..058F; Armenian
0590..05FF; Hebrew
0600..06FF; Arabic
0700..074F; Syriac
0750..077F; Arabic Supplement
0780..07BF; Thaana
07C0..07FF; NKo
0900..097F; Devanagari
0980..09FF; Bengali
0A00..0A7F; Gurmukhi
0A80..0AFF; Gujarati
0B00..0B7F; Oriya
0B80..0BFF; Tamil
0C00..0C7F; Telugu
0C80..0CFF; Kannada
0D00..0D7F; Malayalam
0D80..0DFF; Sinhala
0E00..0E7F; Thai
0E80..0EFF; Lao
0F00..0FFF; Tibetan
1000..109F; Myanmar
10A0..10FF; Georgian
1100..11FF; Hangul Jamo
1200..137F; Ethiopic
1380..139F; Ethiopic Supplement
13A0..13FF; Cherokee
1400..167F; Unified Canadian Aboriginal Syllabics
1680..169F; Ogham
16A0..16FF; Runic
1700..171F; Tagalog
1720..173F; Hanunoo
1740..175F; Buhid
1760..177F; Tagbanwa
1780..17FF; Khmer
1800..18AF; Mongolian
1900..194F; Limbu
1950..197F; Tai Le
1980..19DF; New Tai Lue
19E0..19FF; Khmer Symbols
1A00..1A1F; Buginese
1B00..1B7F; Balinese
1B80..1BBF; Sundanese
1C00..1C4F; Lepcha
1C50..1C7F; Ol Chiki
1D00..1D7F; Phonetic Extensions
1D80..1DBF; Phonetic Extensions Supplement
1DC0..1DFF; Combining Diacritical Marks Supplement
1E00..1EFF; Latin Extended Additional
1F00..1FFF; Greek Extended
2000..206F; General Punctuation
2070..209F; Superscripts and Subscripts
20A0..20CF; Currency Symbols
20D0..20FF; Combining Diacritical Marks for Symbols
2100..214F; Letterlike Symbols
2150..218F; Number Forms
2190..21FF; Arrows
2200..22FF; Mathematical Operators
2300..23FF; Miscellaneous Technical
2400..243F; Control Pictures
2440..245F; Optical Character Recognition
2460..24FF; Enclosed Alphanumerics
2500..257F; Box Drawing
2580..259F; Block Elements
25A0..25FF; Geometric Shapes
2600..26FF; Miscellaneous Symbols
2700..27BF; Dingbats
27C0..27EF; Miscellaneous Mathematical Symbols-A
27F0..27FF; Supplemental Arrows-A
2800..28FF; Braille Patterns
2900..297F; Supplemental Arrows-B
2980..29FF; Miscellaneous Mathematical Symbols-B
2A00..2AFF; Supplemental Mathematical Operators
2B00..2BFF; Miscellaneous Symbols and Arrows
2C00..2C5F; Glagolitic
2C60..2C7F; Latin Extended-C
2C80..2CFF; Coptic
2D00..2D2F; Georgian Supplement
2D30..2D7F; Tifinagh
2D80..2DDF; Ethiopic Extended
2DE0..2DFF; Cyrillic Extended-A
2E00..2E7F; Supplemental Punctuation
2E80..2EFF; CJK Radicals Supplement
2F00..2FDF; Kangxi Radicals
2FF0..2FFF; Ideographic Description Characters
3000..303F; CJK Symbols and Punctuation
3040..309F; Hiragana
30A0..30FF; Katakana
3100..312F; Bopomofo
3130..318F; Hangul Compatibility Jamo
3190..319F; Kanbun
31A0..31BF; Bopomofo Extended
31C0..31EF; CJK Strokes
31F0..31FF; Katakana Phonetic Extensions
3200..32FF; Enclosed CJK Letters and Months
3300..33FF; CJK Compatibility
3400..4DBF; CJK Unified Ideographs Extension A
4DC0..4DFF; Yijing Hexagram Symbols
4E00..9FFF; CJK Unified Ideographs
A000..A48F; Yi Syllables
A490..A4CF; Yi Radicals
A500..A63F; Vai
A640..A69F; Cyrillic Extended-B
A700..A71F; Modifier Tone Letters
A720..A7FF; Latin Extended-D
A800..A82F; Syloti Nagri
A840..A87F; Phags-pa
A880..A8DF; Saurashtra
A900..A92F; Kayah Li
A930..A95F; Rejang
AA00..AA5F; Cham
AC00..D7AF; Hangul Syllables
D800..DB7F; High Surrogates
DB80..DBFF; High Private Use Surrogates
DC00..DFFF; Low Surrogates
E000..F8FF; Private Use Area
F900..FAFF; CJK Compatibility Ideographs
FB00..FB4F; Alphabetic Presentation Forms
FB50..FDFF; Arabic Presentation Forms-A
FE00..FE0F; Variation Selectors
FE10..FE1F; Vertical Forms
FE20..FE2F; Combining Half Marks
FE30..FE4F; CJK Compatibility Forms
FE50..FE6F; Small Form Variants
FE70..FEFF; Arabic Presentation Forms-B
FF00..FFEF; Halfwidth and Fullwidth Forms
FFF0..FFFF; Specials
10000..1007F; Linear B Syllabary
10080..100FF; Linear B Ideograms
10100..1013F; Aegean Numbers
10140..1018F; Ancient Greek Numbers
10190..101CF; Ancient Symbols
101D0..101FF; Phaistos Disc
10280..1029F; Lycian
102A0..102DF; Carian
10300..1032F; Old Italic
10330..1034F; Gothic
10380..1039F; Ugaritic
103A0..103DF; Old Persian
10400..1044F; Deseret
10450..1047F; Shavian
10480..104AF; Osmanya
10800..1083F; Cypriot Syllabary
10900..1091F; Phoenician
10920..1093F; Lydian
10A00..10A5F; Kharoshthi
12000..123FF; Cuneiform
12400..1247F; Cuneiform Numbers and Punctuation
1D000..1D0FF; Byzantine Musical Symbols
1D100..1D1FF; Musical Symbols
1D200..1D24F; Ancient Greek Musical Notation
1D300..1D35F; Tai Xuan Jing Symbols
1D360..1D37F; Counting Rod Numerals
1D400..1D7FF; Mathematical Alphanumeric Symbols
1F000..1F02F; Mahjong Tiles
1F030..1F09F; Domino Tiles
20000..2A6DF; CJK Unified Ideographs Extension B
2F800..2FA1F; CJK Compatibility Ideographs Supplement
E0000..E007F; Tags
E0100..E01EF; Variation Selectors Supplement
F0000..FFFFF; Supplementary Private Use Area-A
100000..10FFFF; Supplementary Private Use Area-B
# EOF
''')
def block(ch):
'''
Return the Unicode block name for ch, or None if ch has no block.
>>> block(u'a')
'Basic Latin'
>>> block(unichr(0x0b80))
'Tamil'
>>> block(unichr(0xe0080))
'''
assert isinstance(ch, unicode) and len(ch) == 1, repr(ch)
cp = ord(ch)
for start, end, name in _blocks:
if start <= cp <= end:
return name
categoryCodeDescriptions = {'Cc': "Other, Control",
'Cf': "Other, Format",
# 'Cn': "Other, Not Assigned (no characters in the file have this property)",
'Cn': "Other, Not Assigned",
'Co': "Other, Private Use",
'Cs': "Other, Surrogate",
'LC': "Letter, Cased",
'Ll': "Letter, Lowercase",
'Lm': "Letter, Modifier",
'Lo': "Letter, Other",
'Lt': "Letter, Titlecase",
'Lu': "Letter, Uppercase",
'Mc': "Mark, Spacing Combining",
'Me': "Mark, Enclosing",
'Mn': "Mark, Nonspacing",
'Nd': "Number, Decimal Digit",
'Nl': "Number, Letter",
'No': "Number, Other",
'Pc': "Punctuation, Connector",
'Pd': "Punctuation, Dash",
'Pe': "Punctuation, Close",
# 'Pf': "Punctuation, Final quote (may behave like Ps or Pe depending on usage)",
# 'Pi': "Punctuation, Initial quote (may behave like Ps or Pe depending on usage)",
'Pf': "Punctuation, Final quote",
'Pi': "Punctuation, Initial quote",
'Po': "Punctuation, Other",
'Ps': "Punctuation, Open",
'Sc': "Symbol, Currency",
'Sk': "Symbol, Modifier",
'Sm': "Symbol, Math",
'So': "Symbol, Other",
'Zl': "Separator, Line",
'Zp': "Separator, Paragraph",
'Zs': "Separator, Space"}
def categoryCodeDescription(category):
return categoryCodeDescriptions.get(category, "Not Available")
def analyze(part):
content = part["text"]
codepointSeq = []
categorySeq = []
blockSeq = []
codepointHisto = Counter()
categoryHisto = Counter()
blockHisto = Counter()
for c in content:
if not isAscii(c):
codepointHisto[c] += 1
codepointSeq.append(c)
cat = fmtMetadatum(c, 'category')
blk = fmtMetadatum(c, 'block')
if cat:
categoryHisto[cat] += 1
categorySeq.append(cat)
if blk:
blockHisto[blk] += 1
blockSeq.append(blk)
# Normal form KD
# presumed of minor importance: omitted for now
# categoryHisto["normalized:" + unicodedata.normalize(c.decode('utf-8'),'NFKD')] += 1
contentElements = codepointSeq
# Histogram: JSON-encoded string repn of the dict
part["unicodeHistogram"] = json.dumps(codepointHisto)
# Signature: sequence of codepoints
part["unicodeSignature"] = " ".join(codepointSeq)
# Catalog: bag of codepoints
codepointCatalogElements = []
for k in sorted(codepointHisto.keys()):
v = codepointHisto[k]
# v copies of this key
codepointCatalogElements.append(" ".join([k for _ in xrange(v)]))
part["unicodeCatalog"] = ", ".join(codepointCatalogElements)
# Histogram: JSON-encoded string repn of the dict
part["unicodeCategoryHistogram"] = json.dumps(categoryHisto)
# Signature: sequence of codepoints
part["unicodeCategorySignature"] = " ".join(categorySeq)
# Catalog: bag of categories
categoryCatalogElements = []
for k in sorted(categoryHisto.keys()):
v = categoryHisto[k]
# v copies of this key
categoryCatalogElements.append(" ".join([k for _ in xrange(v)]))
part["unicodeCategoryCatalog"] = ", ".join(categoryCatalogElements)
# Histogram: JSON-encoded string repn of the dict
part["unicodeBlockHistogram"] = json.dumps(blockHisto)
# Signature: sequence of codepoints
part["unicodeBlockSignature"] = " ".join(blockSeq)
# Catalog: bag of blocks
blockCatalogElements = []
for k in sorted(blockHisto.keys()):
v = blockHisto[k]
# v copies of this key
blockCatalogElements.append(" ".join([k for _ in xrange(v)]))
part["unicodeBlockCatalog"] = ", ".join(blockCatalogElements)
return part
# Test data
# HEART = u'\u2665'
# SMILY = u'\u263a'
# TSU = u'\u30C4'
# LEFT = u'\u27E8'
# RIGHT = u'\u27E9'
# EURO = u'\u20AC'
# if True:
# TESTUNICODE = LEFT + "h" + EURO + "llo " + HEART + HEART + SMILY + TSU + " goodby" + EURO + " " + SMILY + TSU + HEART + HEART + HEART + HEART + RIGHT
# print len(TESTUNICODE)
# print json.dumps(TESTUNICODE)
# TESTDOC = {"@context": "http://localhost:8080/publish/JSON/WSP1WS6-select unix_timestamp(a_importtime)*1000 as timestamp, a_* from ads a join sample s on a_id=s_id limit 50-context.json","schema:provider": {"a": "Organization", "uri": "http://memex.zapto.org/data/organization/1"}, "snapshotUri": "http://memex.zapto.org/data/page/850753E7323B188B93E6E28F730F2BFBFB1CE00B/1396493689000/raw","a": "WebPage","dateCreated": "2013-09-24T18:28:00","hasBodyPart": {"text": TESTUNICODE, "a": "WebPageElement"}, "hasTitlePart": {"text": "\u270b\u270b\u270bOnly Best \u270c\u270c\u270c Forget The Rest \u270b\u270b\u270b Outcall Specials TONIGHT \u270c\ud83d\udc8b\ud83d\udc45 Sexy Blonde is UP LATE \ud83d\udc9c\ud83d\udc9b\u270b\u270c - 25", "a": "WebPageElement"}, "uri": "http://memex.zapto.org/data/page/850753E7323B188B93E6E28F730F2BFBFB1CE00B/1396493689000/processed"}
# analyze(TESTDOC["hasBodyPart"])
# json.dump(TESTDOC, sys.stdout, indent=4);
# exit(0)
for line in sys.stdin:
try:
(url, jrep) = line.split('\t')
d = json.loads(jrep)
analyze(d["hasBodyPart"])
analyze(d["hasTitlePart"])
# insert gmtime
# ensure it doesn't collide with any other gentime
d["unicodeGentime"] = gentime()
print url + "\t",
json.dump(d, sys.stdout, sort_keys=True)
print
except ValueError as e:
print >> sys.stderr, e
pass
| usc-isi-i2/dig-unicode | python/unicode-histogram.py | Python | apache-2.0 | 15,156 | ["FEFF"] | 731cc7a57790fcbe05f7af19b7250d04369323136c7c3624d648f2cc5be4a1b2 |
import textwrap
import re
# TODO - maybe get Dave's help in coming up with content?
# TODO - fix up room descriptions for typos, etc.
# TODO - put back in (part of) Michael's description of Dave's office
# TODO - entering the stall w/deoderant should print different msg the first time you do that
'''TODO - make sure all items play a role
you need key (from ???) to unlock storage room to get plunger
use plunger in stall, voice says "The price has been paid", and you have something in possession...what?
you need to eat the donut to leave the Bistro, but you have to go to the bistro to get something
have another locked door besides storage room, but key doesn't work in it
meme can be swapped for anything, but it then gets put in some random other room. Same thing happens to it
whenever you pick anything up.
donut has to magically reappear after being eaten, for other players (The friendly staff at the Bistro put another delicious donut on the plate)
notebook - has some clue
note - has a clue about connection between notebook and student
basketball - if you give this to Mr. Peterson, you get something
statue - ???
random number - examining it provides a clue about a locker combo. Do they have those?
pop tart - ???. Something about exchanging it for the donut?
'''
# Multi-player stuff
# When entering a room, or exiting a room, update every other player's msg queue
# Be able to swap items if player in room with you
# Be able to get a hall pass from Angelina that lets you "swap" for something, but then hall pass disappears
# But to get the hall pass, you have to answer a question correctly
# TODO - change all xxx_command functions into check_xxx_command functions, so they can
# be responsible for handling things like "drop the xxx" vs. "drop xxx"
# TODO - use a Player class versus a player dictionary and a bunch of functions
# TODO - use a Room class versus a room dictionary and a bunch of functions
# TODO - use an Item class versus an item dictionary and a bunch of functions
# TODO - catch print calls by trapping stdout ala codecademy framework, and warn if so
# TODO - look needs to handle special description - push that logic into print function?
# TODO - need way to broadcast a msg to everyone when somebody enters a room (everybody in that room gets it)
# same thing when an item magically appears in a room.
class Player:
def __init__(self, name):
self.name = name
self.room = None
self.visited_rooms = set()
self.items = set()
self.score = 0
self.msgs = []
def get_msgs(self):
return self.msgs
def remove_msgs(self):
result = self.msgs
self.msgs = []
return result
def add_msg(self, msg):
# msg might be coming in with leading spaces before
# each line, which we want to remove. So use handy
# regular expression (re) to remove those.
msg = re.sub("(^|\n)[ ]+", " ", msg)
# wrap (or re-wrap) msg so that each line (which ends
# with a \n) will be at most 70 characters long. This
# will also turn the msg string into a list of strings,
# one per (wrapped to 70 chars) line.
msg_lines = textwrap.wrap(msg, 70)
# Get the player's current list of messages, and add
# our new list of messages.
self.msgs += msg_lines
# Add a blank line between each group of lines that
# get added.
self.msgs += [""]
# Limit the player's messages to at most 20 lines.
self.msgs = self.msgs[-20:]
def get_room(self):
return self.room
def set_room(self, room_name):
self.room = room_name
if not room_name in self.visited_rooms:
self.visited_rooms.add(room_name)
return True
else:
return False
# Add the player "player_name" to the game, by creating a new player dictionary
# and filling it with default values (no score, no messages, etc)
def player_add(player_name):
global g_players
player = {"name": player_name, "room": None, "visited_rooms": set(), "items": set(), "score":0, "msgs": []}
g_players[player_name] = player
# Get the list of messages for the player
def player_get_msgs(player_name):
global g_players
return g_players[player_name]["msgs"]
# Remove all of the player's current messages.
def player_remove_msgs(player_name):
global g_players
msgs = g_players[player_name]["msgs"]
g_players[player_name]["msgs"] = []
return msgs
# Add a message for the player.
def player_add_msg(player_name, msg):
global g_players
# msg might be coming in with leading spaces before
# each line, which we want to remove. So use handy
# regular expression (re) to remove those.
msg = re.sub("(^|\n)[ ]+", " ", msg)
# wrap (or re-wrap) msg so that each line (which ends
# with a \n) will be at most 70 characters long. This
# will also turn the msg string into a list of strings,
# one per (wrapped to 70 chars) line.
msg_lines = textwrap.wrap(msg, 70)
# Get the player's current list of messages, and add
# our new list of messages.
msgs = player_get_msgs(player_name)
msgs += msg_lines
# Add a blank line between each group of lines that
# get added.
msgs += [""]
# Limit the player's messages to at most 20 lines.
msgs = msgs[-20:]
# Update the player's messages with the result of above.
g_players[player_name]["msgs"] = msgs
# Get the name of the room where the player is currently located
def player_get_room(player_name):
global g_players
return g_players[player_name]["room"]
# Move the player to the room <room_name>
def player_set_room(player_name, room_name):
global g_players
g_players[player_name]["room"] = room_name
if not room_name in player_get_visited_rooms(player_name):
g_players[player_name]["visited_rooms"].add(room_name)
return True
else:
return False
# Get the set of room names that the player has visited
def player_get_visited_rooms(player_name):
global g_players
return g_players[player_name]["visited_rooms"]
# Get the player's current score
def player_get_score(player_name):
global g_players
return g_players[player_name]["score"]
# Set the player's current score
def player_set_score(player_name, score):
global g_players
g_players[player_name]["score"] = score
# Get the player's set of item names that they've taken
def player_get_items(player_name):
return g_players[player_name]["items"]
# Add an item to the player's set of item names.
def player_add_item(player_name, item_name):
player_get_items(player_name).add(item_name)
# Remove an item named <item_name> from the player's set of
# items.
def player_remove_item(player_name, item_name):
player_get_items(player_name).remove(item_name)
def player_has_item(player_name, item_name):
return item_name in player_get_items(player_name)
# Print the room's description by adding to the player's
# list of messages.
def print_room_description(player_name, room_name):
global g_rooms
# Retrieve the current room by name
room = g_rooms[room_name]
# Print the room's description
player_add_msg(player_name, room['description'])
# If we're entering the stall w/o the deoderant, we are going to be printing out a special message and
# then immediately exiting the room without printing a list of items.
# TODO use more general approach to this
if room_name == "Stall" and not player_has_item(player_name, "deoderant"):
return
# Get the room's item list
item_names = room['items']
# Print a comma-separated list of the room's items, if any.
items_text = format_item_names(item_names, "see")
if (items_text):
player_add_msg(player_name, items_text)
def check_move_command(player_name, room_name, command):
global g_rooms
shortcuts = {"n": "north", "s": "south", "e": "east", "w": "west"}
if command in shortcuts:
command = shortcuts[command]
# Get the current room by name
room = g_rooms[room_name]
# See if command is the name of a door in this room.
doors = room['doors']
# If so, move there and return True, otherwise return False.
if command in doors:
move_to_room(player_name, doors[command])
return True
elif command in {"north", "south", "east", "west", "up", "down"}:
player_add_msg(player_name, "You can't go %s from here" % command)
return True
else:
return False
# Handle special cases for entering a room.
# TODO add enter_special_room and exit_special_room
def enter_special_room(player_name, room_name):
if room_name == "Stall" and not player_has_item(player_name, "deoderant"):
player_add_msg(player_name, "You wake up to find yourself back in the Boys Bathroom")
player_set_room(player_name, "Boys Bathroom")
return
def move_to_room(player_name, room_name):
visited_room_before = room_name in player_get_visited_rooms(player_name)
# Update the player's current room
player_set_room(player_name, room_name)
# If they've already visited the room,
# print a short message and return.
if visited_room_before:
# Handle "You are in in Mr. xxx's rooom", where there is no "the " before the
# room name.
preposition = "the "
if "preposition" in g_rooms[room_name]:
preposition = g_rooms[room_name]["preposition"]
player_add_msg(player_name, "You are in %s%s" % (preposition, room_name))
enter_special_room(player_name, room_name)
return
# TODO add special logic here to handle visiting Dave's Room for the
# first time. You'd need to set a new attribute for the user, to keep
# track of how many times they'd been in his room, and change the description
# that gets printed out when they "look".
#
# Since this is the first time in this room, print the full room
# description and add the room's score to the player's score, and
# print a msg about how many points they've earned.
# Print the room description
print_room_description(player_name, room_name)
# Get the new room by name
room = g_rooms[room_name]
# The player has earned the new room's value
room_value = room['value']
# Congratulate the player on his/her progress
if (room_value > 0):
player_set_score(player_name, player_get_score(player_name) + room_value)
player_add_msg(player_name, "You just earned %s points!" % room_value)
enter_special_room(player_name, room_name)
def check_take_command(player_name, command):
global g_rooms
# First use the handy-dandy get_item_name utility function
# to extract the item name from the command.
item_name = get_item_name("take", command)
if (item_name == None):
# If it isn't "take" or "take xxx" then return false, it's not
# the take command
return False
elif item_name == "":
player_add_msg(player_name, "You didn't specify what to take")
return True
# Get the current room by name
room_name = player_get_room(player_name)
room = g_rooms[room_name]
# Get the room's item list
room_item_names = room['items']
# If xxx isn't the name of an item in the room, then
# add a different error message.
if (room_item_names.count(item_name) == 0):
player_add_msg(player_name, "There is no %s in this room" % item_name)
return True
# if xxx isn't takeable, add an error message.
if not g_items[item_name]["takeable"]:
player_add_msg(player_name, "You can't take the %s" % item_name)
return True
# Otherwise, move the item from the room's list of items,
# add the item to the player's inventory,
# and print a confirmation string
room_item_names.remove(item_name)
player_add_item(player_name, item_name)
player_add_msg(player_name, "You now have the %s" % item_name)
return True
def examine_item_command(player_name, room_name, command):
global g_items
# First use the handy-dandy get_item_name utility function
# to extract the item name from the command.
item_name = get_item_name("examine", command)
# If it's just "examine" then return an appropriate
# error message.
if not item_name:
player_add_msg(player_name, '''Lexi & Emilie say - "You didn't say what to examine"''')
return
# Use the handy-dandy player_has_item utility function to check if
# the player has the item in his/her possession, and print an
# error message if not.
if not player_has_item(player_name, item_name):
# But wait - you can examine an item not in your possession if it's in the
# room with you and it's not takeable.
if item_name in g_rooms[room_name]["items"] and not g_items[item_name]["takeable"]:
# OK, drop through to following code.
pass
else:
player_add_msg(player_name, '''Lexi & Emilie say - "No way, Jose. You do not have the %s"''' % item_name)
return
item = g_items[item_name]
if item_name == "notebook" and g_items["notebook"]["open"]:
description = item["open description"]
else:
description = item["description"]
player_add_msg(player_name, '''Lexi & Emilie say - "%s"''' % description)
def drop_item_command(player_name, room_name, command):
global g_rooms
# First use the handy-dandy get_item_name utility function
# to extract the item name from the command.
item_name = get_item_name("drop", command)
# If it's just "drop" then return an appropriate
# error message.
if (not item_name):
player_add_msg(player_name, '''Sol says - "You didn't say what to drop"''')
return
# Get the current room by name
room = g_rooms[room_name]
# Get the room's item list
room_item_names = room['items']
# Use the handy-dandy player_has_item utility function to check if
# the player has the item in his/her possession, and print an
# error message if not.
if not item_name in player_get_items(player_name):
player_add_msg(player_name, '''Sol says - "Listen buddy, you don't have the %s"''' % item_name)
return
# Otherwise, remove the item from the player's inventory,
# and add it to the room's list of items, and print a
# confirmation string.
player_remove_item(player_name, item_name)
room_item_names.append(item_name)
player_add_msg(player_name, '''Sol says - "You no longer have the %s"''' % item_name)
# End of solution code
def print_intro(player_name):
player_add_msg(player_name, '''Yinneboma says - "Welcome to The Bitney Adventure Game, %s. This is the campus,
you will be exploring the grounds, finding items, and navigating in all directions
like North, South, East, West, Up and Down. Play the Bitney Adventure Game alone or
play with your friends. There are 44 rooms filled with adventure
and finding Items and Keys to Unlock locked doors. Play The Adventure that is Bitney."''' % player_name)
def print_help(player_name):
player_add_msg(player_name, '''Aschia says - "Play the game by typing commands to move north, south, east or west and
up or down in certain situations. Also you may type commands to
interact with objects in the rooms. For example: take, drop, examine, eat, etc.
And there are commands to list, look, plus bye to end the game"''')
def print_goodbye(player_name):
player_add_msg(player_name, '''Ben says - "Thank you for exploring the wonderful Bitney campus, %s.
We hope you enjoyed your adventure and learned a little bit more about the interesting
things that go on here. You have %d Bitney points"''' % (player_name, player_get_score(player_name)))
def format_item_names(item_names, verb):
if (len(item_names) > 0):
items_text = "You %s the following item%s: " % (verb, ("" if len(item_names) == 1 else "s"))
for item_name in item_names:
items_text += (item_name + ", ")
items_text = items_text[:-2] # remove that last comma & space
return items_text
else:
return None
# Print the name of every item that the player has taken.
def print_items(player_name):
# Use player_get_items to get the set of items the player has.
item_names = player_get_items(player_name)
items_text = format_item_names(item_names, "have")
if items_text == None:
items_text = "You've got nothing"
player_add_msg(player_name, items_text)
def check_general_command(player_name, room_name, command):
# TODO see if the command is one that we want to respond to, without
# doing anything. E.g. if they're swearing at us, tell them to keep
# it clean. If we don't have any match, return False.
swearing = {"fuck", "shit", "cunt", "bastard"}
for word in command.split():
if word.lower() in swearing:
player_add_msg(player_name, '''Makenna says - "Keep it clean, %s"''' % player_name)
return True
if command.startswith("hit"):
print '''Makenna says - "There's no violence allowed at Bitney!"'''
return True
return False
if command.startswith("hit "):
player_add_msg(player_name, "There's no violence allowed at Bitney!")
return True
else:
return False
def check_item_command(player_name, room_name, command):
# TODO see if the command is for any of the items in the player's
# list of items. If so, print out an appropriate message, and do
# the action, and return True so that we know the command has been
# handled.
if command == "eat donut":
if not "donut" in player_get_items(player_name):
player_add_msg(player_name, "You don't have a donut to eat")
else:
# TODO you need to remove the donut from the player's list of
# items, because they've eaten it.
player_add_msg(player_name, "Yum, that was tasty!")
player_remove_item(player_name, "donut")
# We handled the command, so return True
return True
elif command == "open notebook":
if not "notebook" in player_get_items(player_name):
player_add_msg(player_name, "You don't have a notebook to open")
elif g_items["notebook"]["open"]:
player_add_msg(player_name, "You already opened the notebook")
else:
player_add_msg(player_name, g_items["notebook"]["open description"])
g_items["notebook"]["open"] = True
return True
elif command == "close notebook":
if not "notebook" in player_get_items(player_name):
player_add_msg(player_name, "You don't have a notebook to close")
elif not g_items["notebook"]["open"]:
player_add_msg(player_name, "The notebook is already closed")
else:
player_add_msg(player_name, "You closed the notebook")
g_items["notebook"]["open"] = False
return True
else:
# We didn't handle the command, so return False
return False
def game_complete(player_name):
if player_get_score(player_name) >= 300:
player_add_msg(player_name, "Eli says - congratulations on beating the game!")
return True
else:
return False
# This is a handy utility routine that you give an action (like "take")
# and a command (like "take key"), and it returns just the item (e.g. "key")
# It lower-cases the thing being taken, so that it's easier to compare to
# the item names (keys) in g_items dictionary
def get_item_name(action, command):
if (command == action) or (command == action + " the"):
# They didn't specify what to take
return ""
elif command.startswith(action + " the "):
return command[len(action) + len(" the "):]
elif command.startswith(action + " "):
return command[len(action) + len(" "):]
else:
return None
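# For example (illustrative only): get_item_name("take", "take the key") and
# get_item_name("take", "take key") both return "key"; get_item_name("take", "take")
# returns "" because nothing was specified; and get_item_name("take", "look")
# returns None because the command is not a "take" command at all.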
# This is a debugging function that ensures all rooms are reachable, and all
# doors lead to a named room.
def check_room_graph():
current_room = g_rooms.keys().pop(0)
visited_rooms = set()
check_room(visited_rooms, current_room)
# Now verify that we visited every room.
for room in g_rooms.keys():
if not room in visited_rooms:
print "We never visited %s" % room
def check_room(visited_rooms, room):
if not room in visited_rooms:
visited_rooms.add(room)
if not room in g_rooms.keys():
print "The room %s doesn't exist" % room
return
doors = g_rooms[room]["doors"]
for door in doors.keys():
next_room = doors[door]
# print "%s from %s goes to %s" % (door, room, next_room)
check_room(visited_rooms, next_room)
# This is a debugging function that ensures all items are located in some
# room, but only one room.
def check_items():
global g_items
global g_rooms
missing_items = set(g_items.keys())
found_items = set()
for room in g_rooms.keys():
if not "items" in g_rooms[room].keys():
print "Room %s is missing its list of items" % room
return
room_items = g_rooms[room]["items"]
for room_item in room_items:
if room_item in found_items:
print "%s is in two different rooms" % room_item
elif not room_item in missing_items:
print "%s isn't a known item" % room_item
else:
# print "We found %s in %s" % (room_item, room)
found_items.add(room_item)
missing_items.remove(room_item)
for missing_item in missing_items:
print "%s is not in any room" % missing_item
def print_and_clear_msgs(player_name):
msgs = player_remove_msgs(player_name)
for msg in msgs:
print msg
# g_players is a dictionary, where the key is the player name, and the value is a "player"
# Each player is also a dictionary, where the key is one of several possible player attributes
# name: Player name
# room: Name of room the player is in
# visited_rooms: Set of room names the player has visited. It starts
# off with just the current room the player is in, and
# gets added to as a player visits new rooms.
# items: Set of items the player has taken. It starts off empty.
# When the player takes an item, it gets moved from the
# room's set of items to the player's set of items. The
# inverse happens when a player drops an item they have.
# score: The player's current score. They get points for visiting
# a room, but only the first time.
# msgs: List of messages generated for the player.
g_players = {}
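# Example (illustrative only): right after player_add("alex") the entry is
# {"name": "alex", "room": None, "visited_rooms": set(), "items": set(),
#  "score": 0, "msgs": []}
# and the player_* helper functions above mutate these fields as the game runs.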
# rooms is a dictionary, where the key is the room name, and the value is a "room"
# Each room is also a dictionary, where the key is one of several possible values
# description -> string that describes the room. This should include all doors.
# items -> list of item names for items found in that room
# value -> points for visiting the room
# doors -> dictionary that maps from a door name ("north", "up", etc) to a room name
#
# You can also have other room-specific attributed, e.g. the computer lab could have
# a "locked": True attribute, and you have to unlock it first before you can go through
# that door. Use your imagination.
g_rooms = {
"Computer Lab": {
"description": "The computer lab is filled with glowing screens and old chairs. Your back is to a white board. There is a door to the east.",
"items": ["notebook"],
"value": 5,
"doors": {"east": "Hallway"}
},
"Hallway": {
"description": "The hallway is filled with colorful murals, lockers line the western wall. The hallway extends north and south, and there are doors to the east and west.",
"items": [],
"value": 0,
"doors": {"west": "Computer Lab", "east": "Mr. Wood's Room", "north": "North Hallway", "south": "South Hallway"}
},
"North Hallway": {
"description": "The north hallway is decorated with colorful artwork. You see a door labeled 'Boys Bathroom' to your east. To the north appears to be a more open area, and to the south there is the hallway.",
"items": [],
"value": 0,
"doors": {"south": "Hallway", "north": "Atrium", "east": "Boys Bathroom"}
},
"South Hallway": {
"description": "The south hallway also holds more artwork and murals on the walls. There is the Girls Bathroom to the east, a door to the west, and an open area to the south. To the north is the hallway.",
"items": [],
"value": 0,
"doors": {"north": "Hallway", "south": "South Area", "west": "Storage Room", "east": "Girls Bathroom"}
},
"Boys Bathroom": {
"description": "The bathroom has a sink, a mirror, a urinal, and a stall. No surprises here. The stall appears to be occupied. The exit is to your west.",
"items": ["meme"],
"value": 0,
"doors": {"west": "North Hallway", "east": "Stall"}
},
"Stall": {
"description":
'''You walk in and notice the floor is flooded inside the stall yet outside it no water can be found. An incredible stench fills the air. A voice from the toilet speaks 'the price must be paid'.
Your eyes water, and you fall to the floor.''',
"description_with_deoderant":
'''You walk in and notice the floor is flooded inside the stall yet outside it no water can be found. An incredible stench fills the air, but the lavender scent from your potpourri fights back, and you manage to
remain conscious. You notice the toilet is overflowing with water.''',
"items": [],
"value": 5,
"doors": {"west": "Boys Bathroom"}
},
"Girls Bathroom": {
"description": '''A calming pink room with art in progress on the walls. It has 2 stalls, 2 sinks, 2 mirrors
and a window. The exit is to your west.''',
"items": [],
"value": 2,
"doors": {"west": "South Hallway"}
},
"Storage Room": {
"description": "The storage room is strangely unlocked. Head east to return to the South Hallway.",
"items": ["plunger"],
"value": 0,
"doors": {"east": "South Hallway"}
},
"Atrium": {
"description": "A small room with a bench and some artwork. There is a door to the west, east, and north. The North Hallway is to the south...",
"items": ["crumpled note"],
"value": 5,
"doors": {"north": "Bistro", "west": "Math Room", "east": "Atrium Deck", "south": "North Hallway"}},
"South Area":
{"description": "An area well lit by the windows. There is a table with some chairs. There is a hallway to your north, a door to your east, south, and west. There is another door on the south wall, but it is locked",
"items": [],
"value": 0,
"doors": {"north": "South Hallway", "south": "Spanish Room", "west": "Mrs. Simpton's Room", "east": "Basketball Court"}},
"Mr. Wood's Room":
{"description": "The messy room of a maddened artist. This room is barely lit and has many tables. The walls are covered in propaganda all in different languages. There are two white bords one has a map of america and the other has a map of Russia. There is a life sized statue of George Washington in the room. There is a door to the north, east, and west",
"items": ["statue"],
"value": 1,
"preposition": "",
"doors": {"north": "Art Closet", "west": "Hallway", "east": "Picnic Tables"}},
"Art Closet":
{"description": "A cluttered confusion of art supplies. The odor of paint fills your nose. the exit is to your south.",
"items": [],
"value": 0,
"doors": {"south": "Mr. Wood's Room"}},
"Math Room":
{"description": "There are some tables and... math books? There is a door to the north and a door to the east.",
"items": [],
"value": 0,
"doors": {"north": "Atrium Deck", "east": "Atrium"}},
"Bistro":
{"description": "The place to be at lunch time. The bistro is a small closet of a room with quotes on every inch of the wall. It contains an abundance of tasty lunch-time snacks. there is a door to the east and a door to the south.",
"items": ["key", "donut"],
"value": 10,
"doors": {"south": "Atrium", "east": "Atrium Deck"}},
"Spanish Room":
{"description": "Mrs. Phillips is single-handedly teaching Spanish to all grades in a small, rectangular room. the exit is to the north.",
"items": [],
"value": 3,
"doors": {"north": "South Area"}},
"Atrium Deck":
{"description": "You end up outside on a deck. To the east you see some teachers talking in their area. To the west is a door",
"items": [],
"value": 5,
"doors": {"east": "Teacher Area", "west": "Atrium"}},
"Picnic Tables": {
"description": '''You are outside under a green tent, surrounded by green picnic tables whose tops are
polka-dotted with paint and bare spots. There is the remnant of a freshman's lunch on a table. To the
north are some teachers talking in their area. To the west is a door. To the south is a basketball court
that has a few cars parked on it.''',
"items": [],
"value": 10,
"doors": {"north": "Teacher Area", "south": "Basketball Court", "west": "Mr. Wood's Room", "east": "Fence Post"}
},
"Fence Post": {
"description": '''You just whacked your head into a fence post. Head west or south to turn around a different
direction''',
"items": [],
"value": 50,
"doors": {"west": "Picnic Tables", "south": "Greenhouse"}
},
"Greenhouse":
{"description": "you just whacked your head into the greenhouse.... that must have hurt.... head west to take a break at the Basketball Court tables",
"items": [],
"value": 40,
"doors": {"north": "Fence Post", "west": "Basketball Court"}},
"Teacher Area":
{"description": "you listen in on the teachers as they are discussing a student with low grades. they shoo you off claiming the conversation is confidential. to the north there is the parking area, to the south there is the Picnic tables. to the west there is a deck of sorts",
"items": [],
"value": 5,
"doors": {"north": "Parking Area", "south": "Picnic Tables", "west": "Atrium Deck"}},
"Mrs. Simpton's Room":
{"description": "A single windowed room with mysterious symbols on the walls. It smells strongly of body oder. The exit is to the east... better hurry! It smells!",
"items": ["deoderant"],
"value": 10,
"preposition": "",
"doors": {"east": "South Area"}},
"Science Room":
{"description": "A rather large room full of desks, chairs, and science tools. There are doors to the north, east, and south.",
"items": ["pop tart"],
"value": 10,
"doors": {"north": "Science Bathroom", "east": "Secret Hallway", "south": "Parking Area"}},
"Mr. Elkin's Car":
{"description": "A brown scion is parked, and Mr. Elkin is there, happily chewing on a sandwich. You look around, and notice a deck to your south, another door off to your north, and the base of some stairs to your west. There is also the smiley guys parking lot to the east. elkin warns you not to go because you may get run over, but you may try anyways.",
"items": [],
"value": 30,
"preposition": "",
"doors": {"east": "Smiley Guys Parking Lot", "south": "Office Porch", "west": "Base of Stairs", "north": "Humanities Hall"}},
"Smiley Guys Parking Lot":
{"description": "*crunch* *slam* *honk*. you just got hit by a car and are dead. But, seeing as you are new here, we'll give you a second chance at life. enter 'respawn' if you wish to try again",
"items": [],
"value": 100,
"doors": {"respawn": "Mr. Elkin's Car"}},
"Secret Hallway":
{"description": "A small unlit hallway with a door at either end, not very exciting. not sure why it's 'Secret'. there is a door to the west and east",
"items": [],
"value": 5,
"doors": {"east": "Humanities Hall", "west": "Science Room"}},
"Science Bathroom":
{"description": "A small bathroom with random scribblings on the wall, a painting made by Mr. Wood hangs above the toilet. The exit is to the south.",
"items": ["random number"],
"value": 20,
"doors": {"south": "Science Room"}},
"Humanities Bathroom":
{"description": "A cramped bathroom, the walls are painted a vibrant orange color. the exit is to the south.",
"items": [],
"value": 10,
"doors": {"south": "Humanities Hall"}},
"Humanities Hall":
{"description": "Several long tables form a 'U' shape facing a podium. There is an odd door to the north. there are also doors to the east, south, and west.",
"items": [],
"value": 20,
"doors": {"east": "Kill Room", "west": "Secret Hallway", "south": "Mr. Elkin's Car", "north": "Humanities Bathroom"}},
"Kill Room":
{"description": "Completely dark.... the clanking sounds of folded chairs can be hears. the exit is to the south.",
"items": [],
"value": 10,
"doors": {"west": "Humanities Hall"}},
"Parking Area":
{"description": "There are a bunch of parked cars around you. To the north you see a door labeled 'science'. to the east, you see a set of stairs. To the south you see a group of teachers talking.",
"items": [],
"value": 30,
"doors": {"north": "Science Room", "south": "Teacher Area", "east": "Base of Stairs"}},
"Base of Stairs":
{"description": "you find yourself at a base of stairs. you can either go south and go up, or you can west to the parking area, or east over the Mr. Elkin",
"items": [],
"value": 0,
"doors": {"south": "Back Porch", "east": "Mr. Elkin's Car", "west": "Parking Area"}},
"Back Porch":
{"description": "you find yourself on a porch to the back of the office building. you can either go north down the stairs in the direction of the parking area, or you can go east to a door that leads inside.",
"items": [],
"value": 10,
"doors": {"north": "Base of Stairs", "east": "Upstairs Area"}},
"Upstairs Area":
{"description": "There is a long table with many chairs around it. There are four doors, but two are labeled off limits. there is a door open to the north and south. there is also a door to the west. you also notice two sets of stairs heading down to a landing... you can go that way by commanding 'down'",
"items": [],
"value": 20,
"doors": {"north": "Russ' Office", "south": "Kitchen", "west": "Back Porch", "down": "Stair Landing"}},
"Parking Lot":
{"description": "*BRAAAAP*, you just got slammed by Dave on his bike. 'respawn' if you want to try and live again",
"items": [],
"value": 90,
"doors": {"respawn": "Office Porch"}},
"Office Porch":
{"description": "This is where the cool kids chill out. to the north you see Mr. elkin chewing on a sandwich. to the west there is a great wooden double door. to the east is the parking lot, which looks dangerous, but you may try to escape there for some food...",
"items": [],
"value": 40,
"doors": {"north": "Mr. Elkin's Car", "east": "Parking Lot", "west": "Lobby"}},
"Basketball Court":
{"description": "There is a basketball hoop with cars parked around it. not very good for playing basketball. there are picnic tables to the north, a door off the west, and a greenhouse to the east....",
"items": ["basketball"],
"value": 10,
"doors": {"north": "Picnic Tables", "west": "South Area", "east": "Greenhouse"}},
"Lobby":
{"description": "The lobby is a place where people go to chill. There are stairs to the west, there is a door to the south and east, and an approachable desk to the north",
"items": [],
"value": 2,
"doors": {"west": "Stair Landing", "north": "Angelina's Desk Area", "south": "Mr. Young's Room", "east": "Office Porch"}},
"Angelina's Desk Area":
{"description": "part of the office lobby where Angelina resides. There is a door to the north and west. A lobby is to your south",
"items": [],
"value": 10,
"preposition": "",
"doors": {"west": "Teacher's Lounge", "north": "Dave's Office", "south": "Lobby"}},
"Stair Landing":
{"description": "there are two stairs leading up to an upper area. go east to return to the lobby, command 'up' if you wish to go up",
"items": [],
"value": 0,
"doors": {"up": "Upstairs Area", "east": "Lobby"}},
"Mr. Young's Room":
{"description": "Where the Pop Tart king resides. This room is full of light from the windows. there is a cart, a desk, and a projector. To the north and west there are doors",
"items": [],
"value": 20,
"preposition": "",
"doors": {"north": "Lobby", "west": "Office Bathroom"}},
"Dave's Office":
{"description": "A small office with a round table, and a desk with a Mac on it. the exit is to the south",
"items": [],
"value": 20,
"preposition": "",
"doors": {"south": "Angelina's Desk Area"}},
"Teacher's Lounge":
{"description": "There is a table with chairs surrounding it, and a printer in the corner. There are several bookshelves. There is also a closet that has a sign saying 'KEEP OUT'. the exit is to the east.",
"items": [],
"value": 5,
"doors": {"east": "Angelina's Desk Area"}},
"Russ' Office":
{"description": "The room where Russ resides and handles the daily responsiblies of a principal which is upstairs of the office. the exit is to the south",
"items": [],
"value": 30,
"preposition": "",
"doors": {"south": "Upstairs Area"}},
"Kitchen":
{"description": "This room contains a refridgerator, stove, sink, and countertops. A couple windows. the exit is to the north.",
"items": [],
"value": 20,
"doors": {"north": "Upstairs Area"}},
"Office Bathroom":
{"description": "A bathroom that smells weird. there is a small window that is slightly ajar... not big enough to fit through. the exit is to the east.",
"items": [],
"value": 0,
"doors": {"east": "Mr. Young's Room"}},
}
# items is a dictionary, where the key is the item name, and the value is an "item"
# Each item is also a dictionary, where the key is one of several possible values
# description -> string that describes the item
# takeable -> boolean for whether the item can be taken or not.
#
# You can also have other item-specific attributes, e.g. a bottle of water could have
# an "empty": False attribute, and this changes to True after you've had a drink.
# Use your imagination.
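# For instance, a hypothetical item with an extra attribute might look like
# this (a sketch only, not part of the actual game data):
#
#   "water bottle": {
#       "description": "A half-full plastic water bottle.",
#       "takeable": True,
#       "empty": False,    # would flip to True once the player drinks from it
#   },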
g_items = {
"notebook": {
"open": False,
"description":
'''It's a typical lab notebook with a red cover''',
# Description after the notebook has been opened
"open description":
'''The notebook contains all kinds of complex diagrams, equations, assignments
(many with very low grades), etc. in a completely random order. None of the
pages have any students' names on them, but Mr. Schneider has obviously written
in the name "Peggy???" in red ink on several of the graded assignments.''',
"takeable": True
},
"plunger": {
"description": "You are holding a typical toilet plunger with a 3ft long wooden handle.",
"takeable": True
},
"key": {
"description": "It's a small, nondescript key",
"takeable": True
},
"crumpled note": {
"description":
'''You are holding a sheet of paper that was crumpled up into a ball before it was seemingly discarded. It reads,
"I can't find my stupid physics binder anywhere! Mr. Schneider is going to kill me when I get to class."''',
"takeable": True
},
"donut": {
"description": "a chocolate donut with multicolored sprinkles",
"takeable": True
},
"meme": {
"description":
'''an element of a culture or system of behavior that may be considered to be passed from one individual
to another by nongenetic means, especially imitation.''',
"takeable": True
},
"basketball": {
"description":
'''It's a basketball, 'nuff said''',
"takeable": True
},
"statue": {
"description":
'''A life-sized bronze statue of George Washington.''',
"takeable": False
},
"random number": {
"description":
'''An unknown phone number written on the wall''',
"takeable": False
},
"deoderant": {
"description": "A basket of potpourri with a lovely lavdendar scent",
"takeable": True
},
"pop tart": {
"description": "this pop tart has a smiling alien face on time",
"takeable": False
},
}
# ============================================================
# Start of the main game
# ============================================================
def player_start(player_name, room_name="Hallway"):
player_add(player_name)
# Print out the welcome message
player_add_msg(player_name, "")
print_intro(player_name)
# Start the player in room_name.
player_set_room(player_name, room_name)
# Print out where the user is starting.
print_room_description(player_name, player_get_room(player_name))
def player_command(player_name, command):
# FUTURE a better/cleaner approach would be to take the command that the
# user entered and split it into the first word and the rest of the command.
# Then use a dictionary to map from the first word (e.g. "take") to a function
# that knows how to handle that command. So we'd have functions for handling
# commands like "bye", "help", "list", etc.
# Lower-case, so that "Bye" is the same as "bye", and remove trailing
# and leading spaces to simplify parsing.
command = command.lower().strip()
# See if the command is one of our special commands.
if command == "bye":
# Print a goodbye message, and tell the caller we're all done.
print_goodbye(player_name)
return False
if command == "help":
print_help(player_name)
elif command == "list":
print_items(player_name)
elif command == "look":
print_room_description(player_name, player_get_room(player_name))
elif command == "check":
check_room_graph()
check_items()
elif check_take_command(player_name, command):
None
elif command.startswith("drop"):
drop_item_command(player_name, player_get_room(player_name), command)
elif command.startswith("examine"):
examine_item_command(player_name, player_get_room(player_name), command)
# See if the command is the name of a door
elif check_move_command(player_name, player_get_room(player_name), command):
# OK, it was a move command... we need to put something here so Python
# is happy (can't have an empty elif block)
None
# See if the command is an action on an item the player
# has, in the appropriate room. If so, take that action
# on that item.
elif check_item_command(player_name, player_get_room(player_name), command):
# OK, it was an action on an item... we need to put something here so Python
# is happy (can't have an empty elif block)
None
# See if the command is something we want to respond to
# with special text.
elif check_general_command(player_name, player_get_room(player_name), command):
# We responded... we need to put something here so Python
# is happy (can't have an empty elif block)
None
# No idea what they want to do
else:
player_add_msg(player_name, "I don't understand that")
return True
|
kkrugler/bitney-adventure
|
bag_engine.py
|
Python
|
apache-2.0
| 45,756
|
[
"exciting"
] |
6ddcad05ec59af4420c8ac26d7802e0b3b60e69f1b526ed284069ccb8f94b7ba
|
"""
gal v0.01.1
ga-bitbot-revived based on:
ga-bitbot application / system launcher
Copyright 2011 Brian Monkaba
This file is part of ga-bitbot.
ga-bitbot is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
ga-bitbot is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with ga-bitbot. If not, see <http://www.gnu.org/licenses/>.
"""
__appversion__ = "0.01.1"
print "ga-bitbot application system launcher v%s"%__appversion__
import atexit
import sys
from subprocess import check_output as call, Popen, PIPE
import shlex
from os import environ
import os
from time import *
import hashlib
import random
import __main__
import paths
from load_config import *
import gene_server_config
import xmlrpclib
import json
import gc
def make_pid():
#simple function which spits out a random hex code
#which is used to set globally unique process ids for spawned clients
md = hashlib.md5()
md.update(str(time()) + str(random.random() * 1000000))
return md.hexdigest()[0:16]
print "-"*80
print "\n\tCommand line options:\n\t\tserver\t\tlaunches only the server components\n\t\tclient\t\tlaunches only the client components\n\t\tall\t\tlaunches all components"
print "\n\tThe default configuration is 'all'"
print "-"*80
run_client = 1
run_server = 1
mode = ""
if len(sys.argv) >= 2:
if sys.argv[1] == 'all':
mode = 'all'
run_client = 1
run_server = 1
print "gal: launching all components"
if sys.argv[1] == 'client':
mode = 'client'
run_client = 1
run_server = 0
print "gal: launching client components only"
if sys.argv[1] == 'xlclient':
mode = 'xlclient'
run_client = 1
run_server = 0
print "gal: launching xlclient components only"
if sys.argv[1] == 'server':
mode = 'server'
run_client = 0
run_server = 1
print "gal: launching server components only"
else:
mode = 'all'
print "gal: launching all components"
sleep(3)
#the variable values below are superseded by the configuration loaded from the
#configuration file global_config.json
#!!!!!!!! to change the values edit the json configuration file NOT the variables below !!!!!!!!
WATCHDOG_TIMEOUT = 60 #seconds
MONITORED_PROCESS_LAUNCH_TIMEOUT = 20 #seconds
GENE_SERVER_STDERR_FILE = "/dev/null"
BCFEED_STDERR_FILE = "/dev/null"
WC_SERVER_STDERR_FILE = "/dev/null"
REPORT_GEN_STDERR_FILE = "/dev/null"
GTS_STDERR_FILE = "/dev/null"
config_loaded = False
#load config
try:
__main__ = load_config_file_into_object('global_config.json',__main__)
except:
print "gal: Error detected while loading the configuration. The application will now exit."
import sys
sys.exit()
else:
if config_loaded == False:
print "gal: Configuration failed to load. The application will now exit."
import sys
sys.exit()
else:
print "gal: Configuration loaded."
#open a null file to redirect stdout/stderr from the launched subprocesses
fnull = open(os.devnull,'w')
if GENE_SERVER_STDERR_FILE == "/dev/null":
GENE_SERVER_STDERR_FILE = fnull
else:
GENE_SERVER_STDERR_FILE = open(GENE_SERVER_STDERR_FILE,'a')
if BCFEED_STDERR_FILE == "/dev/null":
BCFEED_STDERR_FILE = fnull
else:
BCFEED_STDERR_FILE = open(BCFEED_STDERR_FILE,'a')
if WC_SERVER_STDERR_FILE == "/dev/null":
WC_SERVER_STDERR_FILE = fnull
else:
WC_SERVER_STDERR_FILE = open(WC_SERVER_STDERR_FILE,'a')
if REPORT_GEN_STDERR_FILE == "/dev/null":
REPORT_GEN_STDERR_FILE = fnull
else:
REPORT_GEN_STDERR_FILE = open(REPORT_GEN_STDERR_FILE,'a')
if GTS_STDERR_FILE == "/dev/null":
GTS_STDERR_FILE = fnull
else:
GTS_STDERR_FILE = open(GTS_STDERR_FILE,'a')
#configure gts clients based on the mode of operation (all,server or client)
#
# all - balanced
# server - focused on updating scores
# client - focused on finding new genes
#
# At least one gts instance in each mode should not run with the get_config option
# to make sure any new gene_def.json configs get loaded into the db.
#
all_monitored_launch = ['pypy gts.py all n run_once pid ',\
'pypy gts.py 3 n run_once get_config pid ',\
'pypy gts.py 3 y run_once get_config pid ',\
'pypy gts.py 4 n run_once get_config pid ',\
'pypy gts.py 4 y run_once get_config pid ',\
'pypy gts.py all y run_once get_config score_only pid ',\
'pypy gts.py all y run_once get_config pid ']
server_monitored_launch = ['pypy gts.py all y run_once pid ',\
'pypy gts.py 1 y run_once get_config score_only pid ',\
'pypy gts.py 2 y run_once get_config score_only pid ',\
'pypy gts.py 3 y run_once get_config score_only pid ',\
'pypy gts.py 3 y run_once get_config score_only pid ',\
'pypy gts.py 4 y run_once get_config score_only pid ',\
'pypy gts.py 4 y run_once get_config score_only pid ']
client_monitored_launch = ['pypy gts.py all n run_once pid ',\
'pypy gts.py 1 n run_once get_config pid ',\
'pypy gts.py 2 n run_once get_config pid ',\
'pypy gts.py 3 n run_once get_config pid ',\
'pypy gts.py 3 y run_once get_config pid ',\
'pypy gts.py 4 n run_once get_config pid ',\
'pypy gts.py 4 y run_once get_config pid ',\
'pypy gts.py all y run_once get_config pid ']
xlclient_monitored_launch = ['pypy gts.py all n run_once pid ',\
'pypy gts.py 1 n run_once get_config pid ',\
'pypy gts.py 2 n run_once get_config pid ',\
'pypy gts.py 3 n run_once get_config pid ',\
'pypy gts.py 3 n run_once get_config pid ',\
'pypy gts.py 4 n run_once get_config pid ',\
'pypy gts.py 4 n run_once get_config pid ',\
'pypy gts.py 3 y run_once get_config pid ',\
'pypy gts.py 3 y run_once get_config pid ',\
'pypy gts.py 4 y run_once get_config pid ',\
'pypy gts.py 4 y run_once get_config pid ',\
'pypy gts.py 1 n run_once get_config pid ',\
'pypy gts.py 2 n run_once get_config pid ',\
'pypy gts.py 3 n run_once get_config pid ',\
'pypy gts.py 3 n run_once get_config pid ',\
'pypy gts.py 4 n run_once get_config pid ',\
'pypy gts.py 4 n run_once get_config pid ',\
'pypy gts.py 3 y run_once get_config pid ',\
'pypy gts.py 3 y run_once get_config pid ',\
'pypy gts.py 4 y run_once get_config pid ',\
'pypy gts.py 4 y run_once get_config pid ',\
'pypy gts.py all y run_once get_config pid ']
if mode == 'all':
monitored_launch = all_monitored_launch
if mode == 'server':
monitored_launch = server_monitored_launch
if mode == 'client':
monitored_launch = client_monitored_launch
if mode == 'xlclient':
monitored_launch = xlclient_monitored_launch
unmonitored_launch = ['pypy wc_server.py','pypy report_gen.py']
monitor = {} #variables to track monitored/unmonitored processes
no_monitor = []
def terminate_process(process):
if sys.platform == 'win32':
import ctypes
PROCESS_TERMINATE = 1
pid = process.pid
handle = ctypes.windll.kernel32.OpenProcess(PROCESS_TERMINATE, False, pid)
ctypes.windll.kernel32.TerminateProcess(handle, -1)
ctypes.windll.kernel32.CloseHandle(handle)
else:
if process.poll() == None:
process.terminate()
process.wait()
# create and register callback function to do a clean shutdown of the system on exit.
def shutdown():
global monitor
global no_monitor
global run_server
for p in no_monitor:
terminate_process(p)
for pid in monitor.keys():
terminate_process(monitor[pid]['process'])
sys.stderr = fnull
if run_server:
server.shutdown()
atexit.register(shutdown)
#capture the price feeds regardless of client or server mode ## commented out bcfeed_synch.py and bcfeed.py to allow for manual starting
#servers need it for reporting and clients need it for processing
#update the dataset
#print "gal: Synching the local datafeed..."
#Popen(shlex.split('python bcfeed_synch.py -d')).wait()
#launch the bcfeed script to collect data from the live feed
#print "gal: Starting the live datafeed capture script..."
#p = Popen(shlex.split('python bcfeed.py'),stdin=fnull, stdout=fnull, stderr=BCFEED_STDERR_FILE)
#no_monitor.append(p)
if run_server:
print "gal: Launching the xmlrpc server..."
Popen(shlex.split('pypy gene_server.py'),stdin=fnull, stdout=fnull, stderr=GENE_SERVER_STDERR_FILE)
sleep(5) #give the server time to start
# connect to the xml server
#
__server__ = gene_server_config.__server__
__port__ = str(gene_server_config.__port__)
server = xmlrpclib.Server('http://' + __server__ + ":" + __port__)
print "gal: connected to gene server ",__server__,":",__port__
if mode == 'all' or mode == 'server':
print "gal: gene server db restore: ",server.reload()
print "gal: Launching GA Clients..."
#collect system process PIDS for monitoring.
#(not the same as system OS PIDs -- They are more like GUIDs as this is a multiclient distributed system)
epl = json.loads(server.pid_list()) #get the existing pid list
#start the monitored processes
for cmd_line in monitored_launch:
new_pid = make_pid()
p = Popen(shlex.split(cmd_line + new_pid),stdin=fnull, stdout=fnull, stderr=GTS_STDERR_FILE)
retry = MONITORED_PROCESS_LAUNCH_TIMEOUT
while retry > 0:
sleep(1)
cpl = json.loads(server.pid_list()) #get the current pid list
npl = list(set(epl) ^ set(cpl)) #find the new pid(s)
epl = cpl #update the existing pid list
if new_pid in npl:
monitor.update({new_pid:{'cmd':cmd_line,'process':p}}) #store the pid/cmd_line/process
print "gal: Monitored Process Launched (PID:",new_pid,"CMD:",cmd_line,")"
break
else:
retry -= 1
if retry == 0:
print "gal: ERROR: Monitored Process Failed to Launch","(CMD:",cmd_line,")"
if run_server:
#start unmonitored processes
for cmd_line in unmonitored_launch:
p = Popen(shlex.split(cmd_line),stdin=fnull, stdout=fnull, stderr=fnull)
print "gal: Unmonitored Process Launched (CMD:",cmd_line,")"
no_monitor.append(p) #store the popen instance
sleep(1) #wait a while before starting the report_gen script
print "\ngal: Monitoring Processes..."
count = 0
while 1:
gc.collect()
if run_server:
count += 1
#periodically tell the server to save the gene db
if count == 50:
count = 0
server.save()
if run_client == 0:
sleep(30)
#process monitor loop
for pid in monitor.keys():
sleep(5) #check one pid every n seconds.
if server.pid_check(pid,WATCHDOG_TIMEOUT) == "NOK":
#watchdog timed out
print "gal: WATCHDOG: PID",pid,"EXPIRED"
#remove the expired PID
server.pid_remove(pid)
epl = json.loads(server.pid_list()) #get the current pid list
cmd_line = monitor[pid]['cmd']
#terminate the process
terminate_process(monitor[pid]['process'])
monitor.pop(pid)
#launch new process
launching = 1
while launching == 1:
new_pid = make_pid()
p = Popen(shlex.split(cmd_line + new_pid),stdin=fnull, stdout=fnull, stderr=GTS_STDERR_FILE)
retry = MONITORED_PROCESS_LAUNCH_TIMEOUT
while retry > 0:
sleep(1)
cpl = json.loads(server.pid_list()) #get the current pid list
npl = list(set(epl) ^ set(cpl)) #find the new pid(s)
epl = cpl #update the existing pid list
if new_pid in npl:
monitor.update({new_pid:{'cmd':cmd_line,'process':p}}) #store the pid/cmd_line/process
print "gal: Monitored Process Launched (PID:",new_pid,"CMD:",cmd_line,")"
launching = 0
break
else:
retry -= 1
if retry == 0:
print "gal: ERROR: Monitored Process Failed to Launch","(CMD:",cmd_line,")"
fnull.close()
|
iamkingmaker/ga-bitbot-revived
|
gal.py
|
Python
|
gpl-3.0
| 12,452
|
[
"Brian"
] |
45d4e76379dc4251bf66a2f5a90b4108e0dca863e4789b15967d037e77a9b64c
|
import flask
import os
from gevent.pywsgi import WSGIServer
app = flask.Flask(__name__, static_path="/unused")
_basedir = os.path.join("..", os.path.dirname(__file__))
PORT=5009
"""this is a simple server to facilitate developing the docs. by
serving up static files from this server, we avoid the need to use a
symlink.
"""
@app.route('/')
def welcome():
return """
<h1>Welcome to the Bokeh documentation server</h1>
You probably want to go to <a href="/index.html"> Index</a>
"""
@app.route('/<path:filename>')
def send_pic(filename):
return flask.send_from_directory(
os.path.join(_basedir,"sphinx/_build/html/"), filename)
if __name__ == "__main__":
#app.run(port=PORT)
http_server = WSGIServer(('', PORT), app)
print "\nStarting Bokeh plot server on port %d..." % PORT
print "Visit http://localhost:%d/index.html to see plots\n" % PORT
pid = os.fork()
if pid != 0:
# Parent process
http_server.serve_forever()
else:
# Child process
import time
import webbrowser
time.sleep(0.5)
webbrowser.open("http://localhost:%d/index.html"%PORT, new="tab")
|
sahat/bokeh
|
sphinx/docserver.py
|
Python
|
bsd-3-clause
| 1,178
|
[
"VisIt"
] |
8a7fee4e663154bdf1bcfb97ed97eb220d317310429acb08235fa0814ec1cf94
|
# (C) 2013, Markus Wildi, markus.wildi@bluewin.ch
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Or visit http://www.gnu.org/licenses/gpl.html.
#
import unittest
import subprocess
import os
import sys
import pwd
import grp
import signal
import logging
import time
import glob
from rts2.json import JSONProxy
from rts2saf.config import Configuration
from rts2saf.environ import Environment
from rts2saf.createdevices import CreateFilters, CreateFilterWheels, CreateFocuser, CreateCCD
from rts2saf.checkdevices import CheckDevices
from rts2saf.focus import Focus
## ToDo ugly
if not os.path.isdir('/tmp/rts2saf_log'):
os.mkdir('/tmp/rts2saf_log')
logging.basicConfig(filename='/tmp/rts2saf_log/unittest.log', level=logging.INFO, format='%(asctime)s %(levelname)s %(message)s')
logger = logging.getLogger()
class Args(object):
def __init__(self):
pass
class RTS2Environment(unittest.TestCase):
def tearDown(self):
processes=['rts2-centrald','rts2-executor', 'rts2-xmlrpcd','rts2-focusd-dummy','rts2-filterd-dummy', 'rts2-camd-dummy']
p = subprocess.Popen(['ps', 'aux'], stdout=subprocess.PIPE)
out, err = p.communicate()
for line in out.splitlines():
# wildi 7432 0.0 0.1 24692 5192 pts/1 S 17:34 0:01 /usr/local/bin/rts2-centrald
itms= line.split()
exe= itms[10].split('/')[-1]
if self.uid in itms[0] and exe in processes:
pid = int(itms[1])
os.kill(pid, signal.SIGTERM)
# remove the lock files
for fn in glob.glob(self.lockPrefix):
os.unlink (fn)
def setUp(self):
# by name
self.uid=pwd.getpwuid(os.getuid())[0]
self.gid= grp.getgrgid(os.getgid())[0]
# lock prefix
self.lockPrefix = '/tmp/rts2_{}'.format(self.uid)
# sometimes they are present
self.tearDown()
# set up rts2saf
# read configuration
self.rt = Configuration(logger=logger)
self.ev=Environment(debug=False, rt=self.rt,logger=logger)
self.fileName='./rts2saf-bootes-2-autonomous.cfg'
self.success=self.rt.readConfiguration(fileName=self.fileName)
# set up RTS2
# rts2-centrald
cmd=[ '/usr/local/bin/rts2-centrald',
'--run-as', '{}.{}'.format(self.uid,self.gid),
'--local-port', '1617',
'--logfile', '/tmp/rts2saf_log/rts2-debug',
'--lock-prefix', self.lockPrefix,
'--config', './rts2-unittest.ini'
]
self.p_centrald= subprocess.Popen(cmd)
# rts2-executor
cmd=[ '/usr/local/bin/rts2-executor',
'--run-as', '{}.{}'.format(self.uid,self.gid),
'--lock-prefix', self.lockPrefix,
'--config', './rts2-unittest.ini',
'--server', '127.0.0.1:1617',
'--noauth'
]
self.p_exec= subprocess.Popen(cmd)
# rts2-xmlrpcd
cmd=[ '/usr/local/bin/rts2-xmlrpcd',
'--run-as', '{}.{}'.format(self.uid,self.gid),
'--lock-prefix', self.lockPrefix,
'--config', './rts2-unittest.ini',
'--server', '127.0.0.1:1617',
'-p', '9999',
'--noauth'
]
self.p_xmlrpcd= subprocess.Popen(cmd)
# rts2-focusd-dummy
focName=self.rt.cfg['FOCUSER_NAME']
cmd=[ '/usr/local/bin/rts2-focusd-dummy',
'--run-as', '{}.{}'.format(self.uid,self.gid),
'--lock-prefix', self.lockPrefix,
'--server', '127.0.0.1:1617',
'-d', focName,
'--modefile', './f0.modefile'
]
self.p_focusd_dummy= subprocess.Popen(cmd)
# rts2-filterd-dummy
ftwns=list()
for ftwn in self.rt.cfg['inuse']:
ftwns.append(ftwn)
cmd=[ '/usr/local/bin/rts2-filterd-dummy',
'--run-as', '{}.{}'.format(self.uid,self.gid),
'--lock-prefix', self.lockPrefix,
'--server', '127.0.0.1:1617',
'-d', ftwn
]
ftnames=str()
for ftn in self.rt.cfg['FILTER WHEEL DEFINITIONS'][ftwn]:
ftnames += '{}:'.format(ftn)
if len(ftnames)>0:
cmd.append('-F')
cmd.append(ftnames)
self.p_filterd_dummy= subprocess.Popen(cmd)
# rts2-camd-dummy
name=self.rt.cfg['CCD_NAME']
# '--wheeldev', 'COLWSLT', '--filter-offsets', '1:2:3:4:5:6:7:8'
cmd=[ '/usr/local/bin/rts2-camd-dummy',
'--run-as', '{}.{}'.format(self.uid,self.gid),
'--lock-prefix', self.lockPrefix,
'--server', '127.0.0.1:1617',
'-d', name,
'--focdev', focName
]
for nm in ftwns:
cmd.append('--wheeldev')
cmd.append(nm)
if nm in self.rt.cfg['inuse'][0]:
cmd.append('--filter-offsets')
cmd.append('1:2:3:4:5:6:7:8')
self.p_camd_dummy= subprocess.Popen(cmd)
#
time.sleep(20)
def setupDevices(self, blind=False):
# setup rts2saf
# fake arguments
self.args=Args()
self.args.sxDebug=False
self.args.blind=blind
self.args.verbose=False
self.args.check=True
self.args.fetchOffsets=True
self.args.exposure= 1.887
self.args.catalogAnalysis=False
self.args.Ds9Display=False
self.args.FitDisplay=False
self.args.flux=True
self.args.dryFitsFiles='../samples_bootes2'
# JSON
self.proxy=JSONProxy(url=self.rt.cfg['URL'],username=self.rt.cfg['USERNAME'],password=self.rt.cfg['PASSWORD'])
# create Focuser
self.foc= CreateFocuser(debug=False, proxy=self.proxy, check=self.args.check, rt=self.rt, logger=logger).create()
# create filters
fts=CreateFilters(debug=False, proxy=self.proxy, check=self.args.check, rt=self.rt, logger=logger).create()
# create filter wheels
ftwc= CreateFilterWheels(debug=False, proxy=self.proxy, check=self.args.check, rt=self.rt, logger=logger, filters=fts, foc=self.foc)
ftws=ftwc.create()
if not ftwc.checkBounds():
logger.error('setupDevice: filter focus ranges out of bounds, exiting')
sys.exit(1)
# create ccd
ccd= CreateCCD(debug=False, proxy=self.proxy, check=self.args.check, rt=self.rt, logger=logger, ftws=ftws, fetchOffsets=self.args.fetchOffsets).create()
cdv= CheckDevices(debug=False, proxy=self.proxy, blind=self.args.blind, verbose=self.args.verbose, ccd=ccd, ftws=ftws, foc=self.foc, logger=logger)
cdv.summaryDevices()
cdv.printProperties()
cdv.deviceWriteAccess()
dryFitsFiles=None
if self.args.dryFitsFiles:
dryFitsFiles=glob.glob('{0}/{1}'.format(self.args.dryFitsFiles, self.rt.cfg['FILE_GLOB']))
if len(dryFitsFiles)==0:
logger.error('setupDevice: no FITS files found in:{}'.format(self.args.dryFitsFiles))
logger.info('setupDevice: download a sample from wget http://azug.minpet.unibas.ch/~wildi/rts2saf-test-focus-2013-09-14.tgz')
sys.exit(1)
# ok, everything is there
self.rts2safFoc= Focus(debug=False, proxy=self.proxy, args=self.args, dryFitsFiles=dryFitsFiles, ccd=ccd, foc=self.foc, ftws=ftws, rt=self.rt, ev=self.ev, logger=logger)
|
xyficu/rts2
|
scripts/rts2saf/unittest/rts2_environment.py
|
Python
|
gpl-2.0
| 8,251
|
[
"VisIt"
] |
dfc47376d62b5bbeeedcca9708316865435729ca70024b962b9761f4c12a2861
|
"""
Copyright (C) 2004-2015 Pivotal Software, Inc. All rights reserved.
This program and the accompanying materials are made available under
the terms of the under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from datetime import datetime
from math import sqrt
import inspect
import new
import os
import tinctest
from tinctest import TINCTestCase
from tinctest import logger
@tinctest.skipLoading("Test model. No tests loaded.")
class PerformanceTestCase(TINCTestCase):
"""
This is an abstract class and cannot be instantiated directly.
PerformanceTestCase provides metadata and functionality that
is common to all performance tests
@metadata: repetitions: repeat the execution of the test this many times (default: 1)
@metadata: baseline: a file containing the baseline results (default: None)
@metadata: threshold: if a test performs worse than baseline by threshold percentage,
the test is considered failed (default: 5)
"""
def __init__(self, methodName="runTest"):
self.repetitions = None
self.threshold = None
self.baseline = None
self._orig_testMethodName = methodName
super(PerformanceTestCase, self).__init__(methodName)
def _infer_metadata(self):
super(PerformanceTestCase, self)._infer_metadata()
self.repetitions = int(self._metadata.get('repetitions', '1'))
self.threshold = int(self._metadata.get('threshold', '5'))
self.baseline = self._metadata.get('baseline', None)
def run(self, result=None):
"""
This implementation of run method will generate a new test method that will
run the actual test method 'self.repetitions' number of times and gather
performance stats for the test method such as avg , maxruntime etc
@type result: TINCTextTestResult
@param result: The result object to be used for this particular test instance.
"""
"""
# XXX - Should revisit this approach later. Directly cloning the
# method definition using lambda or new.instancemethod does not
# seem to work well which is what we are trying to do here.
# Currently, we are going with the approach of creating a new test method
# that calls the original test method repetitively and changing
# self._testMethodName to point to the newly created method.
# The bug here will be that the test names used in the result object
# will be the name of the newly created method.
# For PerformanceTestCase, we should run the test method for
# 'self.repetitions' number of times. So, we redefine the original
# test method to actually do it's work multiple times.
self._orig_testMethodName = 'orig_' + self._testMethodName
methd = getattr(self, self._testMethodName)
# The following crazy line of code creates a new instance method named
# 'self._orig_testMethodName' which has the same definition as the
# instance method 'self._testMethodName'.
orig_test_method = lambda self : self.__class__.__dict__[self._testMethodName](self)
orig_test_method.__name__ = 'orig_' + self._testMethodName
setattr(self.__class__, self._orig_testMethodName, orig_test_method)
"""
# For PerformanceTestCase, we should run the test method for
# 'self.repetitions' number of times. So, we create a new instance
# method that runs self._testMethodName the desired number of times
# and set self._testMethodName to the new method before calling super.run().
# Note - The test will be reported using the new method instead of the original
# test method. We will re-visit this later.
self._orig_testMethodName = self._testMethodName
def test_function(my_self):
orig_test_method = getattr(my_self, my_self._orig_testMethodName)
runtime_list = []
for i in range(my_self.repetitions):
# Get time before and after this function to time the test
start = datetime.now()
orig_test_method()
end = datetime.now()
delta = end - start
milli = delta.seconds * 1000 + (float(delta.microseconds) / 1000)
runtime_list.append(milli)
total_runtime = sum(runtime_list)
min_runtime = min(runtime_list)
max_runtime = max(runtime_list)
avg_runtime = total_runtime / my_self.repetitions
std_dev = sqrt(sum((runtime - avg_runtime)**2 for runtime in runtime_list) / my_self.repetitions)
std_dev_pct = std_dev * 100 / float(avg_runtime)
logger.info("%s - %s" % (my_self, runtime_list))
# Find the baseline file. For now, we assume that there is only
# one baseline version specified
current_dir = os.path.dirname(inspect.getfile(my_self.__class__))
baseline_file = 'baseline_' + my_self.baseline + '.csv'
baseline_file_path = os.path.join(current_dir, baseline_file)
(baseline_runtime, delta) = GPPerfDiff.check_perf_deviation(my_self._orig_testMethodName, \
baseline_file_path, avg_runtime, \
my_self.threshold)
# compose statistics
stats = [
('Test Name', "%s.%s" % (self.__class__.__name__, self._orig_testMethodName)),
('Average Runtime', "%0.2f" % avg_runtime),
('Baseline Runtime', "%0.2f" % baseline_runtime),
('% Difference', "%0.2f" % delta),
('Allowable Threshold', "%0.2f" % my_self.threshold),
('Repetitions Performed', "%d" % my_self.repetitions),
('Total Runtime', "%0.2f" % total_runtime),
('Min Runtime', "%0.2f" % min_runtime),
('Max Runtime', "%0.2f" % max_runtime),
('Std Dev', "%0.2f" % std_dev),
('% Std Dev', "%0.2f" % std_dev_pct)
]
header = [x[0] for x in stats]
data = [x[1] for x in stats]
# dump statistics to a runtime_stats.csv file
output_file_path = os.path.join(current_dir, 'runtime_stats.csv')
existing = os.path.exists(output_file_path)
mode = 'a' if existing else 'w'
with open(output_file_path, mode) as f:
if not existing:
f.write("%s\n" % ",".join(header))
f.write("%s\n" % ",".join(data))
self.assertGreater(my_self.threshold, delta, "assert delta < my_self.threshold")
test_method = new.instancemethod(test_function,
self, self.__class__)
self.__dict__[ self._testMethodName + "*"] = test_method
self._testMethodName = self._testMethodName + "*"
super(PerformanceTestCase, self).run(result)
class GPPerfDiff(object):
'''
Utility class for checking performance deviation for performance test cases.
'''
@staticmethod
def check_perf_deviation(test_name, baseline_file, current_runtime, threshold):
runtime = -1
with open(baseline_file, 'r') as f:
for line in f:
tokens = line.strip().split(',')
if len(tokens) != 2:
continue
if tokens[0] == test_name:
runtime = float(tokens[1])
break
if runtime == -1:
return (-1, 100)
delta = int(((current_runtime - runtime)/runtime) * 100)
return (runtime, delta)
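# The baseline file parsed above is expected to be a simple two-column CSV of
# "test_name,baseline_runtime_ms" rows; a hypothetical example line:
#
#   test_simple_query,1250.00
#
# With that line present, check_perf_deviation('test_simple_query',
# 'baseline_4.3.csv', 1375.0, 5) would return (1250.0, 10), i.e. the current
# run is 10% slower than the recorded baseline.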
|
Quikling/gpdb
|
src/test/tinc/tinctest/models/perf/__init__.py
|
Python
|
apache-2.0
| 8,346
|
[
"VisIt"
] |
f99ad94415b6df04c79f1556f0e906c61b179849eba2ab112d20293e92703048
|
# Author: Samuel Genheden samuel.genheden@gmail.com
"""
Program to make a plumed-common input file for umbrella
sampling of the distance between a membrane and one or more solutes
The atom indices are taken from a Gromacs index file
Examples:
make_plumed.py --solutes AAC1 AAC2
"""
import argparse
from sgenlib import groups
if __name__ == '__main__' :
parser = argparse.ArgumentParser(description="Making plummed-common input")
parser.add_argument('-n','--ndx',help="the input index file",default="index.ndx")
parser.add_argument('-m','--mem',help="the group name of the membrame",default="Membrane")
parser.add_argument('-s','--solutes',nargs="+",help="the group name of the solutes",default=[])
args = parser.parse_args()
ndxgroups = groups.read_ndxfile(args.ndx)
for i, solute in enumerate(args.solutes, 1) :
print "c%d: COM ATOMS=%d-%d"%(i, ndxgroups[solute][0], ndxgroups[solute][-1])
print "c%d: COM ATOMS=%d-%d"%(len(args.solutes)+1, ndxgroups[args.mem][0],
ndxgroups[args.mem][-1])
print ""
for i in range(1, len(args.solutes)+1):
print "cv%d: DISTANCE ATOMS=c%d,c%d COMPONENTS"%(i, i, len(args.solutes)+1)
|
SGenheden/Scripts
|
Membrane/make_plumed.py
|
Python
|
mit
| 1,220
|
[
"Gromacs"
] |
b1989872dac71219a4582c5995e04cad01be508ab32354c88fb3c42d7963d32c
|
__author__ = 'stephen'
# ===============================================================================
# GLOBAL IMPORTS:
import os,sys
import numpy as np
import argparse
import mdtraj as md
# ===============================================================================
# LOCAL IMPORTS:
HK_DataMiner_Path = os.path.relpath(os.pardir)
#HK_DataMiner_Path = os.path.abspath("/home/stephen/Dropbox/projects/work-2015.5/HK_DataMiner/")
print HK_DataMiner_Path
sys.path.append(HK_DataMiner_Path)
from cluster import KCenters
from lumping import PCCA, PCCA_Standard, SpectralClustering, Ward
from utils import XTCReader, plot_cluster, utils, calculate_landscape, calculate_population
# ===============================================================================
cli = argparse.ArgumentParser()
cli.add_argument('-t', '--trajListFns', default = 'trajlist',
help='List of trajectory files to read in, separated by spaces.')
cli.add_argument('-a', '--atomListFns', default='atom_indices',
help='List of atom index files to read in, separated by spaces.')
cli.add_argument('-g', '--topology', default='native.pdb', help='topology file.')
cli.add_argument('-o', '--homedir', help='Home dir.', default=".", type=str)
cli.add_argument('-e', '--iext', help='''The file extension of input trajectory
files. Must be a filetype that mdtraj.load() can recognize.''',
default="xtc", type=str)
cli.add_argument('-n', '--n_clusters', help='''n_clusters.''',
default=100, type=int)
cli.add_argument('-m', '--n_macro_states', help='''n_macro_states.''',
default=6, type=int)
cli.add_argument('-s', '--stride', help='stride.',
default=None, type=int)
cli.add_argument('-l', '--alignment', default=False, type=bool)
args = cli.parse_args()
trajlistname = args.trajListFns
atom_indicesname = args.atomListFns
trajext = args.iext
File_TOP = args.topology
homedir = args.homedir
n_clusters = args.n_clusters
n_macro_states = args.n_macro_states
stride = args.stride
# ===========================================================================
# Reading Trajs from XTC files
print "stride:", stride
trajreader = XTCReader(trajlistname, atom_indicesname, homedir, trajext, File_TOP, nSubSample=stride)
trajs = trajreader.trajs
traj_len = trajreader.traj_len
np.savetxt("./traj_len.txt", traj_len, fmt="%d")
# ===========================================================================
## get phi psi angles for Alanine Dipeptide
#if os.path.isfile("./phi_angles.txt") and os.path.isfile("./psi_angles.txt") is True:
# phi_angles = np.loadtxt("./phi_angles.txt", dtype=np.float32)
# psi_angles = np.loadtxt("./psi_angles.txt", dtype=np.float32)
# phi_psi = np.column_stack((phi_angles, psi_angles))
#else:
# phi_angles, psi_angles = trajreader.get_phipsi(trajs, psi=[6, 8, 14, 16], phi=[4, 6, 8, 14])
# np.savetxt("./phi_angles.txt", phi_angles, fmt="%f")
# np.savetxt("./psi_angles.txt", psi_angles, fmt="%f")
# ===========================================================================
# superpose
print "Alignment?", args.alignment
if args.alignment is True:
align_atom_indices = np.loadtxt('align_atom_indices', dtype=np.int32).tolist()
print "align_atom_indices:", align_atom_indices
trajs.superpose(reference=trajs[0], frame=0, atom_indices=align_atom_indices)
print "Alignment done."
# ===========================================================================
# ===========================================================================
# Just keep the atoms in atom indices, remove other atoms
atom_indices = np.loadtxt('atom_indices', dtype=np.int32).tolist()
print "atom_indices:", atom_indices
trajs_sub_atoms = trajs.atom_slice(atom_indices, inplace=False) #just keep the atoms in atom indices
print "Trajs:", trajs
print "Sub_atoms_trajs:", trajs_sub_atoms
# ===========================================================================
# Reading Clustering Centers
centers = md.load("cluster_centers_sub_atoms.pdb")
print "Centers:", centers
# ===========================================================================
# do Assigning using KCenters method
#cluster = KCenters(n_clusters=n_clusters, metric="euclidean", random_state=0)
cluster = KCenters(centers=centers, n_clusters=n_clusters, metric="rmsd", random_state=0)
print cluster
#cluster.fit(phi_psi)
#cluster.fit(trajs_sub_atoms)
cluster.assign(trajs_sub_atoms, cluster)
labels = cluster.labels_
print labels
n_microstates = len(set(labels)) - (1 if -1 in labels else 0)
print('Estimated number of clusters: %d' % n_microstates)
# plot micro states
clustering_name = "kcenters_assign_n_" + str(n_microstates)
np.savetxt("assignments_"+clustering_name+".txt", labels, fmt="%d")
#plot_cluster(labels=labels, phi_angles=phi_angles, psi_angles=psi_angles, name=clustering_name)
#calculate_landscape(labels=labels, centers=cluster_centers_, phi_angles=phi_angles, psi_angles=psi_angles, potential=False, name=clustering_name)
#calculate_population(labels=labels, name=clustering_name)
|
stephenliu1989/HK_DataMiner
|
hkdataminer/scripts/test_kcenters_assign.py
|
Python
|
apache-2.0
| 5,118
|
[
"MDTraj"
] |
58f4e5488fa44b3885592fb54aa60856abf2f26b40a4906822d979c4770ad0ae
|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2006 Donald N. Allingham
# Copyright (C) 2008 Brian G. Matherly
# Copyright (C) 2010 Jakim Friant
# Copyright (C) 2017 Alois Poettker <alois.poettker@gmx.de>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
Change IDs of all elements in the database to conform to the
scheme specified in the database's prefix ids
"""
#------------------------------------------------------------------------
#
# standard python modules
#
#------------------------------------------------------------------------
import re
from gi.repository import Gtk, Gdk
#------------------------------------------------------------------------
#
# Gramps modules
#
#------------------------------------------------------------------------
from gramps.gen.const import URL_MANUAL_PAGE
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.translation.sgettext
from gramps.gen.config import config
from gramps.gen.db import DbTxn
from gramps.gen.updatecallback import UpdateCallback
from gramps.gui.display import display_help
from gramps.gui.glade import Glade
from gramps.gui.managedwindow import ManagedWindow
from gramps.gui.plug import tool
from gramps.gui.utils import ProgressMeter
from gramps.gui.widgets import MonitoredCheckbox, MonitoredEntry
#-------------------------------------------------------------------------
#
# Constants
#
#-------------------------------------------------------------------------
WIKI_HELP_PAGE = '%s_-_Tools' % URL_MANUAL_PAGE
WIKI_HELP_SEC = _('manual|Reorder_Gramps_ID')
PREFIXES = {'person': 'i', 'family': 'f', 'event': 'e', 'place': 'p',
'source': 's', 'citation': 'c', 'repository': 'r',
'media': 'o', 'note': 'n'}
#-------------------------------------------------------------------------
#
# Actual tool
#
#-------------------------------------------------------------------------
# gets the prefix, number, suffix specified in a format string, eg:
# P%04dX returns 'P', '04', 'X' It has to have the integer format with at
# least 3 digits to pass.
_parseformat = re.compile(r'(^[^\d]*)%(0[3-9])d([^\d]*$)')
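# A quick illustration of what the expression captures (an interactive sketch,
# not part of the tool itself):
#
#   _parseformat.match('P%04dX').groups()  ->  ('P', '04', 'X')
#   _parseformat.match('I%04d').groups()   ->  ('I', '04', '')
#   _parseformat.match('%02d')             ->  None (width below 3 digits is rejected)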
class ReorderEntry(object):
""" Class for internal values for every primary object """
def __init__(self, object_fmt, quant_id, nextgramps_id, obj):
self.object = obj
self.width_fmt = 4
self.object_fmt, self.object_prefix, self.object_suffix = '', '', ''
self.quant_id, self.actual_id, self.actgramps_id = 0, 0, '0'
self.calc_id(object_fmt, quant_id)
self.stored_fmt = self.object_fmt
self.stored_prefix = self.object_prefix
self.stored_suffix = self.object_suffix
self.number_id = int(nextgramps_id[len(self.object_prefix):
(len(nextgramps_id) -
len(self.object_suffix))])
self.step_id = 1
self.active_obj, self.change_obj, self.keep_obj = True, False, False
def set_active(self, active):
""" sets Change flag """
self.active_obj = active
def get_active(self):
""" gets Change flag """
return self.active_obj
def set_fmt(self, object_fmt):
""" sets primary object format """
if object_fmt:
self.calc_id(object_fmt.strip(), self.quant_id)
def get_fmt(self):
""" gets primary object format """
return self.object_fmt
def res_fmt(self):
""" restore primary object format """
return self.stored_fmt
def set_change(self, change):
""" sets Change flag """
self.change_obj = change
def get_change(self):
""" gets Change flag """
return self.change_obj
def __ret_gid(self, actual):
""" return Gramps ID in correct format """
return '%s%s%s' % \
(self.object_prefix, str(actual).zfill(self.width_fmt),
self.object_suffix)
def calc_id(self, object_fmt, quant_id):
""" calculates identifier prefix, suffix, format & actual value.
Requires a valid format or returns the default instead """
self.object_fmt, self.quant_id = object_fmt, quant_id
# Default values, ID counting starts with zero!
formatmatch = _parseformat.match(object_fmt)
if formatmatch:
self.object_prefix = formatmatch.groups()[0]
self.width_fmt = int(formatmatch.groups()[1])
self.object_suffix = formatmatch.groups()[2]
else: # not a legal format string, use default
self.object_prefix = PREFIXES[self.object].upper()
self.width_fmt = 4
self.object_suffix = ''
self.object_fmt = PREFIXES[self.object].upper() + "%04d"
self.actgramps_id = self.__ret_gid(self.actual_id)
def zero_id(self):
""" provide zero Start ID """
return self.__ret_gid(0)
def set_id(self, actual):
""" sets Start ID """
text = ''.join([i for i in actual.strip() if i in '0123456789'])
self.actual_id = int(text) if text else 0
def get_id(self):
""" gets Start ID """
return self.__ret_gid(self.actual_id)
def next_id(self):
""" provide next Start ID """
return self.__ret_gid(self.number_id)
def succ_id(self):
""" provide next actual Gramps ID """
self.actual_id += self.step_id
self.actgramps_id = self.__ret_gid(self.actual_id)
return self.actgramps_id
def last_id(self):
""" provide quantities of Gramps IDs """
if self.quant_id > 0:
return self.__ret_gid(self.quant_id - 1)
else:
return self.__ret_gid(0)
def set_step(self, step):
""" sets ID Step width """
text = ''.join([i for i in step.strip() if i in '0123456789'])
self.step_id = int(text) if text else 1
def get_step(self):
""" gets ID Step width """
return str(self.step_id)
def change_step(self, step_entry):
""" change Glade Step entry """
step_id = step_entry.get_text().strip()
if step_id and step_id != str(self.step_id):
step_entry.set_text(str(self.step_id))
def set_keep(self, keep):
""" sets Keep flag """
self.keep_obj = keep
def get_keep(self):
""" gets Keep flag """
return self.keep_obj
class ReorderIds(tool.BatchTool, ManagedWindow, UpdateCallback):
""" Class for Reodering Gramps ID Tool """
xobjects = (('person', 'people'), ('family', 'families'),
('event', 'events'), ('place', 'places'),
('source', 'sources'), ('citation', 'citations'),
('repository', 'repositories'),
('media', 'media'), ('note', 'notes'))
def build_menu_names_(self, widget=None):
""" The menu name """
return (_('Main window'), _("Reorder Gramps IDs"))
def __init__(self, dbstate, user, options_class, name, callback=None):
self.uistate = user.uistate
self.db = dbstate.db
if self.uistate:
tool.BatchTool.__init__(self, dbstate, user, options_class, name)
if self.fail:
return # user declined to modify Gramps IDs
ManagedWindow.__init__(self, self.uistate, [], self.__class__)
if not self.uistate:
UpdateCallback.__init__(self, user.callback)
self.object_status = True
self.change_status = False
self.start_zero = True
self.step_cnt, self.step_list = 0, ['1', '2', '5', '10']
self.keep_status = True
self.obj_values = {} # enable access to all internal values
self.active_entries, self.format_entries = {}, {}
self.change_entries = {}
self.start_entries, self.step_entries = {}, {}
self.keep_entries = {}
self.prim_methods, self.obj_methods = {}, {}
for prim_obj, prim_objs in self.xobjects:
iter_handles = "iter_%s_handles" % prim_obj
get_number_obj = "get_number_of_%s" % prim_objs
prefix_fmt = "%s_prefix" % prim_obj
get_from_id = "get_%s_from_gramps_id" % prim_obj
get_from_handle = "get_%s_from_handle" % prim_obj
next_from_id = "find_next_%s_gramps_id" % prim_obj
commit = "commit_%s" % prim_obj
self.prim_methods[prim_obj] = (getattr(self.db, prefix_fmt),
getattr(self.db, get_number_obj)(),
getattr(self.db, next_from_id)())
self.obj_methods[prim_obj] = (getattr(self.db, iter_handles),
getattr(self.db, commit),
getattr(self.db, get_from_id),
getattr(self.db, get_from_handle),
getattr(self.db, next_from_id))
object_fmt, quant_id, next_id = self.prim_methods[prim_obj]
obj_value = ReorderEntry(object_fmt, quant_id, next_id, prim_obj)
self.obj_values[prim_obj] = obj_value
if self.uistate:
self._display()
else:
self._execute()
def __on_object_button_clicked(self, widget=None):
""" compute all primary objects and toggle the 'Active' attribute """
self.object_status = not self.object_status
for prim_obj, dummy in self.xobjects:
obj = self.top.get_object('%s_active' % prim_obj)
obj.set_active(self.object_status)
def __on_object_button_toggled(self, widget):
""" compute the primary object and toggle the 'Sensitive' attribute """
obj_state = widget.get_active()
obj_name = Gtk.Buildable.get_name(widget).split('_', 1)[0]
self.active_entries[obj_name].set_val(obj_state)
for obj_entry in ['actual', 'quant', 'format', 'change']:
obj = self.top.get_object('%s_%s' % (obj_name, obj_entry))
obj.set_sensitive(obj_state)
for obj_entry in ['start', 'step', 'keep']:
obj = self.top.get_object('%s_change' % obj_name)
if obj.get_active():
obj = self.top.get_object('%s_%s' % (obj_name, obj_entry))
obj.set_sensitive(obj_state)
def __on_format_button_clicked(self, widget=None):
""" compute all sensitive primary objects and sets the
'Format' scheme of identifiers """
for prim_obj, dummy in self.xobjects:
obj_format = self.top.get_object('%s_format' % prim_obj)
if not obj_format.get_sensitive():
continue
obj_fmt = self.obj_values[prim_obj].res_fmt()
self.format_entries[prim_obj].force_value(obj_fmt)
if self.start_zero:
obj_id = self.obj_values[prim_obj].zero_id()
else:
obj_id = self.obj_values[prim_obj].last_id()
self.start_entries[prim_obj].force_value(obj_id)
def __on_change_button_clicked(self, widget=None):
""" compute all primary objects and toggle the 'Change' attribute """
self.change_status = not self.change_status
for prim_obj, dummy in self.xobjects:
obj_change = self.top.get_object('%s_change' % prim_obj)
if not obj_change.get_sensitive():
continue
self.change_entries[prim_obj].set_val(self.change_status)
obj_change.set_active(self.change_status)
def __on_change_button_toggled(self, widget):
""" compute the primary object and toggle the 'Sensitive' attribute """
obj_state = widget.get_active()
obj_name = Gtk.Buildable.get_name(widget).split('_', 1)[0]
for obj_entry in ['start', 'step', 'keep']:
obj = self.top.get_object('%s_%s' % (obj_name, obj_entry))
if obj_entry == 'keep':
if (self.obj_values[obj_name].stored_prefix !=
self.obj_values[obj_name].object_prefix and
self.obj_values[obj_name].stored_suffix !=
self.obj_values[obj_name].object_suffix):
self.keep_entries[obj_name].set_val(False)
else:
obj.set_active(obj_state)
self.keep_entries[obj_name].set_val(obj_state)
obj.set_sensitive(obj_state)
def __on_start_button_clicked(self, widget=None):
""" compute all sensitive primary objects and sets the
'Start' values of identifiers """
self.start_zero = not self.start_zero
for prim_obj, dummy in self.xobjects:
obj = self.top.get_object('%s_start' % prim_obj)
if not obj.get_sensitive():
continue
if self.start_zero:
obj_id = self.obj_values[prim_obj].zero_id()
else:
obj_id = self.obj_values[prim_obj].next_id()
self.start_entries[prim_obj].force_value(obj_id)
def __on_step_button_clicked(self, widget=None):
""" compute all sensitive primary objects and sets the
'Step' width of identifiers """
self.step_cnt = self.step_cnt + 1 if self.step_cnt < 3 else 0
for prim_obj, dummy in self.xobjects:
obj = self.top.get_object('%s_step' % prim_obj)
if not obj.get_sensitive():
continue
step_val = self.step_list[self.step_cnt]
self.step_entries[prim_obj].force_value(step_val)
def __on_keep_button_clicked(self, widget=None):
""" compute the primary object and toggle the 'Active' attribute """
self.keep_status = not self.keep_status
for prim_obj, dummy in self.xobjects:
obj = self.top.get_object('%s_change' % prim_obj)
if not obj.get_active():
continue
obj = self.top.get_object('%s_keep' % prim_obj)
obj.set_active(self.keep_status)
self.keep_entries[prim_obj].set_val(self.keep_status)
def __on_format_entry_keyrelease(self, widget, event, data=None):
""" activated on all return's of an entry """
if event.keyval in [Gdk.KEY_Return]:
obj_name = Gtk.Buildable.get_name(widget).split('_', 1)[0]
obj_fmt = self.format_entries[obj_name].get_val()
self.format_entries[obj_name].force_value(obj_fmt)
self.start_entries[obj_name].update()
obj_change = self.top.get_object('%s_change' % obj_name)
obj_change.grab_focus()
return False
def __on_format_entry_focusout(self, widget, event, data=None):
""" activated on all focus out of an entry """
obj_name = Gtk.Buildable.get_name(widget).split('_', 1)[0]
obj_fmt = self.format_entries[obj_name].get_val()
self.format_entries[obj_name].set_text(obj_fmt)
self.start_entries[obj_name].update()
return False
def __on_start_entry_focusout(self, widget, event, data=None):
""" activated on all focus out of an entry """
obj_name = Gtk.Buildable.get_name(widget).split('_', 1)[0]
self.start_entries[obj_name].update()
return False
def __on_ok_button_clicked(self, widget=None):
""" execute the reodering and close """
self._execute()
self._update()
self.close()
def __on_cancel_button_clicked(self, widget=None):
""" cancel the reodering and close """
self.close()
def __on_help_button_clicked(self, widget=None):
""" display the relevant portion of Gramps manual """
display_help(webpage=WIKI_HELP_PAGE, section=WIKI_HELP_SEC)
def _display(self):
""" organize Glade 'Reorder IDs' window """
# get the main window from glade
self.top = Glade(toplevel="reorder-ids")
window = self.top.toplevel
# set gramps style title for the window
self.set_window(window, self.top.get_object("title"),
_("Reorder Gramps IDs"))
# connect signals
self.top.connect_signals({
"on_object_button_clicked" : self.__on_object_button_clicked,
"on_object_button_toggled" : self.__on_object_button_toggled,
"on_format_button_clicked" : self.__on_format_button_clicked,
"on_start_button_clicked" : self.__on_start_button_clicked,
"on_step_button_clicked" : self.__on_step_button_clicked,
"on_keep_button_clicked" : self.__on_keep_button_clicked,
"on_change_button_clicked" : self.__on_change_button_clicked,
"on_change_button_toggled" : self.__on_change_button_toggled,
"on_format_entry_keyrelease" : self.__on_format_entry_keyrelease,
"on_format_entry_focusout" : self.__on_format_entry_focusout,
"on_start_entry_focusout" : self.__on_start_entry_focusout,
"on_help_button_clicked" : self.__on_help_button_clicked,
"on_cancel_button_clicked" : self.__on_cancel_button_clicked,
"on_ok_button_clicked" : self.__on_ok_button_clicked
})
# Calculate all entries and update Glade window
for prim_obj, dummy in self.xobjects:
# populate Object, Actual & Quantity fields with values
obj_active = self.top.get_object('%s_active' % prim_obj)
self.active_entries[prim_obj] = MonitoredCheckbox(
obj_active, obj_active, self.obj_values[prim_obj].set_active,
self.obj_values[prim_obj].get_active)
obj_actual = self.top.get_object('%s_actual' % prim_obj)
obj_actual.set_text('%s' % self.obj_values[prim_obj].last_id())
obj_quant = self.top.get_object('%s_quant' % prim_obj)
obj_quant.set_text('%s' % str(self.obj_values[prim_obj].quant_id))
# connect/populate Format, Start, Step, Keep & Change fields
# with GTK/values
obj_format = self.top.get_object('%s_format' % prim_obj)
self.format_entries[prim_obj] = MonitoredEntry(
obj_format, self.obj_values[prim_obj].set_fmt,
self.obj_values[prim_obj].get_fmt)
obj_change = self.top.get_object('%s_change' % prim_obj)
self.change_entries[prim_obj] = MonitoredCheckbox(
obj_change, obj_change, self.obj_values[prim_obj].set_change,
self.obj_values[prim_obj].get_change)
obj_start = self.top.get_object('%s_start' % prim_obj)
self.start_entries[prim_obj] = MonitoredEntry(
obj_start, self.obj_values[prim_obj].set_id,
self.obj_values[prim_obj].get_id)
obj_step = self.top.get_object('%s_step' % prim_obj)
self.step_entries[prim_obj] = MonitoredEntry(
obj_step, self.obj_values[prim_obj].set_step,
self.obj_values[prim_obj].get_step,
changed=self.obj_values[prim_obj].change_step)
obj_keep = self.top.get_object('%s_keep' % prim_obj)
self.keep_entries[prim_obj] = MonitoredCheckbox(
obj_keep, obj_keep, self.obj_values[prim_obj].set_keep,
self.obj_values[prim_obj].get_keep, readonly=True)
# fetch the popup menu
self.menu = self.top.get_object("popup_menu")
# ok, let's see what we've done
self.window.resize(700, 410)
self.show()
def _update(self):
""" store changed objects formats in DB """
update = False
for prim_obj, dummy in self.xobjects:
obj_value = self.obj_values[prim_obj]
if obj_value.object_fmt != obj_value.stored_fmt:
constant = 'preferences.%sprefix' % PREFIXES[prim_obj]
config.set(constant, obj_value.object_fmt)
update = True
if update:
config.save()
self.db.set_prefixes(
config.get('preferences.iprefix'),
config.get('preferences.oprefix'),
config.get('preferences.fprefix'),
config.get('preferences.sprefix'),
config.get('preferences.cprefix'),
config.get('preferences.pprefix'),
config.get('preferences.eprefix'),
config.get('preferences.rprefix'),
config.get('preferences.nprefix'))
def _execute(self):
""" execute all primary objects and reorder if neccessary """
# Update progress calculation
if self.uistate:
self.progress = ProgressMeter(_('Reorder Gramps IDs'), '')
else:
total_objs = 0
for prim_obj, dummy in self.xobjects:
if self.obj_values[prim_obj].active_obj:
total_objs += self.obj_values[prim_obj].quant_id
self.set_total(total_objs)
# Update database
self.db.disable_signals()
for prim_obj, prim_objs in self.xobjects:
with DbTxn(_('Reorder %s IDs ...') % prim_obj,
self.db, batch=True) as self.trans:
if self.obj_values[prim_obj].active_obj:
if self.uistate:
self.progress.set_pass(
_('Reorder %s IDs ...') % _(prim_objs.title()),
self.obj_values[prim_obj].quant_id)
# Process reordering
self._reorder(prim_obj)
self.db.enable_signals()
self.db.request_rebuild()
# Update progress calculation
if self.uistate:
self.progress.close()
else:
print('\nDone.')
# finds integer portion in a GrampsID
_findint = re.compile(r'^[^\d]*(\d+)[^\d]*$')
# finds prefix, number, suffix of a Gramps ID ignoring a leading or
# trailing space. The number must be at least three digits.
_prob_id = re.compile(r'^ *([^\d]*)(\d{3,9})([^\d]*) *$')
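# Examples (illustrative, not exhaustive): for an ID such as 'I0043',
# _prob_id yields prefix 'I', number '0043' and an empty suffix, while
# _findint extracts the integer 43. A short ID like 'I43' fails the
# three-digit requirement of _prob_id but is still matched by _findint.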
def _reorder(self, prim_obj):
""" reorders all selected objects with a (new) style, start & step """
dup_ids = [] # list of duplicate identifiers
new_ids = {} # list of new identifiers
iter_handles, commit, get_from_id, get_from_handle, next_from_id = \
self.obj_methods[prim_obj]
prefix_fmt = self.obj_values[prim_obj].get_fmt()
prefix = self.obj_values[prim_obj].object_prefix
suffix = self.obj_values[prim_obj].object_suffix
old_pref = self.obj_values[prim_obj].stored_prefix
old_suff = self.obj_values[prim_obj].stored_suffix
new_id = self.obj_values[prim_obj].get_id()
keep_fmt = self.obj_values[prim_obj].get_keep()
change = self.obj_values[prim_obj].get_change()
index_max = int("9" * self.obj_values[prim_obj].width_fmt)
do_same = False
for handle in iter_handles():
# Update progress
if self.uistate:
self.progress.step()
else:
self.update()
# extract basic data out of the database
obj = get_from_handle(handle)
act_id = obj.get_gramps_id()
# here we check whether the ID looks like a new, previous or default
# Gramps ID.
# If not, we ask the user whether it should really be replaced.
# This should allow the user to protect a GetGov ID or similar
match = self._prob_id.match(act_id)
if not (match and
(prefix == match.groups()[0] and
suffix == match.groups()[2] or
old_pref == match.groups()[0] and
old_suff == match.groups()[2] or
len(match.groups()[0]) == 1 and
len(match.groups()[2]) == 0)) and not do_same:
xml = Glade(toplevel='dialog')
top = xml.toplevel
# self.top.set_icon(ICON)
top.set_title("%s - Gramps" % _("Reorder Gramps IDs"))
apply_to_rest = xml.get_object('apply_to_rest')
label1 = xml.get_object('toplabel')
label1.set_text('<span weight="bold" size="larger">%s</span>' %
_("Reorder Gramps IDs"))
label1.set_use_markup(True)
label2 = xml.get_object('mainlabel')
label2.set_text(_("Do you want to replace %s?" % act_id))
top.set_transient_for(self.progress._ProgressMeter__dialog)
self.progress._ProgressMeter__dialog.set_modal(False)
top.show()
response = top.run()
do_same = apply_to_rest.get_active()
top.destroy()
self.progress._ProgressMeter__dialog.set_modal(True)
if response != Gtk.ResponseType.YES:
continue
elif not match and do_same and response != Gtk.ResponseType.YES:
continue
if change:
# update the defined ID numbers on the objects; with the 'keep'
# option set, an ID whose format does not match the prefix is left
# unchanged (the condition below implements a logical implication)
if act_id.startswith(prefix) and act_id.endswith(suffix) or \
not keep_fmt:
obj.set_gramps_id(new_id)
commit(obj, self.trans)
new_id = self.obj_values[prim_obj].succ_id()
else:
# attempt to extract integer - if we can't, treat it as a
# duplicate
try:
match = self._findint.match(act_id)
if match:
# get the integer, build the new handle. Make sure it
# hasn't already been chosen. If it has, put this
# in the duplicate handle list
index = int(match.groups()[0])
if index > index_max:
new_id = next_from_id()
else:
new_id = prefix_fmt % index
if new_id == act_id:
if new_id in new_ids:
dup_ids.append(obj.get_handle())
else:
new_ids[new_id] = act_id
elif get_from_id(new_id) is not None:
dup_ids.append(obj.get_handle())
else:
obj.set_gramps_id(new_id)
commit(obj, self.trans)
new_ids[new_id] = act_id
else:
dup_ids.append(handle)
except:
dup_ids.append(handle)
# go through the duplicates, looking for the first available
# handle that matches the new scheme.
if dup_ids:
if self.uistate:
self.progress.set_pass(_('Finding and assigning unused IDs.'),
len(dup_ids))
for handle in dup_ids:
obj = get_from_handle(handle)
obj.set_gramps_id(next_from_id())
commit(obj, self.trans)
#------------------------------------------------------------------------
#
#
#
#------------------------------------------------------------------------
class ReorderIdsOptions(tool.ToolOptions):
""" Defines options and provides handling interface. """
def __init__(self, name, person_id=None):
tool.ToolOptions.__init__(self, name, person_id)
|
dermoth/gramps
|
gramps/plugins/tool/reorderids.py
|
Python
|
gpl-2.0
| 28,505
|
[
"Brian"
] |
66dab8357103fc67e343deb8330fe0afd315025144741c8310c032c319835242
|
########################################################################
# File : ComputingElement.py
# Author : Stuart Paterson, A.T.
########################################################################
""" The Computing Element class is a base class for all the various
types CEs. It serves several purposes:
- collects general CE related parameters to generate CE description
for the job matching
- provides logic for evaluation of the number of available CPU slots
- provides logic for the proxy renewal while executing jobs
The CE parameters are collected from the following sources, in descending
order of priority:
- parameters provided through setParameters() method of the class
- parameters in /LocalSite configuration section
- parameters in /LocalSite/<ceName>/ResourceDict configuration section
- parameters in /LocalSite/ResourceDict configuration section
- parameters in /LocalSite/<ceName> configuration section
- parameters in /Resources/Computing/<ceName> configuration section
- parameters in /Resources/Computing/CEDefaults configuration section
The ComputingElement objects are usually instantiated with the help of
ComputingElementFactory.
"""
from __future__ import print_function
import os
import multiprocessing
from DIRAC.ConfigurationSystem.Client.Config import gConfig
from DIRAC.Core.Security.ProxyFile import writeToProxyFile
from DIRAC.Core.Security.ProxyInfo import getProxyInfoAsString
from DIRAC.Core.Security.ProxyInfo import formatProxyInfoAsString
from DIRAC.Core.Security.ProxyInfo import getProxyInfo
from DIRAC.FrameworkSystem.Client.ProxyManagerClient import gProxyManager
from DIRAC.Core.Security.VOMS import VOMS
from DIRAC.Core.Security import CS
from DIRAC.Core.Security import Properties
from DIRAC.Core.Utilities.Time import dateTime, second
from DIRAC import S_OK, S_ERROR, gLogger, version
from DIRAC.Core.Utilities.ObjectLoader import ObjectLoader
__RCSID__ = "$Id$"
INTEGER_PARAMETERS = ['CPUTime', 'NumberOfProcessors']
FLOAT_PARAMETERS = []
LIST_PARAMETERS = ['Tag', 'RequiredTag']
WAITING_TO_RUNNING_RATIO = 0.5
MAX_WAITING_JOBS = 1
MAX_TOTAL_JOBS = 1
class ComputingElement(object):
""" ComputingElement base class """
#############################################################################
def __init__(self, ceName):
""" Standard constructor
"""
self.log = gLogger.getSubLogger(ceName)
self.ceName = ceName
self.ceType = ''
self.ceParameters = {}
self.proxy = ''
self.valid = None
self.mandatoryParameters = []
self.batch = None
self.batchSystem = None
self.batchModuleFile = None
self.minProxyTime = gConfig.getValue('/Registry/MinProxyLifeTime', 10800) # secs
self.defaultProxyTime = gConfig.getValue('/Registry/DefaultProxyLifeTime', 43200) # secs
self.proxyCheckPeriod = gConfig.getValue('/Registry/ProxyCheckingPeriod', 3600) # secs
self.initializeParameters()
def setProxy(self, proxy, valid=0):
""" Set proxy for this instance
"""
self.proxy = proxy
self.valid = dateTime() + second * valid
def _prepareProxy(self):
""" Set the environment variable X509_USER_PROXY
"""
if not self.proxy:
result = getProxyInfo()
if not result['OK']:
return S_ERROR("No proxy available")
if "path" in result['Value']:
os.environ['X509_USER_PROXY'] = result['Value']['path']
return S_OK()
else:
result = gProxyManager.dumpProxyToFile(self.proxy, requiredTimeLeft=self.minProxyTime)
if not result['OK']:
return result
os.environ['X509_USER_PROXY'] = result['Value']
gLogger.debug("Set proxy variable X509_USER_PROXY to %s" % os.environ['X509_USER_PROXY'])
return S_OK()
def isProxyValid(self, valid=1000):
""" Check if the stored proxy is valid
"""
if not self.valid:
result = S_ERROR('Proxy is not valid for the requested length')
result['Value'] = 0
return result
delta = self.valid - dateTime()
totalSeconds = delta.days * 86400 + delta.seconds
if totalSeconds > valid:
return S_OK(totalSeconds - valid)
result = S_ERROR('Proxy is not valid for the requested length')
result['Value'] = totalSeconds - valid
return result
def initializeParameters(self):
""" Initialize the CE parameters after they are collected from various sources
"""
# Collect global defaults first
for section in ['/Resources/Computing/CEDefaults', '/Resources/Computing/%s' % self.ceName]:
result = gConfig.getOptionsDict(section)
if result['OK']:
ceOptions = result['Value']
for key in ceOptions:
if key in INTEGER_PARAMETERS:
ceOptions[key] = int(ceOptions[key])
if key in FLOAT_PARAMETERS:
ceOptions[key] = float(ceOptions[key])
if key in LIST_PARAMETERS:
ceOptions[key] = gConfig.getValue(os.path.join(section, key), [])
self.ceParameters.update(ceOptions)
# Get local CE configuration
localConfigDict = getCEConfigDict(self.ceName)
self.ceParameters.update(localConfigDict)
# Adds site level parameters
section = '/LocalSite'
result = gConfig.getOptionsDict(section)
if result['OK'] and result['Value']:
localSiteParameters = result['Value']
self.log.debug('Local site parameters are: %s' % (localSiteParameters))
for option, value in localSiteParameters.iteritems():
if option == 'Architecture':
self.ceParameters['Platform'] = value
self.ceParameters['Architecture'] = value
elif option == 'LocalSE':
self.ceParameters['LocalSE'] = value.split(', ')
else:
self.ceParameters[option] = value
self._addCEConfigDefaults()
def isValid(self):
""" Check the sanity of the Computing Element definition
"""
for par in self.mandatoryParameters:
if par not in self.ceParameters:
return S_ERROR('Missing Mandatory Parameter in Configuration: %s' % par)
return S_OK()
#############################################################################
def _addCEConfigDefaults(self):
"""Method to make sure all necessary Configuration Parameters are defined
"""
self.ceParameters['WaitingToRunningRatio'] = float(
self.ceParameters.get('WaitingToRunningRatio', WAITING_TO_RUNNING_RATIO))
self.ceParameters['MaxWaitingJobs'] = int(self.ceParameters.get('MaxWaitingJobs', MAX_WAITING_JOBS))
self.ceParameters['MaxTotalJobs'] = int(self.ceParameters.get('MaxTotalJobs', MAX_TOTAL_JOBS))
def _reset(self):
""" Make specific CE parameter adjustments after they are collected or added
"""
pass
def loadBatchSystem(self):
""" Instantiate object representing the backend batch system
"""
if self.batchSystem is None:
self.batchSystem = self.ceParameters['BatchSystem']
objectLoader = ObjectLoader()
result = objectLoader.loadObject('Resources.Computing.BatchSystems.%s' % self.batchSystem, self.batchSystem)
if not result['OK']:
gLogger.error('Failed to load batch object: %s' % result['Message'])
return result
batchClass = result['Value']
self.batchModuleFile = result['ModuleFile']
self.batch = batchClass()
self.log.info("Batch system class from module: ", self.batchModuleFile)
def setParameters(self, ceOptions):
""" Add parameters from the given dictionary overriding the previous values
:param dict ceOptions: CE parameters dictionary to update already defined ones
"""
self.ceParameters.update(ceOptions)
# At this point we can know the exact type of CE,
# try to get generic parameters for this type
ceType = self.ceParameters.get('CEType')
if ceType:
result = gConfig.getOptionsDict('/Resources/Computing/%s' % ceType)
if result['OK']:
generalCEDict = result['Value']
generalCEDict.update(self.ceParameters)
self.ceParameters = generalCEDict
# If NumberOfProcessors is present in the description but is equal to zero
# interpret it as needing local evaluation
if self.ceParameters.get("NumberOfProcessors", -1) == 0:
self.ceParameters["NumberOfProcessors"] = multiprocessing.cpu_count()
for key in ceOptions:
if key in INTEGER_PARAMETERS:
self.ceParameters[key] = int(self.ceParameters[key])
if key in FLOAT_PARAMETERS:
self.ceParameters[key] = float(self.ceParameters[key])
self._reset()
return S_OK()
def getParameterDict(self):
""" Get the CE complete parameter dictionary
"""
return self.ceParameters
#############################################################################
def setCPUTimeLeft(self, cpuTimeLeft=None):
"""Update the CPUTime parameter of the CE classAd, necessary for running in filling mode
"""
if not cpuTimeLeft:
# do nothing
return S_OK()
try:
intCPUTimeLeft = int(cpuTimeLeft)
except ValueError:
return S_ERROR('Wrong type for setCPUTimeLeft argument')
self.ceParameters['CPUTime'] = intCPUTimeLeft
return S_OK(intCPUTimeLeft)
#############################################################################
def available(self, jobIDList=None):
"""This method returns the number of available slots in the target CE. The CE
instance polls for waiting and running jobs and compares to the limits
in the CE parameters.
:param jobIDList: list of already existing job IDs to be checked against
:type jobIDList: python:list
"""
# If there are no already registered jobs
if jobIDList is not None and not jobIDList:
result = S_OK()
result['RunningJobs'] = 0
result['WaitingJobs'] = 0
result['SubmittedJobs'] = 0
else:
result = self.ceParameters.get('CEType')
if result and result == 'CREAM':
result = self.getCEStatus(jobIDList)
else:
result = self.getCEStatus()
if not result['OK']:
return result
runningJobs = result['RunningJobs']
waitingJobs = result['WaitingJobs']
submittedJobs = result['SubmittedJobs']
availableProcessors = result.get('AvailableProcessors')
ceInfoDict = dict(result)
maxTotalJobs = int(self.ceParameters.get('MaxTotalJobs', 0))
ceInfoDict['MaxTotalJobs'] = maxTotalJobs
waitingToRunningRatio = float(self.ceParameters.get('WaitingToRunningRatio', 0.0))
# if there are no Running jobs we can submit at most 'MaxWaitingJobs' jobs
# if there are Running jobs we can increase this to reach a W / R ratio of 'WaitingToRunningRatio'
maxWaitingJobs = int(max(int(self.ceParameters.get('MaxWaitingJobs', 0)),
runningJobs * waitingToRunningRatio))
self.log.verbose('Max Number of Jobs:', maxTotalJobs)
self.log.verbose('Max W/R Ratio:', waitingToRunningRatio)
self.log.verbose('Max Waiting Jobs:', maxWaitingJobs)
# Determine how many more jobs can be submitted
message = '%s CE: SubmittedJobs=%s' % (self.ceName, submittedJobs)
message += ', WaitingJobs=%s, RunningJobs=%s' % (waitingJobs, runningJobs)
totalJobs = runningJobs + waitingJobs
message += ', MaxTotalJobs=%s' % (maxTotalJobs)
if totalJobs >= maxTotalJobs:
self.log.verbose('Max Number of Jobs reached:', maxTotalJobs)
result['Value'] = 0
message = 'There are %s waiting jobs and total jobs %s >= %s max total jobs' % (
waitingJobs, totalJobs, maxTotalJobs)
else:
additionalJobs = 0
if waitingJobs < maxWaitingJobs:
additionalJobs = maxWaitingJobs - waitingJobs
if totalJobs + additionalJobs >= maxTotalJobs:
additionalJobs = maxTotalJobs - totalJobs
# For SSH CE case
if int(self.ceParameters.get('MaxWaitingJobs', 0)) == 0:
additionalJobs = maxTotalJobs - runningJobs
if availableProcessors is not None:
additionalJobs = min(additionalJobs, availableProcessors)
result['Value'] = additionalJobs
result['Message'] = message
result['CEInfoDict'] = ceInfoDict
return result
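# Worked example (illustrative numbers): with MaxTotalJobs=100,
# MaxWaitingJobs=10, WaitingToRunningRatio=0.5, RunningJobs=40 and
# WaitingJobs=5, the waiting limit becomes max(10, 40*0.5)=20, so
# additionalJobs = 20-5 = 15; the total 45+15=60 stays below MaxTotalJobs,
# so available() reports 15 free slots.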
#############################################################################
def writeProxyToFile(self, proxy):
"""CE helper function to write a CE proxy string to a file.
"""
result = writeToProxyFile(proxy)
if not result['OK']:
self.log.error('Could not write proxy to file', result['Message'])
return result
proxyLocation = result['Value']
result = getProxyInfoAsString(proxyLocation)
if not result['OK']:
self.log.error('Could not get proxy info', result)
return result
else:
self.log.info('Payload proxy information:')
print(result['Value'])
return S_OK(proxyLocation)
#############################################################################
def _monitorProxy(self, pilotProxy, payloadProxy):
"""Base class for the monitor and update of the payload proxy, to be used in
derived classes for the basic renewal of the proxy, if further actions are
necessary they should be implemented there
"""
retVal = getProxyInfo(payloadProxy)
if not retVal['OK']:
self.log.error('Could not get payload proxy info', retVal)
return retVal
self.log.verbose('Payload Proxy information:\n%s' % formatProxyInfoAsString(retVal['Value']))
payloadProxyDict = retVal['Value']
payloadSecs = payloadProxyDict['chain'].getRemainingSecs()['Value']
if payloadSecs > self.minProxyTime:
self.log.verbose('No need to renew payload Proxy')
return S_OK()
# if there is no pilot proxy, assume there is a certificate and try a renewal
if not pilotProxy:
self.log.info('Using default credentials to get a new payload Proxy')
return gProxyManager.renewProxy(proxyToBeRenewed=payloadProxy, minLifeTime=self.minProxyTime,
newProxyLifeTime=self.defaultProxyTime,
proxyToConnect=pilotProxy)
# if there is pilot proxy
retVal = getProxyInfo(pilotProxy)
if not retVal['OK']:
return retVal
pilotProxyDict = retVal['Value']
if 'groupProperties' not in pilotProxyDict:
self.log.error('Invalid Pilot Proxy', 'Group has no properties defined')
return S_ERROR('Proxy has no group properties defined')
pilotProps = pilotProxyDict['groupProperties']
# if running with a pilot proxy, use it to renew the proxy of the payload
if Properties.PILOT in pilotProps or Properties.GENERIC_PILOT in pilotProps:
self.log.info('Using Pilot credentials to get a new payload Proxy')
return gProxyManager.renewProxy(proxyToBeRenewed=payloadProxy, minLifeTime=self.minProxyTime,
newProxyLifeTime=self.defaultProxyTime,
proxyToConnect=pilotProxy)
# if we are running with another type of proxy, check whether both proxies
# belong to the same user and group and copy the pilot proxy if necessary
self.log.info('Trying to copy pilot Proxy to get a new payload Proxy')
pilotProxySecs = pilotProxyDict['chain'].getRemainingSecs()['Value']
if pilotProxySecs <= payloadSecs:
errorStr = 'Pilot Proxy is not longer than payload Proxy'
self.log.error(errorStr)
return S_ERROR('Can not renew by copy: %s' % errorStr)
# check if both proxies belong to the same user and group
pilotDN = pilotProxyDict['chain'].getIssuerCert()['Value'].getSubjectDN()['Value']
retVal = pilotProxyDict['chain'].getDIRACGroup()
if not retVal['OK']:
return retVal
pilotGroup = retVal['Value']
payloadDN = payloadProxyDict['chain'].getIssuerCert()['Value'].getSubjectDN()['Value']
retVal = payloadProxyDict['chain'].getDIRACGroup()
if not retVal['OK']:
return retVal
payloadGroup = retVal['Value']
if pilotDN != payloadDN or pilotGroup != payloadGroup:
errorStr = 'Pilot Proxy and payload Proxy do not have same DN and Group'
self.log.error(errorStr)
return S_ERROR('Can not renew by copy: %s' % errorStr)
if pilotProxyDict.get('hasVOMS', False):
return pilotProxyDict['chain'].dumpAllToFile(payloadProxy)
attribute = CS.getVOMSAttributeForGroup(payloadGroup)
vo = CS.getVOMSVOForGroup(payloadGroup)
retVal = VOMS().setVOMSAttributes(pilotProxyDict['chain'], attribute=attribute, vo=vo)
if not retVal['OK']:
return retVal
chain = retVal['Value']
return chain.dumpAllToFile(payloadProxy)
def getDescription(self):
""" Get CE description as a dictionary
"""
ceDict = {}
for option, value in self.ceParameters.iteritems():
if isinstance(value, list):
ceDict[option] = value
elif isinstance(value, basestring):
try:
ceDict[option] = int(value)
except ValueError:
ceDict[option] = value
elif isinstance(value, (int, long, float)):
ceDict[option] = value
else:
self.log.warn('Type of option %s = %s not determined' % (option, value))
release = gConfig.getValue('/LocalSite/ReleaseVersion', version)
ceDict['DIRACVersion'] = release
ceDict['ReleaseVersion'] = release
project = gConfig.getValue("/LocalSite/ReleaseProject", "")
if project:
ceDict['ReleaseProject'] = project
result = self.getCEStatus()
if result['OK']:
if 'AvailableProcessors' in result:
cores = result['AvailableProcessors']
ceDict['NumberOfProcessors'] = cores
return S_OK(ceDict)
#############################################################################
def sendOutput(self, stdid, line): # pylint: disable=unused-argument, no-self-use
""" Callback function such that the results from the CE may be returned.
"""
print(line)
#############################################################################
def submitJob(self, executableFile, proxy, dummy=None, processors=1): # pylint: disable=unused-argument
""" Method to submit job, should be overridden in sub-class.
"""
name = 'submitJob()'
self.log.error('ComputingElement should be implemented in a subclass', name)
return S_ERROR('ComputingElement: %s should be implemented in a subclass' % (name))
#############################################################################
def getCEStatus(self, jobIDList=None): # pylint: disable=unused-argument
""" Method to get dynamic job information, can be overridden in sub-class.
"""
name = 'getCEStatus()'
self.log.error('ComputingElement should be implemented in a subclass', name)
return S_ERROR('ComputingElement: %s should be implemented in a subclass' % (name))
def getCEConfigDict(ceName):
"""Look into LocalSite for configuration Parameters for this CE
"""
ceConfigDict = {}
if ceName:
result = gConfig.getOptionsDict('/LocalSite/%s' % ceName)
if result['OK']:
ceConfigDict = result['Value']
return ceConfigDict
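# A minimal sketch of how a concrete CE is expected to specialise this base
# class: override submitJob() and getCEStatus() and return the usual
# S_OK/S_ERROR structures. The class below is hypothetical and only
# illustrates the contract consumed by available(); it is not a DIRAC CE.
class ExampleCE(ComputingElement):
  """ Hypothetical CE used for illustration only """

  def submitJob(self, executableFile, proxy, dummy=None, processors=1):
    # a real CE would hand the executable over to its backend here
    return S_OK(['example-job-1'])

  def getCEStatus(self, jobIDList=None):
    # report the counters in the format expected by ComputingElement.available()
    result = S_OK()
    result['SubmittedJobs'] = 0
    result['RunningJobs'] = 0
    result['WaitingJobs'] = 0
    return result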
|
petricm/DIRAC
|
Resources/Computing/ComputingElement.py
|
Python
|
gpl-3.0
| 19,093
|
[
"DIRAC"
] |
77c8632615a6dd4c0547d4102bce8ee7da4f37c22997057a522786723904843d
|
########################################################################
# File: FTSDB.py
# Author: Krzysztof.Ciba@NOSPAMgmail.com
# Date: 2013/04/02 15:13:51
########################################################################
""" :mod: FTSDB
===========
.. module: FTSDB
:synopsis: FTS DB
.. moduleauthor:: Krzysztof.Ciba@NOSPAMgmail.com
FTS DB
"""
# #
# @file FTSDB.py
# @author Krzysztof.Ciba@NOSPAMgmail.com
# @date 2013/04/02 15:13:57
# @brief Definition of FTSDB class.
# # imports
# Get rid of the annoying Deprecation warning of the current MySQLdb
# FIXME: compile a newer MySQLdb version
import warnings
with warnings.catch_warnings():
warnings.simplefilter( 'ignore', DeprecationWarning )
import MySQLdb.cursors
import decimal
from MySQLdb import Error as MySQLdbError
# # from DIRAC
from DIRAC import S_OK, S_ERROR, gLogger
from DIRAC.Core.Base.DB import DB
from DIRAC.Core.Utilities.LockRing import LockRing
from DIRAC.Core.Utilities.List import stringListToString, intListToString
# # ORMs
from DIRAC.DataManagementSystem.Client.FTSJob import FTSJob
from DIRAC.DataManagementSystem.Client.FTSFile import FTSFile
from DIRAC.DataManagementSystem.private.FTSHistoryView import FTSHistoryView
########################################################################
class FTSDB( DB ):
"""
.. class:: FTSDB
database holding FTS jobs and their files
"""
def __init__( self, systemInstance = "Default", maxQueueSize = 10 ):
"""c'tor
:param self: self reference
:param str systemInstance: ???
:param int maxQueueSize: size of queries queue
"""
DB.__init__( self, "FTSDB", "DataManagement/FTSDB", maxQueueSize )
# self.log = gLogger.getSubLogger( "DataManagement/FTSDB" )
# # private lock
self.getIdLock = LockRing().getLock( "FTSDBLock" )
# # max attempt for reschedule
self.maxAttempt = 100
# # check tables
def createTables( self, toCreate = None, force = False ):
""" create tables """
toCreate = toCreate if toCreate else []
if not toCreate:
return S_OK()
tableMeta = self.getTableMeta()
metaCreate = {}
for tableName in toCreate:
metaCreate[tableName] = tableMeta[tableName]
if metaCreate:
return self._createTables( metaCreate, force )
return S_OK()
def getTables( self ):
""" get tables """
showTables = self._query( "SHOW TABLES;" )
if not showTables['OK']:
return showTables
return S_OK( [ table[0] for table in showTables['Value'] if table and table != "FTSHistoryView" ] )
@staticmethod
def getTableMeta():
""" get db schema in a dict format """
return dict( [ ( classDef.__name__, classDef.tableDesc() )
for classDef in ( FTSJob, FTSFile ) ] )
@staticmethod
def getViewMeta():
""" return db views in dict format
at the moment only one view - FTSHistoryView
"""
return { 'FTSHistoryView': FTSHistoryView.viewDesc() }
def createViews( self, force = False ):
""" create views """
return self._createViews( self.getViewMeta(), force )
def _checkTables( self, force = False ):
""" create tables if not existing
:param bool force: flag to trigger recreation of db schema
"""
return self._createTables( self.getTableMeta(), force = force )
def dictCursor( self, conn = None ):
""" get dict cursor for connection :conn:
:return: S_OK( { "cursor": cursors.DictCursor, "connection" : connection } ) or S_ERROR
"""
if not conn:
retDict = self._getConnection()
if not retDict['OK']:
return retDict
conn = retDict['Value']
cursor = conn.cursor( cursorclass = MySQLdb.cursors.DictCursor )
return S_OK( { "cursor" : cursor, "connection" : conn } )
def _transaction( self, queries, connection = None ):
""" execute transaction """
queries = [ queries ] if type( queries ) == str else queries
# # get cursor and connection
getCursorAndConnection = self.dictCursor( connection )
if not getCursorAndConnection['OK']:
return getCursorAndConnection
cursor = getCursorAndConnection['Value']["cursor"]
connection = getCursorAndConnection['Value']["connection"]
# # this will be returned as query result
ret = { "OK" : True }
queryRes = { }
# # switch off autocommit
connection.autocommit( False )
try:
# # execute queries
for query in queries:
cursor.execute( query )
queryRes[query] = list( cursor.fetchall() )
# # commit
connection.commit()
# # save last row ID
lastrowid = cursor.lastrowid
# # close cursor
cursor.close()
ret['Value'] = queryRes
ret["lastrowid"] = lastrowid
connection.autocommit( True )
return ret
except MySQLdbError, error:
self.log.exception( error )
# # roll back
connection.rollback()
# # revert auto commit
connection.autocommit( True )
# # close cursor
cursor.close()
return S_ERROR( str( error ) )
def putFTSSite( self, ftsSite ):
""" put FTS site into DB """
if not ftsSite.FTSSiteID:
existing = self._query( "SELECT `FTSSiteID` FROM `FTSSite` WHERE `Name` = '%s'" % ftsSite.Name )
if not existing["OK"]:
self.log.error( "putFTSSite: %s" % existing["Message"] )
return existing
existing = existing["Value"]
if existing:
return S_ERROR( "putFTSSite: site of '%s' name is already defined at FTSSiteID = %s" % ( ftsSite.Name,
existing[0][0] ) )
ftsSiteSQL = ftsSite.toSQL()
if not ftsSiteSQL["OK"]:
self.log.error( "putFTSSite: %s" % ftsSiteSQL["Message"] )
return ftsSiteSQL
ftsSiteSQL = ftsSiteSQL["Value"]
putFTSSite = self._transaction( ftsSiteSQL )
if not putFTSSite["OK"]:
self.log.error( putFTSSite["Message"] )
return putFTSSite
def getFTSSite( self, ftsSiteID ):
""" read FTSSite given FTSSiteID """
getFTSSiteQuery = "SELECT * FROM `FTSSite` WHERE `FTSSiteID`=%s" % int( ftsSiteID )
getFTSSite = self._transaction( [ getFTSSiteQuery ] )
if not getFTSSite["OK"]:
self.log.error( "getFTSSite: %s" % getFTSSite["Message"] )
return getFTSSite
getFTSSite = getFTSSite["Value"]
if getFTSSiteQuery in getFTSSite and getFTSSite[getFTSSiteQuery]:
getFTSSite = FTSSite( getFTSSite[getFTSSiteQuery][0] )
return S_OK( getFTSSite )
# # if we land here FTSSite does not exist
return S_OK()
def deleteFTSSite( self, ftsSiteID ):
""" delete FTSSite given its FTSSiteID """
delete = "DELETE FROM `FTSSite` WHERE `FTSSiteID` = %s;" % int( ftsSiteID )
delete = self._transaction( [ delete ] )
if not delete["OK"]:
self.log.error( delete["Message"] )
return delete
def getFTSSitesList( self ):
""" bulk read of FTS sites """
ftsSitesQuery = "SELECT * FROM `FTSSite`;"
ftsSites = self._transaction( [ ftsSitesQuery ] )
if not ftsSites["OK"]:
self.log.error( "getFTSSites: %s" % ftsSites["Message"] )
return ftsSites
ftsSites = ftsSites["Value"][ftsSitesQuery] if ftsSitesQuery in ftsSites["Value"] else []
return S_OK( [ FTSSite( ftsSiteDict ) for ftsSiteDict in ftsSites ] )
def putFTSFile( self, ftsFile ):
""" put FTSFile into fts db """
ftsFileSQL = ftsFile.toSQL()
if not ftsFileSQL['OK']:
self.log.error( ftsFileSQL['Message'] )
return ftsFileSQL
ftsFileSQL = ftsFileSQL['Value']
putFTSFile = self._transaction( ftsFileSQL )
if not putFTSFile['OK']:
self.log.error( putFTSFile['Message'] )
return putFTSFile
def getFTSFile( self, ftsFileID ):
""" read FTSFile from db given FTSFileID """
select = "SELECT * FROM `FTSFile` WHERE `FTSFileID` = %s;" % ftsFileID
select = self._transaction( [ select ] )
if not select['OK']:
self.log.error( select['Message'] )
return select
select = select['Value']
if not select.values()[0]:
return S_OK()
ftsFile = FTSFile( select.values()[0][0] )
return S_OK( ftsFile )
def deleteFTSFiles( self, operationID, opFileIDList = None ):
""" delete FTSFiles for reschedule
:param int operationID: ReqDB.Operation.OperationID
:param list opFileIDList: [ ReqDB.File.FileID, ... ]
"""
query = [ "DELETE FROM `FTSFile` WHERE OperationID = %s" % operationID ]
if opFileIDList:
query.append( " AND `FileID` IN (%s)" % intListToString( opFileIDList ) )
query.append( ";" )
return self._update( "".join( query ) )
def getFTSJobsForRequest( self, requestID, statusList = None ):
""" get list of FTSJobs with status in :statusList: for request given its requestID
TODO: should be more smart, i.e. one query to select all ftsfiles
"""
statusList = statusList if statusList else list( FTSJob.INITSTATES + FTSJob.TRANSSTATES )
query = "SELECT * FROM `FTSJob` WHERE `RequestID` = %s AND `Status` in (%s)" % ( requestID,
stringListToString( statusList ) )
ftsJobs = self._transaction( [ query ] )
if not ftsJobs['OK']:
self.log.error( "getFTSJobsForRequest: %s" % ftsJobs['Message'] )
return ftsJobs
ftsJobs = ftsJobs['Value'][query] if query in ftsJobs['Value'] else []
ftsJobs = [ FTSJob( ftsJobDict ) for ftsJobDict in ftsJobs ]
for ftsJob in ftsJobs:
query = "SELECT * FROM `FTSFile` WHERE `FTSGUID` = '%s' AND `RequestID`=%s" % ( ftsJob.FTSGUID,
requestID )
ftsFiles = self._transaction( [ query ] )
if not ftsFiles['OK']:
self.log.error( "getFTSJobsForRequest: %s" % ftsFiles['Message'] )
return ftsFiles
ftsFiles = ftsFiles['Value'][query] if query in ftsFiles['Value'] else []
for ftsFileDict in ftsFiles:
ftsJob.addFile( FTSFile( ftsFileDict ) )
return S_OK( ftsJobs )
def getFTSFilesForRequest( self, requestID, statusList = None ):
""" get FTSFiles with status in :statusList: for request given its :requestID: """
requestID = int( requestID )
statusList = statusList if statusList else [ "Waiting" ]
query = "SELECT * FROM `FTSFile` WHERE `RequestID` = %s AND `Status` IN (%s);" % ( requestID,
stringListToString( statusList ) )
ftsFiles = self._transaction( [ query ] )
if not ftsFiles['OK']:
self.log.error( "getFTSFilesForRequest: %s" % ftsFiles['Message'] )
return ftsFiles
ftsFiles = ftsFiles['Value'].get( query, [] )
return S_OK( [ FTSFile( ftsFileDict ) for ftsFileDict in ftsFiles ] )
def getAllFTSFilesForRequest( self, requestID ):
""" get FTSFiles with status in :statusList: for request given its :requestID: """
requestID = int( requestID )
query = "SELECT * FROM `FTSFile` WHERE `RequestID` = %s;" % ( requestID )
ftsFiles = self._transaction( [ query ] )
if not ftsFiles['OK']:
self.log.error( "getFTSFilesForRequest: %s" % ftsFiles['Message'] )
return ftsFiles
ftsFiles = ftsFiles['Value'].get( query, [] )
return S_OK( [ FTSFile( ftsFileDict ) for ftsFileDict in ftsFiles ] )
def setFTSFilesWaiting( self, operationID, sourceSE, opFileIDList = None ):
""" propagate states for descendants in replication tree
:param int operationID: ReqDB.Operation.OperationID
:param str sourceSE: waiting source SE
:param list opFileIDList: [ ReqDB.File.FileID, ... ]
"""
operationID = int( operationID )
if opFileIDList:
opFileIDList = [ int( opFileID ) for opFileID in opFileIDList ]
status = "Waiting#%s" % sourceSE
query = "UPDATE `FTSFile` SET `Status` = 'Waiting' WHERE `Status` = '%s' AND `OperationID` = %s " % ( status,
operationID )
if opFileIDList:
query = query + "AND `FileID` IN (%s)" % intListToString( opFileIDList )
return self._update( query )
def peekFTSFile( self, ftsFileID ):
""" peek FTSFile given FTSFileID """
return self.getFTSFile( ftsFileID )
def putFTSJob( self, ftsJob ):
""" put FTSJob to the db (INSERT or UPDATE)
:param FTSJob ftsJob: FTSJob instance
"""
ftsJobSQL = ftsJob.toSQL()
if not ftsJobSQL['OK']:
return ftsJobSQL
putJob = [ ftsJobSQL['Value'] ]
for ftsFile in [ ftsFile.toSQL() for ftsFile in ftsJob ]:
if not ftsFile['OK']:
return ftsFile
putJob.append( ftsFile['Value'] )
putJob = self._transaction( putJob )
if not putJob['OK']:
self.log.error( putJob['Message'] )
return putJob
def getFTSJob( self, ftsJobID = None ):
""" get FTSJob given FTSJobID """
getJob = [ "SELECT * FROM `FTSJob` WHERE `FTSJobID` = %s;" % ftsJobID ]
getJob = self._transaction( getJob )
if not getJob['OK']:
self.log.error( getJob['Message'] )
return getJob
getJob = getJob['Value']
if not getJob:
return S_OK()
ftsJob = FTSJob( getJob.values()[0][0] )
selectFiles = self._transaction( [ "SELECT * FROM `FTSFile` WHERE `FTSGUID` = '%s';" % ftsJob.FTSGUID ] )
if not selectFiles['OK']:
self.log.error( selectFiles['Message'] )
return selectFiles
selectFiles = selectFiles['Value']
ftsFiles = [ FTSFile( item ) for item in selectFiles.values()[0] ]
for ftsFile in ftsFiles:
ftsJob.addFile( ftsFile )
return S_OK( ftsJob )
def setFTSJobStatus( self, ftsJobID, status ):
""" Set the status of an FTS job
"""
setAssigned = "UPDATE `FTSJob` SET `Status`='%s' WHERE `FTSJobID` = %s;" % ( status, ftsJobID )
setAssigned = self._update( setAssigned )
if not setAssigned['OK']:
self.log.error( setAssigned['Message'] )
return setAssigned
return setAssigned
def deleteFTSJob( self, ftsJobID ):
""" delete FTSJob given ftsJobID """
delete = "DELETE FROM `FTSJob` WHERE `FTSJobID` = %s;" % ftsJobID
delete = self._transaction( [ delete ] )
if not delete['OK']:
self.log.error( delete['Message'] )
return delete
def getFTSJobIDs( self, statusList = [ "Submitted", "Active", "Ready" ] ):
""" get FTSJobIDs for a given status list """
query = "SELECT `FTSJobID` FROM `FTSJob` WHERE `Status` IN (%s);" % stringListToString( statusList )
query = self._query( query )
if not query['OK']:
self.log.error( query['Message'] )
return query
# # convert to list of longs
return S_OK( [ item[0] for item in query['Value'] ] )
def getFTSFileIDs( self, statusList = None ):
""" select FTSFileIDs for a given status list """
statusList = statusList if statusList else [ "Waiting" ]
query = "SELECT `FTSFileID` FROM `FTSFile` WHERE `Status` IN (%s);" % stringListToString( statusList );
query = self._query( query )
if not query['OK']:
self.log.error( query['Message'] )
return query
return S_OK( [ item[0] for item in query['Value'] ] )
def getFTSJobList( self, statusList = None, limit = 500 ):
""" select FTS jobs with statuses in :statusList: """
statusList = statusList if statusList else list( FTSJob.INITSTATES + FTSJob.TRANSSTATES )
query = "SELECT * FROM `FTSJob` WHERE `Status` IN (%s) ORDER BY `LastUpdate` DESC LIMIT %s;" % ( stringListToString( statusList ),
limit )
trn = self._transaction( [ query ] )
if not trn['OK']:
self.log.error( "getFTSJobList: %s" % trn['Message'] )
return trn
ftsJobs = [ FTSJob( ftsJobDict ) for ftsJobDict in trn['Value'][query] ]
for ftsJob in ftsJobs:
query = "SELECT * FROM `FTSFile` WHERE `FTSGUID` = '%s';" % ftsJob.FTSGUID
trn = self._transaction( query )
if not trn['OK']:
self.log.error( "getFTSJobList: %s" % trn['Message'] )
return trn
ftsFiles = [ FTSFile( ftsFileDict ) for ftsFileDict in trn['Value'][query] ]
for ftsFile in ftsFiles:
ftsJob.addFile( ftsFile )
return S_OK( ftsJobs )
def putFTSFileList( self, ftsFileList ):
""" bulk put of FSTFiles
:param list ftsFileList: list with FTSFile instances
"""
queries = []
for ftsFile in ftsFileList:
ftsFileSQL = ftsFile.toSQL()
if not ftsFileSQL['OK']:
gLogger.error( "putFTSFileList: %s" % ftsFileSQL['Message'] )
return ftsFileSQL
queries.append( ftsFileSQL['Value'] )
if not queries:
return S_ERROR( "putFTSFileList: no queries to put" )
put = self._transaction( queries )
if not put['OK']:
gLogger.error( "putFTSFileList: %s" % put['Message'] )
return put
def getFTSFileList( self, statusList = None, limit = 1000 ):
""" get at most :limit: FTSFiles with status in :statusList:
:param list statusList: list with FTSFiles statuses
:param int limit: select query limit
"""
statusList = statusList if statusList else [ "Waiting" ]
reStatus = []
inStatus = []
for status in statusList:
if "%" in status or ".*" in status or ".+" in status:
reStatus.append( status )
else:
inStatus.append( status )
reQuery = "`Status` REGEXP '%s'" % "|".join( reStatus ) if reStatus else ""
inQuery = "`Status` IN (%s)" % stringListToString( inStatus ) if inStatus else ""
whereClause = " OR ".join( [ q for q in ( reQuery, inQuery ) if q ] )
if whereClause:
whereClause = "WHERE %s" % whereClause
query = "SELECT * FROM `FTSFile` %s ORDER BY `LastUpdate` DESC LIMIT %s;" % ( whereClause, limit )
trn = self._transaction( [query] )
if not trn['OK']:
self.log.error( "getFTSFileList: %s" % trn['Message'] )
return trn
return S_OK( [ FTSFile( fileDict ) for fileDict in trn['Value'][query] ] )
def getFTSHistory( self ):
""" query FTSHistoryView, return list of FTSHistoryViews """
query = self._transaction( [ "SELECT * FROM `FTSHistoryView`;" ] )
if not query['OK']:
return query
if not query['Value']:
return S_OK()
return S_OK( [ FTSHistoryView( fromDict ) for fromDict in query['Value'].values()[0] ] )
def cleanUpFTSFiles( self, requestID, fileIDs ):
""" delete FTSFiles for given :requestID: and list of :fileIDs:
:param int requestID: ReqDB.Request.RequestID
:param list fileIDs: [ ReqDB.File.FileID, ... ]
"""
query = "DELETE FROM `FTSFile` WHERE `RequestID`= %s and `FileID` IN (%s)" % ( requestID,
intListToString( fileIDs ) )
deleteFiles = self._transaction( [query] )
return deleteFiles
def getDBSummary( self ):
""" get DB summary """
# # this will be returned
retDict = { "FTSJob": {}, "FTSFile": {}, "FTSHistory": {} }
transQueries = { "SELECT `Status`, COUNT(`Status`) FROM `FTSJob` GROUP BY `Status`;" : "FTSJob",
"SELECT `Status`, COUNT(`Status`) FROM `FTSFile` GROUP BY `Status`;" : "FTSFile",
"SELECT * FROM `FTSHistoryView`;": "FTSHistory" }
ret = self._transaction( transQueries.keys() )
if not ret['OK']:
self.log.error( "getDBSummary: %s" % ret['Message'] )
return ret
ret = ret['Value']
for k, v in ret.items():
if transQueries[k] == "FTSJob":
for aDict in v:
status = aDict.get( "Status" )
count = aDict.get( "COUNT(`Status`)" )
if status not in retDict["FTSJob"]:
retDict["FTSJob"][status] = 0
retDict["FTSJob"][status] += count
elif transQueries[k] == "FTSFile":
for aDict in v:
status = aDict.get( "Status" )
count = aDict.get( "COUNT(`Status`)" )
if status not in retDict["FTSFile"]:
retDict["FTSFile"][status] = 0
retDict["FTSFile"][status] += count
else: # # FTSHistory
newListOfHistoryDicts = []
if v:
for oldHistoryDict in v:
newHistoryDict = {}
for key, value in oldHistoryDict.items():
if type( value ) == decimal.Decimal:
newHistoryDict[key] = float( value )
else:
newHistoryDict[key] = value
newListOfHistoryDicts.append( newHistoryDict )
retDict["FTSHistory"] = newListOfHistoryDicts
return S_OK( retDict )
def _getFTSJobProperties( self, ftsJobID, columnNames = None ):
""" select :columnNames: from FTSJob table """
columnNames = columnNames if columnNames else FTSJob.tableDesc()["Fields"].keys()
columnNames = ",".join( [ '`%s`' % str( columnName ) for columnName in columnNames ] )
return "SELECT %s FROM `FTSJob` WHERE `FTSJobID` = %s;" % ( columnNames, int( ftsJobID ) )
def _getFTSFileProperties( self, ftsFileID, columnNames = None ):
""" select :columnNames: from FTSJobFile table """
columnNames = columnNames if columnNames else FTSFile.tableDesc()["Fields"].keys()
columnNames = ",".join( [ '`%s`' % str( columnName ) for columnName in columnNames ] )
return "SELECT %s FROM `FTSFile` WHERE `FTSFileID` = %s;" % ( columnNames, int( ftsFileID ) )
def _getFTSHistoryProperties( self, columnNames = None ):
""" select :columnNames: from FTSHistory view """
columnNames = columnNames if columnNames else FTSHistoryView.viewDesc()["Fields"].keys()
return "SELECT %s FROM `FTSHistoryView`;" % ",".join( columnNames )
|
sposs/DIRAC
|
DataManagementSystem/DB/FTSDB.py
|
Python
|
gpl-3.0
| 21,698
|
[
"DIRAC"
] |
aec1d785608211712405fb88974bbd21f9af53e3b1cc167bd0c296c43f0f839d
|
from setuptools import setup, find_packages
with open('README.md', 'r') as fh:
long_desc = fh.read()
setup(
name='pybbn',
version='3.2.1',
author='Jee Vang',
author_email='vangjee@gmail.com',
packages=find_packages(exclude=('*.tests', '*.tests.*', 'tests.*', 'tests')),
description='Learning and Inference in Bayesian Belief Networks',
long_description=long_desc,
long_description_content_type='text/markdown',
url='https://github.com/vangj/py-bbn',
keywords=' '.join(['bayesian', 'belief', 'network', 'exact', 'approximate', 'inference', 'junction', 'tree',
'algorithm', 'pptc', 'dag', 'gibbs', 'sampling', 'multivariate', 'conditional', 'gaussian',
'linear', 'causal', 'causality', 'structure', 'parameter', 'causal', 'causality']),
install_requires=['numpy', 'scipy', 'networkx', 'pandas'],
classifiers=[
'Programming Language :: Python :: 3',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'Development Status :: 5 - Production/Stable'
],
include_package_data=True,
test_suite='nose.collector'
)
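# Typical usage of this setup script (standard setuptools workflow, shown
# here only as an illustration): from the repository root, `pip install .`
# installs the package, `python setup.py sdist` builds a source
# distribution, and `python setup.py test` runs the nose test suite
# configured above.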
|
vangj/py-bbn
|
setup.py
|
Python
|
apache-2.0
| 1,351
|
[
"Gaussian"
] |
55cc4bb1be4dba8f89a550df303e3e7549f4ab2b1022cf0a88e4ce2954949afa
|
# ----------------------------------------------------------------------------
# Copyright (c) 2016-2022, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
from qiime2.plugin import (Plugin, Int, Float, Range, Metadata, Str, Bool,
Choices, MetadataColumn, Categorical, List,
Citations, TypeMatch, TypeMap)
import q2_feature_table
from q2_types.feature_table import (
FeatureTable, Frequency, RelativeFrequency, PresenceAbsence, Composition)
from q2_types.feature_data import (
FeatureData, Sequence, Taxonomy, AlignedSequence)
from .examples import (feature_table_merge_example,
feature_table_merge_three_tables_example)
citations = Citations.load('citations.bib', package='q2_feature_table')
plugin = Plugin(
name='feature-table',
version=q2_feature_table.__version__,
website='https://github.com/qiime2/q2-feature-table',
package='q2_feature_table',
short_description=('Plugin for working with sample by feature tables.'),
description=('This is a QIIME 2 plugin supporting operations on sample '
'by feature tables, such as filtering, merging, and '
'transforming tables.')
)
plugin.methods.register_function(
function=q2_feature_table.rarefy,
inputs={'table': FeatureTable[Frequency]},
parameters={'sampling_depth': Int % Range(1, None),
'with_replacement': Bool},
outputs=[('rarefied_table', FeatureTable[Frequency])],
input_descriptions={'table': 'The feature table to be rarefied.'},
parameter_descriptions={
'sampling_depth': ('The total frequency that each sample should be '
'rarefied to. Samples where the sum of frequencies '
'is less than the sampling depth will not be '
'included in the resulting table unless '
'subsampling is performed with replacement.'),
'with_replacement': ('Rarefy with replacement by sampling from the '
'multinomial distribution instead of rarefying '
'without replacement.')
},
output_descriptions={
'rarefied_table': 'The resulting rarefied feature table.'
},
name='Rarefy table',
description=("Subsample frequencies from all samples so that the sum of "
"frequencies in each sample is equal to sampling-depth."),
citations=[citations['Weiss2017']]
)
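# Sketch of how this action can be invoked through the Artifact API that
# QIIME 2 generates for registered plugins (file name and depth are
# illustrative):
#
#   from qiime2 import Artifact
#   from qiime2.plugins.feature_table.methods import rarefy
#
#   table = Artifact.load('table.qza')            # a FeatureTable[Frequency]
#   rarefied_table, = rarefy(table=table, sampling_depth=1000)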
plugin.methods.register_function(
function=q2_feature_table.subsample,
inputs={'table': FeatureTable[Frequency]},
parameters={'subsampling_depth': Int % Range(1, None),
'axis': Str % Choices(['sample', 'feature'])},
outputs=[('sampled_table', FeatureTable[Frequency])],
input_descriptions={'table': 'The feature table to be sampled.'},
parameter_descriptions={
'subsampling_depth': ('The total number of samples or features to be '
'randomly sampled. Samples or features that are '
'reduced to a zero sum will not be included in '
'the resulting table.'),
'axis': ('The axis to sample over. If "sample" then samples will be '
'randomly selected to be retained. If "feature" then '
'a random set of features will be selected to be retained.')
},
output_descriptions={
'sampled_table': 'The resulting subsampled feature table.'
},
name='Subsample table',
description=("Randomly pick samples or features, without replacement, "
"from the table.")
)
plugin.methods.register_function(
function=q2_feature_table.presence_absence,
inputs={'table': FeatureTable[Frequency | RelativeFrequency]},
parameters={},
outputs=[('presence_absence_table', FeatureTable[PresenceAbsence])],
input_descriptions={
'table': ('The feature table to be converted into presence/absence '
'abundances.')
},
parameter_descriptions={},
output_descriptions={
'presence_absence_table': ('The resulting presence/absence feature '
'table.')
},
name="Convert to presence/absence",
description="Convert frequencies to binary values indicating presence or "
"absence of a feature in a sample."
)
plugin.methods.register_function(
function=q2_feature_table.relative_frequency,
inputs={'table': FeatureTable[Frequency]},
parameters={},
outputs=[
('relative_frequency_table',
FeatureTable[RelativeFrequency])],
input_descriptions={
'table': 'The feature table to be converted into relative frequencies.'
},
parameter_descriptions={},
output_descriptions={
'relative_frequency_table': ('The resulting relative frequency '
'feature table.')
},
name="Convert to relative frequencies",
description="Convert frequencies to relative frequencies by dividing each "
"frequency in a sample by the sum of frequencies in that "
"sample."
)
plugin.methods.register_function(
function=q2_feature_table.transpose,
inputs={'table': FeatureTable[Frequency]},
parameters={},
outputs=[('transposed_feature_table',
FeatureTable[Frequency])],
input_descriptions={
'table': 'The feature table to be transposed.'
},
parameter_descriptions={},
output_descriptions={
'transposed_feature_table': ('The resulting transposed feature table.')
},
name='Transpose a feature table.',
description='Transpose the rows and columns '
'(typically samples and features) of a feature table.'
)
plugin.methods.register_function(
function=q2_feature_table.group,
inputs={'table': FeatureTable[Frequency]},
parameters={
'mode': Str % Choices({'sum', 'median-ceiling', 'mean-ceiling'}),
'metadata': MetadataColumn[Categorical],
'axis': Str % Choices({'sample', 'feature'})
},
outputs=[
('grouped_table', FeatureTable[Frequency])
],
input_descriptions={
'table': 'The table to group samples or features on.'
},
parameter_descriptions={
'mode': 'How to combine samples or features within a group. `sum` '
'will sum the frequencies across all samples or features '
'within a group; `mean-ceiling` will take the ceiling of the '
'mean of these frequencies; `median-ceiling` will take the '
'ceiling of the median of these frequencies.',
'metadata': 'A column defining the groups. Each unique value will '
'become a new ID for the table on the given `axis`.',
'axis': 'Along which axis to group. Each ID in the given axis must '
'exist in `metadata`.'
},
output_descriptions={
'grouped_table': 'A table that has been grouped along the given '
'`axis`. IDs on that axis are replaced by values in '
'the `metadata` column.'
},
name="Group samples or features by a metadata column",
description="Group samples or features in a feature table using metadata "
"to define the mapping of IDs to a group."
)
i_table, p_overlap_method, o_table = TypeMap({
(FeatureTable[Frequency],
Str % Choices(sorted(q2_feature_table.overlap_methods()))):
FeatureTable[Frequency],
(FeatureTable[RelativeFrequency],
# We don't want to allow summing of RelativeFrequency tables, so remove
# that option from the overlap methods
Str % Choices(sorted(q2_feature_table.overlap_methods() - {'sum'}))):
FeatureTable[RelativeFrequency]
})
plugin.methods.register_function(
function=q2_feature_table.merge,
inputs={'tables': List[i_table]},
parameters={
'overlap_method': p_overlap_method
},
outputs=[
('merged_table', o_table)],
input_descriptions={
'tables': 'The collection of feature tables to be merged.',
},
parameter_descriptions={
'overlap_method': 'Method for handling overlapping ids.',
},
output_descriptions={
'merged_table': ('The resulting merged feature table.'),
},
name="Combine multiple tables",
description="Combines feature tables using the `overlap_method` provided.",
examples={'basic': feature_table_merge_example,
'three_tables': feature_table_merge_three_tables_example},
)
plugin.methods.register_function(
function=q2_feature_table.merge_seqs,
inputs={'data': List[FeatureData[Sequence]]},
parameters={},
outputs=[
('merged_data', FeatureData[Sequence])],
input_descriptions={
'data': 'The collection of feature sequences to be merged.',
},
parameter_descriptions={},
output_descriptions={
'merged_data': ('The resulting collection of feature sequences '
'containing all feature sequences provided.')
},
name="Combine collections of feature sequences",
description="Combines feature data objects which may or may not "
"contain data for the same features. If different feature "
"data is present for the same feature id in the inputs, "
"the data from the first will be propagated to the result."
)
plugin.methods.register_function(
function=q2_feature_table.merge_taxa,
inputs={'data': List[FeatureData[Taxonomy]]},
parameters={},
outputs=[
('merged_data', FeatureData[Taxonomy])],
input_descriptions={
'data': 'The collection of feature taxonomies to be merged.',
},
parameter_descriptions={},
output_descriptions={
'merged_data': ('The resulting collection of feature taxonomies '
'containing all feature taxonomies provided.')
},
name="Combine collections of feature taxonomies",
description="Combines a pair of feature data objects which may or may not "
"contain data for the same features. If different feature "
"data is present for the same feature id in the inputs, "
"the data from the first will be propagated to the result."
)
T1 = TypeMatch([Frequency, RelativeFrequency, PresenceAbsence, Composition])
plugin.methods.register_function(
function=q2_feature_table.rename_ids,
inputs={
'table': FeatureTable[T1],
},
parameters={
'metadata': MetadataColumn[Categorical],
'strict': Bool,
'axis': Str % Choices({'sample', 'feature'})
},
outputs=[
('renamed_table', FeatureTable[T1])
],
input_descriptions={
'table': 'The table to be renamed',
},
parameter_descriptions={
'metadata': 'A metadata column defining the new ids. Each original id '
'must map to a new unique id. If strict mode is used, '
'then every id in the original table must have a new id.',
'strict': 'Whether the naming needs to be strict (each id in '
'the table must have a new id). Otherwise, only the '
'ids described in `metadata` will be renamed and '
'the others will keep their original id names.',
'axis': 'Along which axis to rename the ids.',
},
output_descriptions={
'renamed_table': 'A table which has new ids, where the ids are '
'replaced by values in the `metadata` column.',
},
name='Renames sample or feature ids in a table',
description='Renames the sample or feature ids in a feature table using '
'metadata to define the new ids.',
)
# TODO: constrain min/max frequency when optional is handled by typemap
plugin.methods.register_function(
function=q2_feature_table.filter_samples,
inputs={'table': FeatureTable[T1]},
parameters={'min_frequency': Int,
'max_frequency': Int,
'min_features': Int,
'max_features': Int,
'metadata': Metadata,
'where': Str,
'exclude_ids': Bool,
'filter_empty_features': Bool},
outputs=[('filtered_table', FeatureTable[T1])],
input_descriptions={
'table': 'The feature table from which samples should be filtered.'
},
parameter_descriptions={
'min_frequency': ('The minimum total frequency that a sample must '
'have to be retained.'),
'max_frequency': ('The maximum total frequency that a sample can '
'have to be retained. If no value is provided '
'this will default to infinity (i.e., no maximum '
'frequency filter will be applied).'),
'min_features': ('The minimum number of features that a sample must '
'have to be retained.'),
'max_features': ('The maximum number of features that a sample can '
'have to be retained. If no value is provided '
'this will default to infinity (i.e., no maximum '
'feature filter will be applied).'),
'metadata': 'Sample metadata used with `where` parameter when '
'selecting samples to retain, or with `exclude_ids` '
'when selecting samples to discard.',
'where': 'SQLite WHERE clause specifying sample metadata criteria '
'that must be met to be included in the filtered feature '
'table. If not provided, all samples in `metadata` that are '
'also in the feature table will be retained.',
'exclude_ids': 'If true, the samples selected by `metadata` or '
'`where` parameters will be excluded from the filtered '
'table instead of being retained.',
'filter_empty_features': 'If true, features which are not present in '
'any retained samples are dropped.',
},
output_descriptions={
'filtered_table': 'The resulting feature table filtered by sample.'
},
name="Filter samples from table",
description="Filter samples from table based on frequency and/or "
"metadata. Any features with a frequency of zero after sample "
"filtering will also be removed. See the filtering tutorial "
"on https://docs.qiime2.org for additional details."
)
plugin.methods.register_function(
function=q2_feature_table.filter_features_conditionally,
inputs={'table': FeatureTable[T1]},
parameters={'prevalence': Float % Range(0, 1),
'abundance': Float % Range(0, 1)
},
outputs=[('filtered_table', FeatureTable[T1])],
input_descriptions={
'table': 'The feature table from which features should be filtered.'
},
parameter_descriptions={
'abundance': ('The minimum relative abundance for a feature to be '
'retained.'),
'prevalence': ('The minimum portion of samples that a feature '
'must have a relative abundance of at least '
'`abundance` to be retained.')
},
output_descriptions={
'filtered_table': 'The resulting feature table filtered by feature.'
},
name="Filter features from a table based on abundance and prevalence",
description=("Filter features based on the relative abundance in a "
"certain portion of samples (i.e., features must have a "
"relative abundance of at least `abundance` in at least "
"`prevalence` number of samples). Any samples with a "
"frequency of zero after feature filtering will also be "
"removed.")
)
plugin.methods.register_function(
function=q2_feature_table.filter_features,
inputs={'table': FeatureTable[Frequency]},
parameters={'min_frequency': Int,
'max_frequency': Int,
'min_samples': Int,
'max_samples': Int,
'metadata': Metadata,
'where': Str,
'exclude_ids': Bool,
'filter_empty_samples': Bool},
outputs=[('filtered_table', FeatureTable[Frequency])],
input_descriptions={
'table': 'The feature table from which features should be filtered.'
},
parameter_descriptions={
'min_frequency': ('The minimum total frequency that a feature must '
'have to be retained.'),
'max_frequency': ('The maximum total frequency that a feature can '
'have to be retained. If no value is provided '
'this will default to infinity (i.e., no maximum '
'frequency filter will be applied).'),
'min_samples': ('The minimum number of samples that a feature must '
'be observed in to be retained.'),
'max_samples': ('The maximum number of samples that a feature can '
'be observed in to be retained. If no value is '
'provided this will default to infinity (i.e., no '
'maximum sample filter will be applied).'),
'metadata': 'Feature metadata used with `where` parameter when '
'selecting features to retain, or with `exclude_ids` '
'when selecting features to discard.',
'where': 'SQLite WHERE clause specifying feature metadata criteria '
'that must be met to be included in the filtered feature '
'table. If not provided, all features in `metadata` that are '
'also in the feature table will be retained.',
'exclude_ids': 'If true, the features selected by `metadata` or '
'`where` parameters will be excluded from the filtered '
'table instead of being retained.',
'filter_empty_samples': 'If true, drop any samples where none of the '
'retained features are present.',
},
output_descriptions={
'filtered_table': 'The resulting feature table filtered by feature.'
},
name="Filter features from table",
description="Filter features from table based on frequency and/or "
"metadata. Any samples with a frequency of zero after feature "
"filtering will also be removed. See the filtering tutorial "
"on https://docs.qiime2.org for additional details."
)
T2 = TypeMatch([Sequence, AlignedSequence])
plugin.methods.register_function(
function=q2_feature_table.filter_seqs,
inputs={
'data': FeatureData[T2],
'table': FeatureTable[Frequency],
},
parameters={
'metadata': Metadata,
'where': Str,
'exclude_ids': Bool
},
outputs=[('filtered_data', FeatureData[T2])],
input_descriptions={
'data': 'The sequences from which features should be filtered.',
'table': 'Table containing feature ids used for id-based filtering.'
},
parameter_descriptions={
'metadata': 'Feature metadata used for id-based filtering, with '
'`where` parameter when selecting features to retain, or '
'with `exclude_ids` when selecting features to discard.',
'where': 'SQLite WHERE clause specifying feature metadata criteria '
'that must be met to be included in the filtered feature '
'table. If not provided, all features in `metadata` that are '
'also in the sequences will be retained.',
'exclude_ids': 'If true, the features selected by the `metadata` '
'(with or without the `where` parameter) or `table` '
'parameter will be excluded from the filtered '
'sequences instead of being retained.'
},
output_descriptions={
'filtered_data': 'The resulting filtered sequences.'
},
name="Filter features from sequences",
description="Filter features from sequences based on a feature table or "
"metadata. See the filtering tutorial on "
"https://docs.qiime2.org for additional details. This method "
"can filter based on ids in a table or a metadata file, but "
"not both (i.e., the table and metadata options are mutually "
"exclusive)."
)
plugin.visualizers.register_function(
function=q2_feature_table.summarize,
inputs={'table': FeatureTable[Frequency | RelativeFrequency |
PresenceAbsence]},
parameters={'sample_metadata': Metadata},
input_descriptions={'table': 'The feature table to be summarized.'},
parameter_descriptions={'sample_metadata': 'The sample metadata.'},
name="Summarize table",
description="Generate visual and tabular summaries of a feature table."
)
plugin.visualizers.register_function(
function=q2_feature_table.tabulate_seqs,
inputs={'data': FeatureData[Sequence | AlignedSequence]},
parameters={},
input_descriptions={'data': 'The feature sequences to be tabulated.'},
parameter_descriptions={},
name='View sequence associated with each feature',
description="Generate tabular view of feature identifier to sequence "
"mapping, including links to BLAST each sequence against "
"the NCBI nt database.",
citations=[citations['NCBI'], citations['NCBI-BLAST']]
)
plugin.visualizers.register_function(
function=q2_feature_table.core_features,
inputs={
'table': FeatureTable[Frequency]
},
parameters={
'min_fraction': Float % Range(0.0, 1.0, inclusive_start=False),
'max_fraction': Float % Range(0.0, 1.0, inclusive_end=True),
'steps': Int % Range(2, None)
},
name='Identify core features in table',
description=('Identify "core" features, which are features observed in a '
'user-defined fraction of the samples. Since the core '
'features are a function of the fraction of samples that the '
'feature must be observed in to be considered core, this is '
'computed over a range of fractions defined by the '
'`min_fraction`, `max_fraction`, and `steps` parameters.'),
input_descriptions={
'table': 'The feature table to use in core features calculations.'
},
parameter_descriptions={
'min_fraction': 'The minimum fraction of samples that a feature must '
'be observed in for that feature to be considered a '
'core feature.',
'max_fraction': 'The maximum fraction of samples that a feature must '
'be observed in for that feature to be considered a '
'core feature.',
'steps': 'The number of steps to take between `min_fraction` and '
'`max_fraction` for core features calculations. This '
'parameter has no effect if `min_fraction` and '
'`max_fraction` are the same value.'
}
)
plugin.visualizers.register_function(
function=q2_feature_table.heatmap,
inputs={
'table': FeatureTable[Frequency]
},
parameters={
'sample_metadata': MetadataColumn[Categorical],
'feature_metadata': MetadataColumn[Categorical],
'normalize': Bool,
'title': Str,
'metric': Str % Choices(q2_feature_table.heatmap_choices['metric']),
'method': Str % Choices(q2_feature_table.heatmap_choices['method']),
'cluster': Str % Choices(q2_feature_table.heatmap_choices['cluster']),
'color_scheme': Str % Choices(
q2_feature_table.heatmap_choices['color_scheme']),
},
name='Generate a heatmap representation of a feature table',
description='Generate a heatmap representation of a feature table with '
'optional clustering on both the sample and feature axes.\n\n'
'Tip: To generate a heatmap containing taxonomic annotations, '
'use `qiime taxa collapse` to collapse the feature table at '
'the desired taxonomic level.',
input_descriptions={
'table': 'The feature table to visualize.'
},
parameter_descriptions={
'sample_metadata': 'Annotate the sample IDs with these sample '
'metadata values. When metadata is present and '
'`cluster`=\'feature\', samples will be sorted by '
'the metadata values.',
'feature_metadata': 'Annotate the feature IDs with these feature '
'metadata values. When metadata is present and '
'`cluster`=\'sample\', features will be sorted by '
'the metadata values.',
'normalize': 'Normalize the feature table by adding a pseudocount '
'of 1 and then taking the log10 of the table.',
'title': 'Optional custom plot title.',
'metric': 'Metrics exposed by seaborn (see http://seaborn.pydata.org/'
'generated/seaborn.clustermap.html#seaborn.clustermap for '
'more detail).',
'method': 'Clustering methods exposed by seaborn (see http://seaborn.'
'pydata.org/generated/seaborn.clustermap.html#seaborn.clust'
'ermap for more detail).',
'cluster': 'Specify which axes to cluster.',
'color_scheme': 'The matplotlib colorscheme to generate the heatmap '
'with.',
},
citations=[citations['Hunter2007Matplotlib']]
)
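# ---------------------------------------------------------------------------
# Illustrative usage sketch (kept as comments; it needs artifacts on disk and
# is not part of the plugin registration above). Assuming a saved
# FeatureTable[Frequency] artifact 'table.qza', the actions registered in this
# file are typically driven through the QIIME 2 Artifact API roughly like so:
#
#   from qiime2 import Artifact
#   from qiime2.plugins import feature_table
#
#   table = Artifact.load('table.qza')
#   result = feature_table.methods.filter_samples(
#       table=table, min_frequency=1000, filter_empty_features=True)
#   result.filtered_table.save('filtered-table.qza')
# ---------------------------------------------------------------------------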
|
qiime2/q2-feature-table
|
q2_feature_table/plugin_setup.py
|
Python
|
bsd-3-clause
| 26,276
|
[
"BLAST"
] |
ab4bc46ffeea25ada51abf5d19c0a7a6509580fd807c07e6cbeb99b825daf7f8
|
# Orca
#
# Copyright 2010 Joanmarie Diggs, Mesar Hameed.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., Franklin Street, Fifth Floor,
# Boston MA 02110-1301 USA.
""" A list of common keybindings and unbound keys
pulled out from default.py: __getLaptopBindings()
with the goal of being more readable and less monolithic.
"""
__id__ = "$Id$"
__version__ = "$Revision$"
__date__ = "$Date$"
__copyright__ = "Copyright (c) 2010 Joanmarie Diggs, Mesar Hameed."
__license__ = "LGPL"
from . import keybindings
# Storing values
defaultModifierMask = keybindings.defaultModifierMask
ORCA_MODIFIER_MASK = keybindings.ORCA_MODIFIER_MASK
NO_MODIFIER_MASK = keybindings.NO_MODIFIER_MASK
ORCA_SHIFT_MODIFIER_MASK = keybindings.ORCA_SHIFT_MODIFIER_MASK
ORCA_CTRL_MODIFIER_MASK = keybindings.ORCA_CTRL_MODIFIER_MASK
keymap = (
("9", defaultModifierMask, ORCA_MODIFIER_MASK,
"routePointerToItemHandler"),
# We want the user to be able to combine modifiers with the
# mouse click (e.g. to Shift+Click and select), therefore we
# do not "care" about the modifiers (other than the Orca
# modifier).
#
("7", ORCA_MODIFIER_MASK, ORCA_MODIFIER_MASK,
"leftClickReviewItemHandler"),
("8", ORCA_MODIFIER_MASK, ORCA_MODIFIER_MASK,
"rightClickReviewItemHandler"),
("p", defaultModifierMask, ORCA_MODIFIER_MASK,
"toggleFlatReviewModeHandler"),
("semicolon", defaultModifierMask, ORCA_MODIFIER_MASK,
"sayAllHandler"),
("Return", defaultModifierMask, ORCA_MODIFIER_MASK,
"whereAmIBasicHandler", 1),
("Return", defaultModifierMask, ORCA_MODIFIER_MASK,
"whereAmIDetailedHandler", 2),
("slash", defaultModifierMask, ORCA_MODIFIER_MASK,
"getTitleHandler", 1),
("slash", defaultModifierMask, ORCA_MODIFIER_MASK,
"getStatusBarHandler", 2),
("bracketleft", defaultModifierMask, ORCA_MODIFIER_MASK,
"findHandler"),
("bracketright", defaultModifierMask, ORCA_MODIFIER_MASK,
"findNextHandler"),
("bracketright", defaultModifierMask, ORCA_CTRL_MODIFIER_MASK,
"findPreviousHandler"),
("u", defaultModifierMask, ORCA_MODIFIER_MASK,
"reviewPreviousLineHandler"),
("u", defaultModifierMask, ORCA_CTRL_MODIFIER_MASK,
"reviewHomeHandler"),
("i", defaultModifierMask, ORCA_MODIFIER_MASK,
"reviewCurrentLineHandler", 1),
("i", defaultModifierMask, ORCA_MODIFIER_MASK,
"reviewSpellCurrentLineHandler", 2),
("i", defaultModifierMask, ORCA_MODIFIER_MASK,
"reviewPhoneticCurrentLineHandler", 3),
("o", defaultModifierMask, ORCA_MODIFIER_MASK,
"reviewNextLineHandler"),
("o", defaultModifierMask, ORCA_CTRL_MODIFIER_MASK,
"reviewEndHandler"),
("j", defaultModifierMask, ORCA_MODIFIER_MASK,
"reviewPreviousItemHandler"),
("j", defaultModifierMask, ORCA_CTRL_MODIFIER_MASK,
"reviewAboveHandler"),
("k", defaultModifierMask, ORCA_MODIFIER_MASK,
"reviewCurrentItemHandler", 1),
("k", defaultModifierMask, ORCA_MODIFIER_MASK,
"reviewSpellCurrentItemHandler", 2),
("k", defaultModifierMask, ORCA_MODIFIER_MASK,
"reviewPhoneticCurrentItemHandler", 3),
("k", defaultModifierMask, ORCA_CTRL_MODIFIER_MASK,
"reviewCurrentAccessibleHandler"),
("l", defaultModifierMask, ORCA_MODIFIER_MASK,
"reviewNextItemHandler"),
("l", defaultModifierMask, ORCA_CTRL_MODIFIER_MASK,
"reviewBelowHandler"),
("m", defaultModifierMask, ORCA_MODIFIER_MASK,
"reviewPreviousCharacterHandler"),
("m", defaultModifierMask, ORCA_CTRL_MODIFIER_MASK,
"reviewEndOfLineHandler"),
("comma", defaultModifierMask, ORCA_MODIFIER_MASK,
"reviewCurrentCharacterHandler", 1),
("comma", defaultModifierMask, ORCA_MODIFIER_MASK,
"reviewSpellCurrentCharacterHandler", 2),
("comma", defaultModifierMask, ORCA_MODIFIER_MASK,
"reviewUnicodeCurrentCharacterHandler", 3),
("period", defaultModifierMask, ORCA_MODIFIER_MASK,
"reviewNextCharacterHandler"),
)
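# Illustrative only: each entry above has the layout
#   (key name, modifier mask, "care" mask, handler name[, click count]),
# so a quick scan of this module's own keymap tuple, e.g.
#   [binding[3] for binding in keymap if binding[0] == "k"]
# lists the review handlers bound to the "k" key (current item, spelled,
# phonetic and accessible review).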
|
ruibarreira/linuxtrail
|
usr/lib/python3/dist-packages/orca/laptop_keyboardmap.py
|
Python
|
gpl-3.0
| 4,620
|
[
"ORCA"
] |
50f6942c23a9a8b2c593cb5e9a21ce4634857e03a6c67d3c36229ca70c45c792
|
import os
import sys
import os.path as op
import pysam
from bcbio.log import logger
from bcbio.utils import file_exists, safe_makedir, chdir, get_perl_exports
from bcbio.provenance import do
from bcbio.distributed.transaction import file_transaction
from bcbio.pipeline import datadict as dd
def run(data):
config = data[0][0]['config']
work_dir = dd.get_work_dir(data[0][0])
genome = dd.get_ref_file(data[0][0])
mirdeep2 = os.path.join(os.path.dirname(sys.executable), "miRDeep2.pl")
perl_exports = get_perl_exports()
hairpin, mature, species = "none", "none", "na"
rfam_file = dd.get_mirdeep2_file(data[0][0])
if file_exists(dd.get_mirbase_hairpin(data[0][0])):
species = dd.get_species(data[0][0])
hairpin = dd.get_mirbase_hairpin(data[0][0])
mature = dd.get_mirbase_mature(data[0][0])
logger.debug("Preparing for mirdeep2 analysis.")
bam_file = op.join(work_dir, "align", "seqs.bam")
seqs_dir = op.join(work_dir, "seqcluster", "prepare")
collapsed = op.join(seqs_dir, "seqs.ma")
out_dir = op.join(work_dir, "mirdeep2")
out_file = op.join(out_dir, "result_res.csv")
safe_makedir(out_dir)
if not file_exists(rfam_file):
logger.warning("mirdeep2 Rfam file not instaled. Skipping...")
return None
if not file_exists(mirdeep2):
logger.warning("mirdeep2 executable file not found. Skipping...")
return None
with chdir(out_dir):
collapsed, bam_file = _prepare_inputs(collapsed, bam_file, out_dir)
cmd = ("{perl_exports} && perl {mirdeep2} {collapsed} {genome} {bam_file} {mature} none {hairpin} -f {rfam_file} -r simple -c -P -t {species} -z res").format(**locals())
if not file_exists(out_file):
try:
do.run(cmd.format(**locals()), "Running mirdeep2.")
except:
logger.warning("mirdeep2 failed. Please report the error to https://github.com/lpantano/mirdeep2_core/issues.")
if file_exists(out_file):
novel_db = _parse_novel(out_file, dd.get_species(data[0][0]))
return novel_db
def _prepare_inputs(ma_fn, bam_file, out_dir):
"""
Convert collapsed reads to FASTA (and a matching BAM) with counts encoded in the read names
"""
fixed_fa = os.path.join(out_dir, "file_reads.fa")
count_name = dict()
with file_transaction(fixed_fa) as out_tx:
with open(out_tx, 'w') as out_handle:
with open(ma_fn) as in_handle:
h = in_handle.next()  # skip the header line
for line in in_handle:
cols = line.split("\t")
name_with_counts = "%s_x%s" % (cols[0], sum(map(int, cols[2:])))
count_name[cols[0]] = name_with_counts
print >>out_handle, ">%s\n%s" % (name_with_counts, cols[1])
fixed_bam = os.path.join(out_dir, "align.bam")
bam_handle = pysam.AlignmentFile(bam_file, "rb")
with pysam.AlignmentFile(fixed_bam, "wb", template=bam_handle) as out_handle:
for read in bam_handle.fetch():
read.query_name = count_name[read.query_name]
out_handle.write(read)
return fixed_fa, fixed_bam
def _parse_novel(csv_file, sps="new"):
"""Create input of novel miRNAs from miRDeep2"""
read = 0
seen = set()
safe_makedir("novel")
with open("novel/hairpin.fa", "w") as fa_handle, open("novel/miRNA.str", "w") as str_handle:
with open(csv_file) as in_handle:
for line in in_handle:
if line.startswith("mature miRBase miRNAs detected by miRDeep2"):
break
if line.startswith("novel miRNAs predicted"):
read = 1
line = in_handle.next()
continue
if read and line.strip():
cols = line.strip().split("\t")
name, start, score = cols[0], cols[16], cols[1]
if float(score) < 1:  # the miRDeep2 score column is read as a string
continue
m5p, m3p, pre = cols[13], cols[14], cols[15].replace('u','t').upper()
m5p_start = cols[15].find(m5p) + 1
m3p_start = cols[15].find(m3p) + 1
m5p_end = m5p_start + len(m5p) - 1
m3p_end = m3p_start + len(m3p) - 1
if m5p in seen:
continue
print >>fa_handle, (">{sps}-{name} {start}\n{pre}").format(**locals())
print >>str_handle, (">{sps}-{name} ({score}) [{sps}-{name}-5p:{m5p_start}-{m5p_end}] [{sps}-{name}-3p:{m3p_start}-{m3p_end}]").format(**locals())
seen.add(m5p)
return op.abspath("novel")
|
biocyberman/bcbio-nextgen
|
bcbio/srna/mirdeep.py
|
Python
|
mit
| 4,651
|
[
"pysam"
] |
6c0fe0d3ef0e555baccf17978208730cc451cbaf73e73e232be50992a586e64f
|
#Copyright 2009 Almero Gouws, <14366037@sun.ac.za>
"""
This module provides general functions that are used by the Python
based Bayesian network toolbox.
"""
__docformat__ = 'restructuredtext'
import numpy as np
import pylab
def determine_observed(evidence):
"""
Determine which nodes are observed and which are hidden.
Parameters
----------
evidence: List
A list of any observed evidence. If evidence[i] = None, then
node i is unobserved (hidden node), else if evidence[i] =
SomeValue then, node i has been observed as being SomeValue.
"""
hidden = []
observed = []
for i in range(len(evidence)):
if evidence[i] is None:
hidden.append(i)
else:
observed.append(i)
return np.array(hidden), np.array(observed)
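# Example (illustrative): with evidence = [None, 1, None, 0],
# determine_observed(evidence) returns (array([0, 2]), array([1, 3])),
# i.e. nodes 0 and 2 are hidden while nodes 1 and 3 carry observations.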
def determine_pot_type(model, onodes):
"""
Determines the potential type of the model, based on which nodes are
hidden.
Parameters
----------
Model: bnet object
A probabilistic graphical model.
onodes: List
A list of indices of the observed nodes in the model.
"""
nodes = model.order
hnodes = mysetdiff(np.array(nodes), np.array(onodes))
if len(np.intersect1d(np.array(model.cnodes[:]), \
np.array(hnodes).tolist())) == 0:
"""If all hidden nodes are discrete nodes"""
pot_type = 'd'
elif issubset(np.array(hnodes), np.array(model.cnodes[:])):
"""If all the hidden nodes are continuous"""
pot_type = 'g'
else:
pot_type = 'cg'
return pot_type
def mysetdiff(A, B):
"""
Returns the difference between 2 sets.
Parameters
----------
A: List
A list defining a set.
B: List
A list defining a set.
"""
if A.shape == (0,):
return np.array(([]))
elif B.shape == (0,):
return A
else:
return np.setdiff1d(A,B)
def issubset(small, large):
"""
Returns true if the set in 'small' is a subset of the set
in 'large'.
Parameters
----------
small: List
A list defining a set.
large: List
A list defining a set.
"""
if small.shape == (0,):
issubset = True
else:
temp = np.intersect1d(np.array(small[:]), np.array(large[:]))
issubset = temp.shape[0] == small.shape[0]
return issubset
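# Example (illustrative):
#   issubset(np.array([2, 3]), np.array([1, 2, 3, 4]))  -> True
#   issubset(np.array([2, 5]), np.array([1, 2, 3, 4]))  -> False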
def find(U):
"""
Returns the indices of the elements that are True in U.
Parameters
----------
U: A 2D row vector (shape (1, N)) filled with ones and zeros.
"""
args = np.argwhere(U)
U = args[:,1]
return np.array([U])
def draw_graph(adj_mat, names=[], directed=False, text=''):
"""
Draws the graph represented by adj_mat to a figure.
Parameters
----------
adj_mat: Numpy ndarray
An adjacency matrix representing a graph, e.g. if adj_mat[i, j] == 1,
then node i and node j are connected.
names: List
A list of the names of the nodes, e.g. if names[i] == 'Rain', then node
i is labeled 'Rain'.
"""
import networkx as nx
pylab.figure()
pylab.title(text)
if directed == False:
if names != []:
g = nx.Graph()
g.add_nodes_from(names)
edges = np.argwhere(adj_mat == 1)
for i in edges:
g.add_edge((names[i[0]], names[i[1]]))
else:
g = nx.Graph(adj_mat)
else:
if names != []:
g = nx.DiGraph()
g.add_nodes_from(names)
edges = np.argwhere(adj_mat == 1)
for i in edges:
g.add_edge((names[i[0]], names[i[1]]))
else:
g = nx.DiGraph(adj_mat)
nx.draw(g)
def mk_stochastic(mat):
"""
Ensure that the sum over the last dimension is one. If mat is a 3D
matrix, then this function will ensure that sum_k mat(i, j, k) = 1 for
all i and j.
Parameters
----------
mat: numpy ndarray
The matrix to convert.
"""
if mat.squeeze().ndim == 1:
mat = mat.squeeze()
ns = mat.shape
mat = mat.reshape(np.prod(ns[0:-1]), ns[len(ns) - 1], order='FORTRAN')
s = np.sum(mat, 1)
s = np.array([s + (s==0)])
norm = np.repeat(s, ns[len(ns) - 1], 0)
mat = mat / norm.T
mat = mat.reshape(ns, order='FORTRAN')
return mat
def compute_counts(data, sz):
"""
Counts the number of times each combination of discrete assignments
occurs. For instance, if sz = [2, 2], that means there are two binary
nodes, which can be in 2**2 = 4 possible states: [0, 0], [1, 0], [0, 1]
and [1, 1]. The output of this function would be a 2-by-2 matrix,
containing the tally of how many times each discrete combination occurred
in data. Therefore counts[0, 1] = the number of times the combination
of the first possible value for node 1 and the second possible value for
node 2 occurred; in this binary case, the combination [0, 1].
Parameters
----------
data: numpy ndarray
data(i,t) is the value of variable i in case t.
sz:
The values for variable i are assumed to be in range(0, sz(i))
"""
P = np.prod(sz)
indices = subv2ind(sz, data)
count = np.histogram(indices, P, (0, P-1))
count = count[0]
count = count.reshape(sz)
count = count.T
return count
def subv2ind(sz, sub):
"""
Linear index from subscript vector.
Parameters
----------
sz: List
The size of the array we want to create a linear index for.
sub: numpy ndarray
The subscript vector
"""
cum_size = np.cumprod(sz[0:-1])
prev_cum_size = np.mat((np.hstack((1, cum_size))))
index = (sub + 1)*prev_cum_size.T - np.sum(prev_cum_size)
return index
def mk_multi_index(n, dims, vals):
"""
Creates a list of slices, named index. The list can be used to slice an
array, for example:
index = mk_multi_index(3, [0, 2], [3, 2])
gives index = [slice(3,4), slice(None), slice(2, 3)],
which selects index 3 from dim 0, everything from dim 1, and index 2
from dim 2; applying it as A[tuple(index)] is equivalent to the slice
A[3:4, :, 2:3].
Parameters
----------
n: Int
The number of dimensions the matrix to be sliced has.
dims: List
The dimensions we wish to slice from.
vals: List
Which entries to select out of the desired dimensions.
"""
index = []
for i in range(0, n):
if i in dims:
val = vals[dims.index(i)]
index.append(slice(val, val + 1))
else:
index.append(slice(None))
return index
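# Example (illustrative): for an array A of shape (4, 2, 3),
#   index = mk_multi_index(3, [0, 2], [3, 2])
#   A[tuple(index)]    # same entries as A[3:4, :, 2:3], result shape (1, 2, 1)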
def mk_undirected(model_graph):
"""
Converts an adjacency matrix representing edges in a directed graph,
by making all the edges undirected.
Parameters
----------
model_graph: Numpy array
The adjacency matrix representing the directed graph.
"""
for i in range(0, model_graph.shape[0]):
for j in range(0, model_graph.shape[1]):
if model_graph[i, j] == 1:
model_graph[j, i] = 1
return model_graph
def block(blocks, block_sizes):
"""
Return a vector of subscripts corresponding to specified blocks.
"""
skip = np.cumsum(block_sizes).tolist()
skip.insert(0, 0)
start = np.array(skip)[blocks]
fin = start + block_sizes[blocks]
if type(blocks) == int:
len_blocks = 1
start = [start]
fin = [fin]
else:
len_blocks = len(blocks)
sub = []
for i in range(0, len_blocks):
sub.append(range(start[i], fin[i]))
return sub
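# Example (illustrative): with block_sizes = np.array([2, 3, 4]),
# block([0, 2], block_sizes) returns the subscripts of blocks 0 and 2,
# i.e. [0, 1] and [5, 6, 7, 8].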
def gaussian_prob(x, m, C, use_log=False):
"""
Evaluate a multivariate Gaussian density.
"""
[N, d] = x.shape
M = m.flatten().T * np.matrix(np.ones((1, N)))
denom = (2 * np.pi)**(float(d)/2) * np.sqrt(np.abs(np.linalg.det(C)))
mahal = np.array((x.T - M).T * np.linalg.inv(C))
mahal = np.sum(mahal * np.array((x.T - M)).T, 1)
eps = 2.2204e-16  # machine epsilon, used to avoid division by zero
if use_log:
p = -0.5 * mahal - np.log(denom)
else:
p = (np.exp(-0.5 * mahal)) / (denom + eps)
return p
|
bhrzslm/uncertainty-reasoning
|
my_engine/others/GrMPy/lib/GrMPy/general.py
|
Python
|
mit
| 8,556
|
[
"Gaussian"
] |
9626d3ac8fef9d5d2526765a24660d53b39ea6b1e1cb1fd4d090fb555a193464
|
# vi:sts=4:sw=4:et
"""Code for parsing OpenEmbedded license strings"""
import ast
import re
from fnmatch import fnmatchcase as fnmatch
class LicenseError(Exception):
pass
class LicenseSyntaxError(LicenseError):
def __init__(self, licensestr, exc):
self.licensestr = licensestr
self.exc = exc
LicenseError.__init__(self)
def __str__(self):
return "error in '%s': %s" % (self.licensestr, self.exc)
class InvalidLicense(LicenseError):
def __init__(self, license):
self.license = license
LicenseError.__init__(self)
def __str__(self):
return "invalid characters in license '%s'" % self.license
license_operator = re.compile('([&|() ])')
license_pattern = re.compile('[a-zA-Z0-9.+_\-]+$')
class LicenseVisitor(ast.NodeVisitor):
"""Syntax tree visitor which can accept OpenEmbedded license strings"""
def visit_string(self, licensestr):
new_elements = []
elements = filter(lambda x: x.strip(), license_operator.split(licensestr))
for pos, element in enumerate(elements):
if license_pattern.match(element):
if pos > 0 and license_pattern.match(elements[pos-1]):
new_elements.append('&')
element = '"' + element + '"'
elif not license_operator.match(element):
raise InvalidLicense(element)
new_elements.append(element)
self.visit(ast.parse(' '.join(new_elements)))
class FlattenVisitor(LicenseVisitor):
"""Flatten a license tree (parsed from a string) by selecting one of each
set of OR options, in the way the user specifies"""
def __init__(self, choose_licenses):
self.choose_licenses = choose_licenses
self.licenses = []
LicenseVisitor.__init__(self)
def visit_Str(self, node):
self.licenses.append(node.s)
def visit_BinOp(self, node):
if isinstance(node.op, ast.BitOr):
left = FlattenVisitor(self.choose_licenses)
left.visit(node.left)
right = FlattenVisitor(self.choose_licenses)
right.visit(node.right)
selected = self.choose_licenses(left.licenses, right.licenses)
self.licenses.extend(selected)
else:
self.generic_visit(node)
def flattened_licenses(licensestr, choose_licenses):
"""Given a license string and choose_licenses function, return a flat list of licenses"""
flatten = FlattenVisitor(choose_licenses)
try:
flatten.visit_string(licensestr)
except SyntaxError as exc:
raise LicenseSyntaxError(licensestr, exc)
return flatten.licenses
def is_included(licensestr, whitelist=None, blacklist=None):
"""Given a license string and whitelist and blacklist, determine if the
license string matches the whitelist and does not match the blacklist.
Returns a tuple holding the boolean state and a list of the applicable
licenses which were excluded (or None, if the state is True)
"""
def include_license(license):
return any(fnmatch(license, pattern) for pattern in whitelist)
def exclude_license(license):
return any(fnmatch(license, pattern) for pattern in blacklist)
def choose_licenses(alpha, beta):
"""Select the option in an OR which is the 'best' (has the most
included licenses)."""
alpha_weight = len(filter(include_license, alpha))
beta_weight = len(filter(include_license, beta))
if alpha_weight > beta_weight:
return alpha
else:
return beta
if not whitelist:
whitelist = ['*']
if not blacklist:
blacklist = []
licenses = flattened_licenses(licensestr, choose_licenses)
excluded = filter(lambda lic: exclude_license(lic), licenses)
included = filter(lambda lic: include_license(lic), licenses)
if excluded:
return False, excluded
else:
return True, included
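# ---------------------------------------------------------------------------
# Illustrative usage (comments only, toy license strings):
#
#   is_included("GPLv3 | MIT", whitelist=["MIT", "BSD*"])
#       -> (True, ["MIT"])       # choose_licenses keeps the OR branch with
#                                # more whitelisted licenses
#   is_included("GPLv3 & MIT", blacklist=["GPLv3"])
#       -> (False, ["GPLv3"])    # an AND term hitting the blacklist fails
# ---------------------------------------------------------------------------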
|
wwright2/dcim3-angstrom1
|
sources/openembedded-core/meta/lib/oe/license.py
|
Python
|
mit
| 3,978
|
[
"VisIt"
] |
e9af2f8d51b08c74e40bedbee2d4af0bd467e71b1df019393461c008d09f9ba1
|
#
# Modified based on MINDO3_Parameters.py and Slater.py in PyQuante-1.6
#
"""\
MINDO3.py: Dewar's MINDO/3 Semiempirical Method
This program is part of the PyQuante quantum chemistry program suite.
Copyright (c) 2004, Richard P. Muller. All Rights Reserved.
PyQuante version 1.2 and later is covered by the modified BSD
license. Please see the file LICENSE that is part of this
distribution.
"""
import numpy
from pyscf import lib
from pyscf.data.nist import HARTREE2EV
E2 = 14.399 # Coulomb's law coeff if R in \AA and resulting E in eV
E2 /= HARTREE2EV # Convert to Hartree
EV2KCAL = 23.061 # Conversion of energy in eV to energy in kcal/mol
HARTREE2KCAL = HARTREE2EV * EV2KCAL
#############################
#
# MINDO/3 parameters
#
# in eV
USS3 = numpy.array((
0. , -12.505, 0. ,
0. , 0. , -33.61, -51.79, -66.06, -91.73, -129.86, 0. ,
0. , 0. , 0. , -39.82, -56.23, -73.39, -98.99 , 0. ,
))
UPP3 = numpy.array((
0. , 0. , 0. ,
0. , 0. , -25.11, -39.18, -56.40, -78.80, -105.93, 0. ,
0. , 0. , 0. , -29.15, -42.31, -57.25, -76.43 , 0. ,
))
# Convert to Eh
USS3 *= 1./HARTREE2EV
UPP3 *= 1./HARTREE2EV
# *** ONE CENTER REPULSION INTEGRALS
# GSS ::= (SS,SS)
# GPP ::= (PP,PP)
# GSP ::= (SS,PP)
# GP2 ::= (PP,P*P*)
# HSP ::= (SP,SP)
GSSM = numpy.array((
0. , 12.848, 0. ,
0. , 9.00, 10.59, 12.23, 13.59, 15.42, 16.92, 0. ,
0. , 0. , 8.09, 9.82, 11.56, 12.88, 15.03, 0. ,
0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. , 15.03643948, 0. ,
0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. , 15.04044855, 0. ,
))
GPPM = numpy.array((
0. , 0. , 0. ,
0. , 6.97, 8.86, 11.08, 12.98, 14.52, 16.71, 0. ,
0. , 0. , 5.98, 7.31, 8.64, 9.90, 11.30, 0. ,
0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. , 11.27632539, 0. ,
0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. , 11.14778369, 0. ,
))
GSPM = numpy.array((
0. , 0. , 0. ,
0. , 7.43, 9.56, 11.47, 12.66, 14.48, 17.25, 0. ,
0. , 0. , 6.63, 8.36, 10.08, 11.26, 13.16, 0. ,
0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. , 13.03468242, 0. ,
0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. , 13.05655798, 0. ,
))
GP2M = numpy.array((
0. , 0. , 0. ,
0. , 6.22, 7.86, 9.84, 11.59, 12.98, 14.91, 0. ,
0. , 0. , 5.40, 6.54, 7.68, 8.83, 9.97, 0. ,
0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. , 9.85442552, 0. ,
0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. , 9.91409071, 0. ,
))
HSPM = numpy.array((
0. , 0. , 0. ,
0. , 1.28, 1.81, 2.43, 3.14, 3.94, 4.83, 0. ,
0. , 0. , 0.70, 1.32, 1.92, 2.26, 2.42, 0. ,
0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. , 2.45586832, 0. ,
0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. , 2.45638202, 0. ,
))
HP2M = numpy.array((
0. , 0. , 0. ,
0. , 0. , 0.50, 0.62, 0.70, 0.77, 0.90, 0. ,
0. , 0. , 0. , 0.38, 0.48, 0.54, 0.67, 0. ,
0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. ,
))
GSSM *= 1./HARTREE2EV
GPPM *= 1./HARTREE2EV
GSPM *= 1./HARTREE2EV
GP2M *= 1./HARTREE2EV
HSPM *= 1./HARTREE2EV
HP2M *= 1./HARTREE2EV
# *** F03 IS THE ONE CENTER AVERAGED REPULSION INTEGRAL FOR USE IN THE
# TWO CENTER ELECTRONIC REPULSION INTEGRAL EVALUATION.
F03 = numpy.array((
0., 12.848, 10.0,
10.0, 0.0, 8.958, 10.833, 12.377, 13.985, 16.250,
10.000, 10.000, 0.000, 0.000,7.57 , 9.00 ,10.20 , 11.73
))
F03 *= 1./HARTREE2EV
VS = numpy.array((
0. , -13.605, 0. ,
0. , 0. , -15.160, -21.340, -27.510, -35.300, -43.700, -17.820,
0. , 0. , 0. , 0. , -21.100, -23.840, -25.260, 0. ,
))
VP = numpy.array((
0. , 0. , 0. ,
0. , 0. , -8.520, -11.540, -14.340, -17.910, -20.890, -8.510,
0. , 0. , 0. , 0. , -10.290, -12.410, -15.090, 0. ,
))
VS *= 1./HARTREE2EV
VP *= 1./HARTREE2EV
# *** HERE COMES THE OPTIMIZED SLATER_S EXPONENTS FOR THE EVALUATION
# OF THE OVERLAP INTEGRALS AND MOLECULAR DIPOLE MOMENTS.
ZS3 = numpy.array((
0. , 1.30, 0. ,
0. , 0. , 1.211156, 1.739391, 2.704546, 3.640575, 3.111270, 0. ,
0. , 0. , 0. , 1.629173, 1.926108, 1.719480, 3.430887, 0. ,
))
ZP3 = numpy.array((
0. , 0. , 0. ,
0. , 0. , 0.972826, 1.709645, 1.870839, 2.168448, 1.419860, 0. ,
0. , 0. , 0. , 1.381721, 1.590665, 1.403205, 1.627017, 0. ,
))
# *** BETA3 AND ALP3 ARE THE BOND PARAMETERS USED IN THE
# RESONANCE INTEGRAL AND THE CORE CORE REPULSION INTEGRAL RESPECTIVE
Bxy = numpy.array((
# H B C N O F Si P S Cl
0.244770,
0 , 0,
0 , 0, 0,
0 , 0, 0, 0,
0.185347, 0, 0, 0, 0.151324,
0.315011, 0, 0, 0, 0.250031, 0.419907,
0.360776, 0, 0, 0, 0.310959, 0.410886, 0.377342,
0.417759, 0, 0, 0, 0.349745, 0.464514, 0.458110, 0.659407,
0.195242, 0, 0, 0, 0.219591, 0.247494, 0.205347, 0.334044, 0.197464,
0 , 0, 0, 0, 0 , 0 , 0 , 0 , 0 , 0,
0 , 0, 0, 0, 0 , 0 , 0 , 0 , 0 , 0, 0,
0 , 0, 0, 0, 0 , 0 , 0 , 0 , 0 , 0, 0, 0,
0 , 0, 0, 0, 0 , 0 , 0 , 0 , 0 , 0, 0, 0, 0,
0.289647, 0, 0, 0, 0 , 0.411377, 0 , 0 , 0 , 0, 0, 0, 0, 0.291703,
0.320118, 0, 0, 0, 0 , 0.457816, 0 , 0.470000, 0.300000, 0, 0, 0, 0, 0 , 0.311790,
0.220654, 0, 0, 0, 0 , 0.284620, 0.313170, 0.422890, 0 , 0, 0, 0, 0, 0 , 0 , 0.202489,
0.231653, 0, 0, 0, 0 , 0.315480, 0.302298, 0 , 0 , 0, 0, 0, 0, 0 , 0.277322, 0.221764, 0.258969,
))
BETA3 = lib.unpack_tril(Bxy)
del(Bxy)
Axy = numpy.array((
# H B C N O F Si P S Cl
1.489450,
0 , 0,
0 , 0, 0,
0 , 0, 0, 0,
2.090352, 0, 0, 0, 2.280544,
1.475836, 0, 0, 0, 2.138291, 1.371208,
0.589380, 0, 0, 0, 1.909763, 1.635259, 2.029618,
0.478901, 0, 0, 0, 2.484827, 1.820975, 1.873859, 1.537190,
3.771362, 0, 0, 0, 2.862183, 2.725913, 2.861667, 2.266949, 3.864997,
0 , 0, 0, 0, 0 , 0 , 0 , 0 , 0 , 0,
0 , 0, 0, 0, 0 , 0 , 0 , 0 , 0 , 0, 0,
0 , 0, 0, 0, 0 , 0 , 0 , 0 , 0 , 0, 0, 0,
0 , 0, 0, 0, 0 , 0 , 0 , 0 , 0 , 0, 0, 0, 0,
0.940789, 0, 0, 0, 0 , 1.101382, 0 , 0 , 0 , 0, 0, 0, 0, 0.918432,
0.923170, 0, 0, 0, 0 , 1.029693, 0 , 1.662500, 1.750000, 0, 0, 0, 0, 0 , 1.186652,
1.700698, 0, 0, 0, 0 , 1.761370, 1.878176, 2.077240, 0 , 0, 0, 0, 0, 0 , 0 , 1.751617,
2.089404, 0, 0, 0, 0 , 1.676222, 1.817064, 0 , 0 , 0, 0, 0, 0, 0 , 1.543720, 1.950318, 1.792125,
))
ALP3 = lib.unpack_tril(Axy)
del(Axy)
# *** EISOL3 AND EHEAT3 ARE THE GS ELECTRONIC ENERGY OF THE NEUTRAL ATOM
# (IN E.V.) AND THE HEAT OF FORMATION OF THE FREE ATOM (IN KCAL/MOL)
EHEAT3 = numpy.array((
0. , 52.102, 0. ,
0. , 0. , 135.7, 170.89, 113.0, 59.559, 18.86, 0. ,
0. , 0. , 0. , 106.0 , 79.8 , 65.65 , 28.95, 0. ,
))
EISOL3 = numpy.array((
0. , -12.505, 0. ,
0. , 0. ,-61.70,-119.47,-187.51,-307.07,-475.00,0. ,
0. , 0. , 0. ,-90.98 ,-150.81,-229.15,-345.93,0. ,
))
# CORE IS THE CHARGE ON THE ATOM AS SEEN BY THE ELECTRONS
#
CORE = numpy.array((0,
1, 0,
1, 2, 3, 4, 5, 6, 7, 0,
1, 2, 3, 4, 5, 6, 7, 0,
1, 2, 3, 4, 5, 6, 7, 8, 9,10,11, 2, 3, 4, 5, 6, 7, 0,
1, 2, 3, 4, 5, 6, 7, 8, 9,10,11, 2, 3, 4, 5, 6, 7, 0,
1, 2, 3, 4, 5, 6, 7, 8, 9,10,11,12,13,14,15,16,
3, 4, 5, 6, 7, 8, 9,10,11, 2, 3, 4, 5, 6, 7, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 1, -2, -1, 0
))
# MINDO/3 parameters end
##############################
#
# MNDO-PM3 parameters
#
# COMMON /PM3 / USSPM3(107), UPPPM3(107), UDDPM3(107), ZSPM3(107),
# ZPPM3(107), ZDPM3(107), BETASP(107), BETAPP(107), BETADP(107),
# ALPPM3(107), EISOLP(107), DDPM3(107), QQPM3(107), AMPM3(107),
# ADPM3(107), AQPM3(107) ,GSSPM3(107), GSPPM3(107), GPPPM3(107),
# GP2PM3(107), HSPPM3(107),POLVOP(107)
# MNDO-PM3 parameters end
##############################
# Gaussian functions for fitting to Slaters. These functions are
# STO-6G fits to slater exponents with exponents of 1. To fit
# to exponents of \zeta, you need only multiply each
# exponent by \zeta^2
# The rest of these functions can be obtained from Stewart,
# JCP 52, 431 (1970)
gexps_1s = [2.310303149e01,4.235915534e00,1.185056519e00,
4.070988982e-01,1.580884151e-01,6.510953954e-02]
gcoefs_1s = [9.163596280e-03,4.936149294e-02,1.685383049e-01,
3.705627997e-01,4.164915298e-01,1.303340841e-01]
gexps_2s = [2.768496241e01,5.077140627e00,1.426786050e00,
2.040335729e-01,9.260298399e-02,4.416183978e-02]
gcoefs_2s = [-4.151277819e-03,-2.067024148e-02,-5.150303337e-02,
3.346271174e-01,5.621061301e-01,1.712994697e-01]
gexps_2p = [5.868285913e00,1.530329631e00,5.475665231e-01,
2.288932733e-01,1.046655969e-01,4.948220127e-02]
gcoefs_2p = [7.924233646e-03,5.144104825e-02,1.898400060e-01,
4.049863191e-01,4.012362861e-01,1.051855189e-01]
gexps_3s = [3.273031938e00,9.200611311e-01,3.593349765e-01,
8.636686991e-02,4.797373812e-02,2.724741144e-02]
gcoefs_3s = [-6.775596947e-03,-5.639325779e-02,-1.587856086e-01,
5.534527651e-01,5.015351020e-01,7.223633674e-02]
gexps_3p = [5.077973607e00,1.340786940e00,2.248434849e-01,
1.131741848e-01,6.076408893e-02,3.315424265e-02]
gcoefs_3p = [-3.329929840e-03,-1.419488340e-02,1.639395770e-01,
4.485358256e-01,3.908813050e-01,7.411456232e-02]
gexps_3d = [2.488296923,7.981487853e-1,3.311327490e-1,
1.559114463e-1,7.877734732e-2,4.058484363e-2]
gcoefs_3d = [7.283828112e-3,5.386799363e-2,2.072139149e-1,
4.266269092e-1,3.843100204e-1,8.902827546e-2]
gexps_4s = [3.232838646,3.605788802e-1,1.717902487e-1,
5.277666487e-2,3.163400284e-2,1.874093091e-2]
gcoefs_4s = [1.374817488e-3,-8.666390043e-2,-3.130627309e-1,
7.812787397e-1,4.389247988e-1,2.487178756e-2]
gexps_4p = [2.389722618, 7.960947826e-1,3.415541380e-1,
8.847434525e-2,4.958248334e-2,2.816929784e-2]
gcoefs_4p = [-1.665913575e-3,-1.657464971e-2,-5.958513378e-2,
4.053115554e-1,5.433958189e-1,1.20970491e-1]
# Here are the STO-6G values from Hehre, Stewart, Pople JCP 51,2657 (1969),
# and Hehre, Ditchfield, Stewart, Pople JCP 52, 2769 (1970)
# which are a little different, in that they use the same exponent for
# 2s,2p, and 3s,3p, which makes the fit a bit different.
gexps_old_2 = [1.03087e1,2.04036,6.34142e-1,
2.43977e-1,1.05960e-1,4.85690e-2]
gcoefs_old_2s = [-1.32528e-2,-4.69917e-2,-3.37854e-2,
2.50242e-1,2.95117e-1,2.40706e-1]
gcoefs_old_2p = [3.75970e-3,3.76794e-2,1.73897e-1,
4.18036e-1,4.25860e-1,1.017008e-1]
gexps_old_3 = [3.0817,8.24896e-1,3.09345e-1,
1.38468e-1,6.85210e-2,3.53133e-2]
gcoefs_old_3s = [-7.94313e-3,-7.10026e-2,-1.78503e-1,
1.51064e-1,7.35491e-1,2.76059e-1]
gcoefs_old_3p = [-7.13936e-3,-1.82928e-2,7.62162e-2,
4.14510e-1,4.88962e-1,1.05882e-1]
gexps = { # indexed by N,s_or_p:
(1,0) : gexps_1s,
(2,0) : gexps_2s,
(2,1) : gexps_2p,
(3,0) : gexps_3s,
(3,1) : gexps_3p
}
gcoefs = { # indexed by N,s_or_p:
(1,0) : gcoefs_1s,
(2,0) : gcoefs_2s,
(2,1) : gcoefs_2p,
(3,0) : gcoefs_3s,
(3,1) : gcoefs_3p
}
gexps_old = { # indexed by N,s_or_p:
(1,0) : gexps_1s,
(2,0) : gexps_old_2,
(2,1) : gexps_old_2,
(3,0) : gexps_old_3,
(3,1) : gexps_old_3
}
gcoefs_old = { # indexed by N,s_or_p:
(1,0) : gcoefs_1s,
(2,0) : gcoefs_old_2s,
(2,1) : gcoefs_old_2p,
(3,0) : gcoefs_3s,
(3,1) : gcoefs_3p
}
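# Example (illustrative): to obtain the STO-6G expansion of a carbon 2p
# Slater orbital with the MINDO/3 exponent ZP3[6], scale the unit-exponent
# primitives by zeta**2 while keeping the contraction coefficients:
#
#   zeta = ZP3[6]                               # carbon 2p Slater exponent
#   exps = [a * zeta**2 for a in gexps[(2, 1)]]
#   coefs = gcoefs[(2, 1)]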
del(lib)
|
gkc1000/pyscf
|
pyscf/semiempirical/mopac_param.py
|
Python
|
apache-2.0
| 13,473
|
[
"Gaussian",
"PySCF"
] |
679b383a13b25e37caad91a9cf8fd4c3865c2bc0493488542dbe9d7c218c92ba
|
#!/usr/bin/python
"""
This module specifically deals with the photometric quantities
related to a Multi Gaussian Expansion models (Monnet et al. 1992, Emsellem et al. 1994). It
makes use of the MGE class defined in pygme.py module.
It includes the derivation of projected and deprojected photometry.
WARNING: this module is being updated regularly and may still contain some obvious bugs. A stable version will
be available hopefully before the end of 2012.
For questions, please contact Eric Emsellem at eric.emsellem@eso.org
"""
"""
Importing the most important modules
This MGE module requires NUMPY and SCIPY
"""
try:
import numpy as np
except ImportError:
raise Exception("numpy is required for pygme")
from numpy import exp, sqrt
try:
from scipy import special
except ImportError:
raise Exception("scipy is required for pygme")
from rwcfor import floatMGE
from paramMGE import paramMGE
from mge_miscfunctions import print_msg
__version__ = '1.1.1 (21/08/2013)'
#__version__ = '1.1.0 (28/08/2012)'
#__version__ = '1.0.0 (08/01/2012)'
# Version 1.1.1 Changed imin imax into ilist
# Version 1.1.0 Revised to only have R and Z for visible modules
# and not R2 and Z2
# Version 1.0.0 extracted from the older pygme.py
class photMGE(paramMGE):
def __init__(self, infilename=None, indir=None, saveMGE=None, **kwargs) :
paramMGE.__init__(self, infilename=infilename, indir=indir, saveMGE=saveMGE, **kwargs)
##################################################################
### Derive the spatial and projected densities
##################################################################
### MASS - 1 G --------------------------------------------------
### Deriving the spatial mass density for 1 gaussian
def _rho3D_1G_fromR2Z2(self, R2, Z2, ind) :
"""
Spatial Mass Density in Mass/pc-2/arcsec-1
for 1 Gaussian only: ind is the index of that Gaussian
R2 and Z2 are grids of R*R and Z*Z (should have the same size) [in arcseconds]
"""
if self._findGauss3D == 0 :
print_msg("No Spatial Gaussians yet", 2)
return R2*0.
return self.Parc[ind] * exp(- (R2 + Z2 / self.QxZ2[ind]) / self._pParam.dSig3Darc2[ind]) # in Mass/pc-2/arcsec-1
### Deriving the projected mass density for 1 gaussian
def _rho2D_1G_fromX2Y2(self, X2, Y2, ind) :
"""
Projected Mass Density in Mass/pc-2
for 1 Gaussian only: ind is the index of that Gaussian
X2 and Y2 are grids of X*X and Y*Y (should have the same size) [in arcseconds]
"""
if self._findGauss2D == 0 :
print_msg("No projected Gaussians yet", 2)
return X2*0.
return self.Pp[ind] * exp(- (X2 + Y2 / self.Q2D2[ind]) / self.dSig2Darc2[ind]) # in Mass/pc-2
###=================================================================
##################################################################
### Derive the spatial and projected densities
##################################################################
### MASS - 1 G --------------------------------------------------
### Deriving the spatial mass density for 1 gaussian
def rho3D_1G(self, R, Z, ind) :
"""
Spatial Mass Density in Mass/pc-2/arcsec-1
for 1 Gaussian only:
ind is the index of that Gaussian
R and Z are cylindrical coordinates [in arcseconds]
"""
return self._rho3D_1G_fromR2Z2(R*R, Z*Z, ind)
### Deriving the projected mass density for 1 gaussian
def rho2D_1G(self, X, Y, ind) :
"""
Projected Mass Density in Mass/pc-2
for 1 Gaussian only:
ind is the index of that Gaussian
X, Y are projected coordinates [in arcseconds]
"""
return self._rho2D_1G_fromX2Y2(X*X, Y*Y, ind)
###=================================================================
### MASS - ALL-----------------------------------------------------
### Deriving the spatial mass density for all
def _rho3D_fromR2Z2(self, R2, Z2, ilist=None) :
"""
Spatial Mass Density in Mass/pc-2/arcsec-1
R2 and Z2 are grids of R*R and Z*Z (should have the same size) [in arcseconds]
"""
if self._findGauss3D == 0 :
print_msg("No Spatial Gaussians yet", 2)
return R2*0.
rho = np.zeros_like(R2)
for i in ilist :
rho += self._rho3D_1G_fromR2Z2(R2, Z2, i)
return rho # in Mass/pc-2/arcsec-1
### Deriving the projected mass density for all
def _rho2D_fromX2Y2(self, X2, Y2, ilist=None) :
"""
Projected Mass Density in Mass/pc-2
X2 and Y2 are grids of X*X and Y*Y (should have the same size) [in arcseconds]
"""
if self._findGauss2D == 0 :
print_msg("No projected Gaussians yet", 2)
return X2*0.
rho2D = np.zeros_like(X2)
for i in ilist :
rho2D += self._rho2D_1G_fromX2Y2(X2, Y2, i)
return rho2D # in Mass/pc-2
###=============================================================================
### MASS - ALL-----------------------------------------------------
### Deriving the spatial mass density for all
def rho3D(self, R, Z, ilist=None) :
"""
Spatial Mass Density in Mass/pc-2/arcsec-1
Input :
R, Z the cylindrical coordinates [in arcseconds]
ilist indices for the Gaussians
"""
### Set the list of indices
ilist = self._set_ilist(ilist)
return self._rho3D_fromR2Z2(R*R, Z*Z, ilist=ilist)
### Deriving the projected mass density for all
def rho2D(self, X, Y, ilist=None) :
"""
Projected Mass Density in Mass/pc-2
Input :
X, Y the projected coordinates [in arcseconds]
ilist indices for the Gaussians
"""
### Set the list of indices
ilist = self._set_ilist(ilist)
return self._rho2D_fromX2Y2(X*X, Y*Y, ilist=ilist)
###=============================================================================
### LUMINOSITY - 1 G ------------------------------------------------------------
###=============================================================================
### Deriving the spatial luminosity density for 1 gaussian
def _rhoL3D_1G_fromR2Z2(self, R2, Z2, ind) :
"""
Spatial LUMINOSITY distribution in Lum.pc-2/arcsec-1
for 1 Gaussian only: ind is the index of that Gaussian
R2 and Z2 are grids of R*R and Z*Z (should have the same size) [in arcseconds]
"""
if self._findGauss3D == 0 :
print_msg("No Spatial Gaussians yet", 2)
return R2*0.
return self.Imax3Darc[ind] * exp(- (R2 + Z2 / self.QxZ2[ind]) / self._pParam.dSig3Darc2[ind]) # I in Lum.pc-2/arcsec-1
### Deriving the spatial luminosity density for 1 gaussian
def rhoL3D_1G(self, R, Z, ind) :
"""
Spatial LUMINOSITY distribution in Lum.pc-2/arcsec-1 for 1 Gaussian only
Input :
ind is the index of that Gaussian
R, Z are the cylindrical coordinates [in arcseconds]
"""
return self._rhoL3D_1G_fromR2Z2(R*R, Z*Z, ind)
###=============================================================================
### Deriving the projected luminosity density for 1 gaussian
def _rhoL2D_1G_fromX2Y2(self, X2, Y2, ind) :
"""
Projected LUMINOSITY distribution in Lum.pc-2
for 1 Gaussian only: ind is the index of that Gaussian
X2 and Y2 are grids of X*X and Y*Y (should have the same size) [in arcseconds]
"""
if self._findGauss2D == 0 :
print_msg("No projected Gaussians yet", 2)
return X2*0.
return self.Imax2D[ind] * exp(- (X2 + Y2 / self.Q2D2[ind]) / self.dSig2Darc2[ind]) # I in Lum.pc-2
###=============================================================================
### Deriving the projected luminosity density for 1 gaussian
def rhoL2D_1G(self, X, Y, ind) :
"""
Projected LUMINOSITY distribution in Lum.pc-2
for 1 Gaussian only
Input :
ind is the index of that Gaussian
X, Y are the projected coordinates [in arcseconds]
"""
return self._rhoL2D_1G_fromX2Y2(X*X, Y*Y, ind)
###=============================================================================
### LUMINOSITY - ALL ------------------------------------------------------------
### Deriving the spatial luminosity density for all
def _rhoL3D_fromR2Z2(self, R2, Z2, ilist=None) :
"""
Spatial LUMINOSITY distribution in Lum.pc-2/arcsec-1
R2, Z2 are the squares of the cylindrical coordinates [in arcseconds]
ilist: indices for the Gaussians
"""
if self._findGauss3D == 0 :
print_msg("No Spatial Gaussians yet", 2)
return R2*0.
rhoL = np.zeros_like(R2)
for i in ilist :
rhoL += self._rhoL3D_1G_fromR2Z2(R2, Z2, i)
return rhoL # I in Lum.pc-3
### Deriving the spatial luminosity density for all
def rhoL3D(self, R, Z, ilist=None) :
"""
Spatial LUMINOSITY distribution in Lum.pc-2/arcsec-1
Input :
R, Z = cylindrical coordinates [in arcseconds]
ilist: indices for the Gaussians
"""
### Set the list of indices
ilist = self._set_ilist(ilist)
return self._rhoL3D_fromR2Z2(R*R, Z*Z, ilist=ilist)
###=============================================================================
### Deriving the projected luminosity density for all
def _rhoL2D_fromX2Y2(self, X2, Y2, ilist=None):
"""
Projected LUMINOSITY distribution in Lum.pc-2
X2, Y2 are the squares of the projected coordinates [in arcseconds]
ilist: indices for the Gaussians
"""
if self._findGauss2D == 0 :
print_msg("No projected Gaussians yet", 2)
return X2*0.
rhoL2D = np.zeros_like(X2)
for i in ilist :
rhoL2D += self._rhoL2D_1G_fromX2Y2(X2, Y2, i)
return rhoL2D # I in Lum.pc-2
###=============================================================================
### Deriving the projected luminosity density for all
def rhoL2D(self, X, Y, ilist=None):
"""
Projected LUMINOSITY distribution in Lum.pc-2
Input :
X, Y = projected coordinates [in arcseconds]
ilist: indices for the Gaussians
"""
### Set the list of indices
ilist = self._set_ilist(ilist)
return self._rhoL2D_fromX2Y2(X*X, Y*Y, ilist=ilist)
###=============================================================================
### INTEGRATED LUMINOSITY - ONLY IN Z -------------------------------------------------
### Deriving the surface Lum for 1 gaussian, R is in arcsec
def FluxSurf_1G(self, R, ind) :
"""
Flux Surface density in Lum.pc-2
for 1 Gaussian only: ind is the index of that Gaussian
R is a grid of radii in arcseconds
"""
if self._findGauss3D == 0 :
print_msg("No Spatial Gaussians yet", 2)
return R * 0.
R2 = R * R
return self.Imax3Darc[ind] * sqrt(2. * np.pi) * self.qSig3Darc[ind] * exp(- R2 / self._pParam.dSig3Darc2[ind]) # in Lum.pc-2
### Deriving the integrated Lum (Zcut) for 1 gaussian, R and Z are in arcsec
def _rhointLZ_1G(self, R, Zcut, ind):
"""
Integrated luminosity (within Zcut in arcsec) in Lum.pc-2
for 1 Gaussian only: ind is the index of that Gaussian
R is a grid of radii in arcseconds
"""
if self._findGauss3D == 0 :
print_msg("No Spatial Gaussians yet", 2)
return R * 0.
return self.FluxSurf_1G(R, ind) * floatMGE(special.erf(Zcut / self._pParam.dqSig3Darc[ind])) # in Lum.pc-2
### Deriving the integrated Lum (Zcut) for all, R and are in arcsec
def rhointLZ(self, R, Zcut, ilist=None) :
"""
Integrated luminosity (within Zcut in arcsec) in Lum.pc-2
R is a grid of radii in arcseconds
"""
### Set the list of indices
ilist = self._set_ilist(ilist)
if self._findGauss3D == 0 :
print_msg("No Spatial Gaussians yet", 2)
return R * 0.
rhointL = 0.
for i in ilist :
rhointL += self._rhointLZ_1G(R, Zcut, i)
return rhointL # in Lum.pc-2
###=============================================================================
### INTEGRATED MASS - ONLY IN Z --------------------------------------------------------
### Deriving the surface Mass for 1 gaussian, R is in arcsec
def MassSurf1(self, R, ind) :
"""
Mass Surface density in Mass.pc-2
for 1 Gaussian only: ind is the index of that Gaussian
R is a grid of radii in arcseconds
"""
if self._findGauss3D == 0 :
print_msg("No Spatial Gaussians yet", 2)
return R * 0.
R2 = R * R
return self.Parc[ind] * sqrt(2. * np.pi) * self.qSig3Darc[ind] * exp(- R2 / self._pParam.dSig3Darc2[ind]) # in Mass.pc-2
### Deriving the integrated Mass (Zcut) for 1 gaussian, R and are in arcsec
def rhointMZ1(self, R, Zcut, ind) :
"""
Integrated Mass (within Zcut in arcsec) in Mass.pc-2
for 1 Gaussian only: ind is the index of that Gaussian
R is a grid of radii in arcseconds
"""
if self._findGauss3D == 0 :
print_msg("No Spatial Gaussians yet", 2)
return R * 0.
return self.MassSurf1(R, ind) * float(special.erf(Zcut / self._pParam.dqSig3Darc[ind])) # in Mass.pc-2
### Deriving the integrated Mass (Rcut, Zcut) for all, R and are in arcsec
def rhointMZ(self, R, Zcut, ilist=None) :
"""
Integrated Mass (within Zcut in arcsec) in Mass.pc-2
R is a grid of radii in arcseconds
"""
### Set the list of indices
ilist = self._set_ilist(ilist)
if self._findGauss3D == 0 :
print_msg("No Spatial Gaussians yet", 2)
return R * 0.
rhointM = 0.
for i in ilist :
rhointM += self.rhointMZ1(R, Zcut, i)
return rhointM # in Mass.pc-2
###=============================================================================
### INTEGRATED LUMINOSITY - SPHERE ALL -------------------------------------------------
### Deriving the integrated Lum (mcut) for all, m in arcsec
def rhoSphereintL(self, mcut, ilist=None) :
"""
Integrated LUMINOSITY truncated within a spheroid of m=mcut (in arcsec)
"""
### Set the list of indices
ilist = self._set_ilist(ilist)
if self._findGauss3D == 0 :
print_msg("No Spatial Gaussians yet", 2)
return 0.
rhointL = 0.
for i in ilist :
rhointL += self.rhoSphereintL_1G(mcut, i)
return rhointL
###=============================================================================
### Deriving the integrated Mass (mcut) for all, m in arcsec
def rhoSphereintM(self, mcut, ilist=None) :
"""
Integrated Mass truncated within a spheroid of m=mcut (in arcsec)
"""
### Set the list of indices
ilist = self._set_ilist(ilist)
if self._findGauss3D == 0 :
print_msg("No Spatial Gaussians yet", 2)
return 0.
rhointM = 0.
for i in ilist :
rhointM += self.rhoSphereintM_1G(mcut, i)
return rhointM
###=============================================================================
### Deriving the integrated Lum (Rcut, Zcut) for all, R and are in arcsec
def rhointL(self, Rcut, Zcut, ilist=None) :
"""
Integrated LUMINOSITY truncated within a cylinder defined by Rcut, Zcut (in arcsec)
"""
### Set the list of indices
ilist = self._set_ilist(ilist)
if self._findGauss3D == 0 :
print_msg("No Spatial Gaussians yet", 2)
return 0.
rhointL = 0.
for i in ilist :
rhointL += self.rhointL_1G(Rcut, Zcut, i)
return rhointL
###=============================================================================
### INTEGRATED MASS - ALL --------------------------------------------------------
### Deriving the integrated Mass (Rcut, Zcut) for all, R and are in arcsec
def rhointM(self, Rcut, Zcut, ilist=None) :
"""
Integrated Mass truncated within a cylinder defined by Rcut, Zcut (in arcsec)
"""
### Set the list of indices
ilist = self._set_ilist(ilist)
if self._findGauss3D == 0 :
print_msg("No Spatial Gaussians yet", 2)
return 0.
rhointM = 0.
for i in ilist :
rhointM += self.rhointM_1G(Rcut, Zcut, i)
return rhointM
###=============================================================================
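# ---------------------------------------------------------------------------
# Illustrative, self-contained sketch (comments only, not used by the class
# above) of the single-Gaussian functional form evaluated by
# _rho3D_1G_fromR2Z2. The names amp, sigma and q stand in for Parc[ind], the
# Gaussian width and the flattening; reading dSig3Darc2 as 2*sigma**2 and
# QxZ2 as q**2 is an assumption made only for this sketch.
#
#   amp, sigma, q = 1.0, 2.0, 0.7          # arbitrary demonstration values
#   R, Z = np.meshgrid(np.linspace(0., 10., 101), np.linspace(-5., 5., 101))
#   rho = amp * np.exp(-(R**2 + Z**2 / q**2) / (2. * sigma**2))
#   # rho peaks at amp where R = Z = 0
# ---------------------------------------------------------------------------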
|
emsellem/pygme
|
pygme/photMGE.py
|
Python
|
bsd-3-clause
| 17,400
|
[
"Gaussian"
] |
90127eaeed970706a755bd51f3a95feac5d481a7599c31f399f74cbf4c6e1e31
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2008-2009 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://genshi.edgewall.org/wiki/License.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://genshi.edgewall.org/log/.
"""Emulation of the proper abstract syntax tree API for Python 2.4."""
import compiler
import compiler.ast
from genshi.template import _ast24 as _ast
__all__ = ['_ast', 'parse']
__docformat__ = 'restructuredtext en'
def _new(cls, *args, **kwargs):
ret = cls()
if ret._fields:
for attr, value in zip(ret._fields, args):
if attr in kwargs:
raise ValueError('Field set both in args and kwargs')
setattr(ret, attr, value)
for attr in kwargs:
if (getattr(ret, '_fields', None) and attr in ret._fields) \
or (getattr(ret, '_attributes', None) and
attr in ret._attributes):
setattr(ret, attr, kwargs[attr])
return ret
class ASTUpgrader(object):
"""Transformer changing structure of Python 2.4 ASTs to
Python 2.5 ones.
Transforms ``compiler.ast`` Abstract Syntax Tree to builtin ``_ast``.
It can use fake ``_ast`` classes and in this way allows ``_ast`` emulation
in Python 2.4.
"""
def __init__(self):
self.out_flags = None
self.lines = [-1]
def _new(self, *args, **kwargs):
return _new(lineno = self.lines[-1], *args, **kwargs)
def visit(self, node):
if node is None:
return None
if type(node) is tuple:
return tuple([self.visit(n) for n in node])
lno = getattr(node, 'lineno', None)
if lno is not None:
self.lines.append(lno)
visitor = getattr(self, 'visit_%s' % node.__class__.__name__, None)
if visitor is None:
raise Exception('Unhandled node type %r' % type(node))
retval = visitor(node)
if lno is not None:
self.lines.pop()
return retval
def visit_Module(self, node):
body = self.visit(node.node)
if node.doc:
body = [self._new(_ast.Expr, self._new(_ast.Str, node.doc))] + body
return self._new(_ast.Module, body)
def visit_Expression(self, node):
return self._new(_ast.Expression, self.visit(node.node))
def _extract_args(self, node):
tab = node.argnames[:]
if node.flags & compiler.ast.CO_VARKEYWORDS:
kwarg = tab[-1]
tab = tab[:-1]
else:
kwarg = None
if node.flags & compiler.ast.CO_VARARGS:
vararg = tab[-1]
tab = tab[:-1]
else:
vararg = None
def _tup(t):
if isinstance(t, str):
return self._new(_ast.Name, t, _ast.Store())
elif isinstance(t, tuple):
elts = [_tup(x) for x in t]
return self._new(_ast.Tuple, elts, _ast.Store())
else:
raise NotImplementedError
args = []
for arg in tab:
if isinstance(arg, str):
args.append(self._new(_ast.Name, arg, _ast.Param()))
elif isinstance(arg, tuple):
args.append(_tup(arg))
else:
assert False, node.__class__
defaults = [self.visit(d) for d in node.defaults]
return self._new(_ast.arguments, args, vararg, kwarg, defaults)
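# --- Illustrative note (hypothetical function signature, not genshi code) ---
# For `def f(a, (b, c), *args, **kw): pass` the compiler.ast Function node has
# argnames == ['a', ('b', 'c'), 'args', 'kw'] with CO_VARARGS and CO_VARKEYWORDS
# set, so _extract_args above peels 'kw' and then 'args' off the end and converts
# the remaining names and nested tuples into _ast.Name / _ast.Tuple parameters.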
def visit_Function(self, node):
if getattr(node, 'decorators', ()):
decorators = [self.visit(d) for d in node.decorators.nodes]
else:
decorators = []
args = self._extract_args(node)
body = self.visit(node.code)
if node.doc:
body = [self._new(_ast.Expr, self._new(_ast.Str, node.doc))] + body
return self._new(_ast.FunctionDef, node.name, args, body, decorators)
def visit_Class(self, node):
#self.name_types.append(_ast.Load)
bases = [self.visit(b) for b in node.bases]
#self.name_types.pop()
body = self.visit(node.code)
if node.doc:
body = [self._new(_ast.Expr, self._new(_ast.Str, node.doc))] + body
return self._new(_ast.ClassDef, node.name, bases, body)
def visit_Return(self, node):
return self._new(_ast.Return, self.visit(node.value))
def visit_Assign(self, node):
#self.name_types.append(_ast.Store)
targets = [self.visit(t) for t in node.nodes]
#self.name_types.pop()
return self._new(_ast.Assign, targets, self.visit(node.expr))
aug_operators = {
'+=': _ast.Add,
'/=': _ast.Div,
'//=': _ast.FloorDiv,
'<<=': _ast.LShift,
'%=': _ast.Mod,
'*=': _ast.Mult,
'**=': _ast.Pow,
'>>=': _ast.RShift,
'-=': _ast.Sub,
}
def visit_AugAssign(self, node):
target = self.visit(node.node)
# Because it is an AugAssign, the target can't be a list or a tuple,
# so we only have to change the context of a single node
target.ctx = _ast.Store()
op = self.aug_operators[node.op]()
return self._new(_ast.AugAssign, target, op, self.visit(node.expr))
def _visit_Print(nl):
def _visit(self, node):
values = [self.visit(v) for v in node.nodes]
return self._new(_ast.Print, self.visit(node.dest), values, nl)
return _visit
visit_Print = _visit_Print(False)
visit_Printnl = _visit_Print(True)
del _visit_Print
def visit_For(self, node):
return self._new(_ast.For, self.visit(node.assign), self.visit(node.list),
self.visit(node.body), self.visit(node.else_))
def visit_While(self, node):
return self._new(_ast.While, self.visit(node.test), self.visit(node.body),
self.visit(node.else_))
def visit_If(self, node):
def _level(tests, else_):
test = self.visit(tests[0][0])
body = self.visit(tests[0][1])
if len(tests) == 1:
orelse = self.visit(else_)
else:
orelse = [_level(tests[1:], else_)]
return self._new(_ast.If, test, body, orelse)
return _level(node.tests, node.else_)
def visit_With(self, node):
return self._new(_ast.With, self.visit(node.expr),
self.visit(node.vars), self.visit(node.body))
def visit_Raise(self, node):
return self._new(_ast.Raise, self.visit(node.expr1),
self.visit(node.expr2), self.visit(node.expr3))
def visit_TryExcept(self, node):
handlers = []
for type, name, body in node.handlers:
handlers.append(self._new(_ast.excepthandler, self.visit(type),
self.visit(name), self.visit(body)))
return self._new(_ast.TryExcept, self.visit(node.body),
handlers, self.visit(node.else_))
def visit_TryFinally(self, node):
return self._new(_ast.TryFinally, self.visit(node.body),
self.visit(node.final))
def visit_Assert(self, node):
return self._new(_ast.Assert, self.visit(node.test), self.visit(node.fail))
def visit_Import(self, node):
names = [self._new(_ast.alias, n[0], n[1]) for n in node.names]
return self._new(_ast.Import, names)
def visit_From(self, node):
names = [self._new(_ast.alias, n[0], n[1]) for n in node.names]
return self._new(_ast.ImportFrom, node.modname, names, 0)
def visit_Exec(self, node):
return self._new(_ast.Exec, self.visit(node.expr),
self.visit(node.locals), self.visit(node.globals))
def visit_Global(self, node):
return self._new(_ast.Global, node.names[:])
def visit_Discard(self, node):
return self._new(_ast.Expr, self.visit(node.expr))
def _map_class(to):
def _visit(self, node):
return self._new(to)
return _visit
visit_Pass = _map_class(_ast.Pass)
visit_Break = _map_class(_ast.Break)
visit_Continue = _map_class(_ast.Continue)
def _visit_BinOperator(opcls):
def _visit(self, node):
return self._new(_ast.BinOp, self.visit(node.left),
opcls(), self.visit(node.right))
return _visit
visit_Add = _visit_BinOperator(_ast.Add)
visit_Div = _visit_BinOperator(_ast.Div)
visit_FloorDiv = _visit_BinOperator(_ast.FloorDiv)
visit_LeftShift = _visit_BinOperator(_ast.LShift)
visit_Mod = _visit_BinOperator(_ast.Mod)
visit_Mul = _visit_BinOperator(_ast.Mult)
visit_Power = _visit_BinOperator(_ast.Pow)
visit_RightShift = _visit_BinOperator(_ast.RShift)
visit_Sub = _visit_BinOperator(_ast.Sub)
del _visit_BinOperator
def _visit_BitOperator(opcls):
def _visit(self, node):
def _make(nodes):
if len(nodes) == 1:
return self.visit(nodes[0])
left = _make(nodes[:-1])
right = self.visit(nodes[-1])
return self._new(_ast.BinOp, left, opcls(), right)
return _make(node.nodes)
return _visit
visit_Bitand = _visit_BitOperator(_ast.BitAnd)
visit_Bitor = _visit_BitOperator(_ast.BitOr)
visit_Bitxor = _visit_BitOperator(_ast.BitXor)
del _visit_BitOperator
def _visit_UnaryOperator(opcls):
def _visit(self, node):
return self._new(_ast.UnaryOp, opcls(), self.visit(node.expr))
return _visit
visit_Invert = _visit_UnaryOperator(_ast.Invert)
visit_Not = _visit_UnaryOperator(_ast.Not)
visit_UnaryAdd = _visit_UnaryOperator(_ast.UAdd)
visit_UnarySub = _visit_UnaryOperator(_ast.USub)
del _visit_UnaryOperator
def _visit_BoolOperator(opcls):
def _visit(self, node):
values = [self.visit(n) for n in node.nodes]
return self._new(_ast.BoolOp, opcls(), values)
return _visit
visit_And = _visit_BoolOperator(_ast.And)
visit_Or = _visit_BoolOperator(_ast.Or)
del _visit_BoolOperator
cmp_operators = {
'==': _ast.Eq,
'!=': _ast.NotEq,
'<': _ast.Lt,
'<=': _ast.LtE,
'>': _ast.Gt,
'>=': _ast.GtE,
'is': _ast.Is,
'is not': _ast.IsNot,
'in': _ast.In,
'not in': _ast.NotIn,
}
def visit_Compare(self, node):
left = self.visit(node.expr)
ops = []
comparators = []
for optype, expr in node.ops:
ops.append(self.cmp_operators[optype]())
comparators.append(self.visit(expr))
return self._new(_ast.Compare, left, ops, comparators)
def visit_Lambda(self, node):
args = self._extract_args(node)
body = self.visit(node.code)
return self._new(_ast.Lambda, args, body)
def visit_IfExp(self, node):
return self._new(_ast.IfExp, self.visit(node.test), self.visit(node.then),
self.visit(node.else_))
def visit_Dict(self, node):
keys = [self.visit(x[0]) for x in node.items]
values = [self.visit(x[1]) for x in node.items]
return self._new(_ast.Dict, keys, values)
def visit_ListComp(self, node):
generators = [self.visit(q) for q in node.quals]
return self._new(_ast.ListComp, self.visit(node.expr), generators)
def visit_GenExprInner(self, node):
generators = [self.visit(q) for q in node.quals]
return self._new(_ast.GeneratorExp, self.visit(node.expr), generators)
def visit_GenExpr(self, node):
return self.visit(node.code)
def visit_GenExprFor(self, node):
ifs = [self.visit(i) for i in node.ifs]
return self._new(_ast.comprehension, self.visit(node.assign),
self.visit(node.iter), ifs)
def visit_ListCompFor(self, node):
ifs = [self.visit(i) for i in node.ifs]
return self._new(_ast.comprehension, self.visit(node.assign),
self.visit(node.list), ifs)
def visit_GenExprIf(self, node):
return self.visit(node.test)
visit_ListCompIf = visit_GenExprIf
def visit_Yield(self, node):
return self._new(_ast.Yield, self.visit(node.value))
def visit_CallFunc(self, node):
args = []
keywords = []
for arg in node.args:
if isinstance(arg, compiler.ast.Keyword):
keywords.append(self._new(_ast.keyword, arg.name,
self.visit(arg.expr)))
else:
args.append(self.visit(arg))
return self._new(_ast.Call, self.visit(node.node), args, keywords,
self.visit(node.star_args), self.visit(node.dstar_args))
def visit_Backquote(self, node):
return self._new(_ast.Repr, self.visit(node.expr))
def visit_Const(self, node):
if node.value is None: # appears in slices
return None
elif isinstance(node.value, basestring):
return self._new(_ast.Str, node.value)
else:
return self._new(_ast.Num, node.value)
def visit_Name(self, node):
return self._new(_ast.Name, node.name, _ast.Load())
def visit_Getattr(self, node):
return self._new(_ast.Attribute, self.visit(node.expr), node.attrname,
_ast.Load())
def visit_Tuple(self, node):
nodes = [self.visit(n) for n in node.nodes]
return self._new(_ast.Tuple, nodes, _ast.Load())
def visit_List(self, node):
nodes = [self.visit(n) for n in node.nodes]
return self._new(_ast.List, nodes, _ast.Load())
def get_ctx(self, flags):
if flags == 'OP_DELETE':
return _ast.Del()
elif flags == 'OP_APPLY':
return _ast.Load()
elif flags == 'OP_ASSIGN':
return _ast.Store()
else:
# FIXME Exception here
assert False, repr(flags)
def visit_AssName(self, node):
self.out_flags = node.flags
ctx = self.get_ctx(node.flags)
return self._new(_ast.Name, node.name, ctx)
def visit_AssAttr(self, node):
self.out_flags = node.flags
ctx = self.get_ctx(node.flags)
return self._new(_ast.Attribute, self.visit(node.expr),
node.attrname, ctx)
def _visit_AssCollection(cls):
def _visit(self, node):
flags = None
elts = []
for n in node.nodes:
elts.append(self.visit(n))
if flags is None:
flags = self.out_flags
else:
assert flags == self.out_flags
self.out_flags = flags
ctx = self.get_ctx(flags)
return self._new(cls, elts, ctx)
return _visit
visit_AssList = _visit_AssCollection(_ast.List)
visit_AssTuple = _visit_AssCollection(_ast.Tuple)
del _visit_AssCollection
def visit_Slice(self, node):
lower = self.visit(node.lower)
upper = self.visit(node.upper)
ctx = self.get_ctx(node.flags)
self.out_flags = node.flags
return self._new(_ast.Subscript, self.visit(node.expr),
self._new(_ast.Slice, lower, upper, None), ctx)
def visit_Subscript(self, node):
ctx = self.get_ctx(node.flags)
subs = [self.visit(s) for s in node.subs]
advanced = (_ast.Slice, _ast.Ellipsis)
slices = []
nonindex = False
for sub in subs:
if isinstance(sub, advanced):
nonindex = True
slices.append(sub)
else:
slices.append(self._new(_ast.Index, sub))
if len(slices) == 1:
slice = slices[0]
elif nonindex:
slice = self._new(_ast.ExtSlice, slices)
else:
slice = self._new(_ast.Tuple, slices, _ast.Load())
self.out_flags = node.flags
return self._new(_ast.Subscript, self.visit(node.expr), slice, ctx)
def visit_Sliceobj(self, node):
a = [self.visit(n) for n in node.nodes + [None]*(3 - len(node.nodes))]
return self._new(_ast.Slice, a[0], a[1], a[2])
def visit_Ellipsis(self, node):
return self._new(_ast.Ellipsis)
def visit_Stmt(self, node):
def _check_del(n):
# del x is just AssName('x', 'OP_DELETE')
# we want to transform it to Delete([Name('x', Del())])
dcls = (_ast.Name, _ast.List, _ast.Subscript, _ast.Attribute)
if isinstance(n, dcls) and isinstance(n.ctx, _ast.Del):
return self._new(_ast.Delete, [n])
elif isinstance(n, _ast.Tuple) and isinstance(n.ctx, _ast.Del):
# unpack last tuple to avoid making del (x, y, z,);
# out of del x, y, z; (there's no difference between
# these two in compiler.ast)
return self._new(_ast.Delete, n.elts)
else:
return n
def _keep(n):
if isinstance(n, _ast.Expr) and n.value is None:
return False
else:
return True
return [s for s in [_check_del(self.visit(n)) for n in node.nodes]
if _keep(s)]
def parse(source, mode):
node = compiler.parse(source, mode)
return ASTUpgrader().visit(node)
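# --- Illustrative usage sketch (Python 2.4 only; the compiler package is gone in Python 3) ---
# parse() compiles source with the legacy compiler package and upgrades the result
# to the _ast emulation classes, so one could write:
#     tree = parse("x = 1 + 2\n", "exec")
#     assert tree.body[0].__class__.__name__ == 'Assign'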
|
OpenClovis/SAFplus-Availability-Scalability-Platform
|
src/ide/genshi/genshi/template/ast24.py
|
Python
|
gpl-2.0
| 17,676
|
[
"VisIt"
] |
e2561ac756f66dac0374b9d6b95cfe3f44cad27cbfb65695d9f524762983fbd7
|
#!/usr/bin/python
#
# Created on Aug 25, 2016
# @author: Gaurav Rastogi (grastogi@avinetworks.com)
# Eric Anderson (eanderson@avinetworks.com)
# module_check: supported
# Avi Version: 17.1.1
#
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_gslbhealthmonitor
author: Gaurav Rastogi (grastogi@avinetworks.com)
short_description: Module for setup of GslbHealthMonitor Avi RESTful Object
description:
- This module is used to configure GslbHealthMonitor object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.4"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent","present"]
description:
description:
- User defined description for the object.
dns_monitor:
description:
- Healthmonitordns settings for gslbhealthmonitor.
external_monitor:
description:
- Healthmonitorexternal settings for gslbhealthmonitor.
failed_checks:
description:
- Number of continuous failed health checks before the server is marked down.
- Allowed values are 1-50.
- Default value when not specified in API or module is interpreted by Avi Controller as 2.
http_monitor:
description:
- Healthmonitorhttp settings for gslbhealthmonitor.
https_monitor:
description:
- Healthmonitorhttp settings for gslbhealthmonitor.
monitor_port:
description:
- Use this port instead of the port defined for the server in the pool.
- If the monitor succeeds to this port, the load balanced traffic will still be sent to the port of the server defined within the pool.
- Allowed values are 1-65535.
- Special values are 0 - 'use server port'.
name:
description:
- A user friendly name for this health monitor.
required: true
receive_timeout:
description:
- A valid response from the server is expected within the receive timeout window.
- This timeout must be less than the send interval.
- If server status is regularly flapping up and down, consider increasing this value.
- Allowed values are 1-300.
- Default value when not specified in API or module is interpreted by Avi Controller as 4.
send_interval:
description:
- Frequency, in seconds, that monitors are sent to a server.
- Allowed values are 1-3600.
- Default value when not specified in API or module is interpreted by Avi Controller as 5.
successful_checks:
description:
- Number of continuous successful health checks before server is marked up.
- Allowed values are 1-50.
- Default value when not specified in API or module is interpreted by Avi Controller as 2.
tcp_monitor:
description:
- Healthmonitortcp settings for gslbhealthmonitor.
tenant_ref:
description:
- It is a reference to an object of type tenant.
type:
description:
- Type of the health monitor.
- Enum options - HEALTH_MONITOR_PING, HEALTH_MONITOR_TCP, HEALTH_MONITOR_HTTP, HEALTH_MONITOR_HTTPS, HEALTH_MONITOR_EXTERNAL, HEALTH_MONITOR_UDP,
- HEALTH_MONITOR_DNS, HEALTH_MONITOR_GSLB.
required: true
udp_monitor:
description:
- Healthmonitorudp settings for gslbhealthmonitor.
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Uuid of the health monitor.
extends_documentation_fragment:
- avi
'''
EXAMPLES = """
- name: Example to create GslbHealthMonitor object
avi_gslbhealthmonitor:
controller: 10.10.25.42
username: admin
password: something
state: present
name: sample_gslbhealthmonitor
"""
RETURN = '''
obj:
description: GslbHealthMonitor (api/gslbhealthmonitor) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.avi import (
avi_common_argument_spec, HAS_AVI, avi_ansible_api)
except ImportError:
HAS_AVI = False
def main():
argument_specs = dict(
state=dict(default='present',
choices=['absent', 'present']),
description=dict(type='str',),
dns_monitor=dict(type='dict',),
external_monitor=dict(type='dict',),
failed_checks=dict(type='int',),
http_monitor=dict(type='dict',),
https_monitor=dict(type='dict',),
monitor_port=dict(type='int',),
name=dict(type='str', required=True),
receive_timeout=dict(type='int',),
send_interval=dict(type='int',),
successful_checks=dict(type='int',),
tcp_monitor=dict(type='dict',),
tenant_ref=dict(type='str',),
type=dict(type='str', required=True),
udp_monitor=dict(type='dict',),
url=dict(type='str',),
uuid=dict(type='str',),
)
argument_specs.update(avi_common_argument_spec())
module = AnsibleModule(
argument_spec=argument_specs, supports_check_mode=True)
if not HAS_AVI:
return module.fail_json(msg=(
'Avi python API SDK (avisdk>=17.1) is not installed. '
'For more details visit https://github.com/avinetworks/sdk.'))
return avi_ansible_api(module, 'gslbhealthmonitor',
set([]))
if __name__ == '__main__':
main()
|
andreaso/ansible
|
lib/ansible/modules/network/avi/avi_gslbhealthmonitor.py
|
Python
|
gpl-3.0
| 6,440
|
[
"VisIt"
] |
5db2e4e398c5a0146de2817587c70c1b225046ae5f8fced0bb3fe72ac6e0c69e
|
import unittest
import galaxy.model.mapping as mapping
class MappingTests( unittest.TestCase ):
def test_basic( self ):
# Start the database and connect the mapping
model = mapping.init( "/tmp", "sqlite:///:memory:", create_tables=True )
assert model.engine is not None
# Make some changes and commit them
u = model.User( email="james@foo.bar.baz", password="password" )
h1 = model.History( name="History 1", user=u)
#h1.queries.append( model.Query( "h1->q1" ) )
#h1.queries.append( model.Query( "h1->q2" ) )
h2 = model.History( name=( "H" * 1024 ) )
#q1 = model.Query( "h2->q1" )
d1 = model.Dataset( metadata=dict(chromCol=1,startCol=2,endCol=3 ), history=h2 )
#h2.queries.append( q1 )
#h2.queries.append( model.Query( "h2->q2" ) )
model.context.current.flush()
model.context.current.clear()
# Check
users = model.User.select()
assert len( users ) == 1
assert users[0].email == "james@foo.bar.baz"
assert users[0].password == "password"
assert len( users[0].histories ) == 1
assert users[0].histories[0].name == "History 1"
hists = model.History.select()
assert hists[0].name == "History 1"
assert hists[1].name == ( "H" * 255 )
assert hists[0].user == users[0]
assert hists[1].user is None
assert hists[1].datasets[0].metadata['chromCol'] == 1
assert hists[1].datasets[0].file_name == "/tmp/dataset_%d.dat" % hists[1].datasets[0].id
# Do an update and check
hists[1].name = "History 2b"
model.context.current.flush()
model.context.current.clear()
hists = model.History.select()
assert hists[0].name == "History 1"
assert hists[1].name == "History 2b"
def get_suite():
suite = unittest.TestSuite()
suite.addTest( MappingTests( "test_basic" ) )
return suite
|
jmchilton/galaxy-central
|
galaxy/model/mapping_tests.py
|
Python
|
mit
| 1,976
|
[
"Galaxy"
] |
a55842804e0b03f98aef8e634474115e0bc7684afecdf3b0d2c9569b61df7937
|
../../../../../../../share/pyshared/orca/scripts/apps/notify-osd/__init__.py
|
Alberto-Beralix/Beralix
|
i386-squashfs-root/usr/lib/python2.7/dist-packages/orca/scripts/apps/notify-osd/__init__.py
|
Python
|
gpl-3.0
| 76
|
[
"ORCA"
] |
6c56a4d873bf92247f8791c1f22d8cb374e6e5b1bf1390f02bc72af23aa1a3e7
|
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
"""Migrate from 0.14 to 0.15
Revision ID: 49a4a1e3779a
Revises: 057b088bfb32
Create Date: 2014-10-09 12:24:58.333096
"""
# revision identifiers, used by Alembic.
revision = '49a4a1e3779a'
down_revision = '057b088bfb32'
from alembic import op
import sqlalchemy as sa
def upgrade():
# System access policies
op.create_table('system_access_policy',
sa.Column('id', sa.Integer, nullable=False, primary_key=True),
sa.Column('system_id', sa.Integer,
sa.ForeignKey('system.id', name='system_access_policy_system_id_fk')),
mysql_engine='InnoDB'
)
op.create_table('system_access_policy_rule',
sa.Column('id', sa.Integer, nullable=False, primary_key=True),
sa.Column('policy_id', sa.Integer, sa.ForeignKey('system_access_policy.id',
name='system_access_policy_rule_policy_id_fk'), nullable=False),
sa.Column('user_id', sa.Integer, sa.ForeignKey('tg_user.user_id',
name='system_access_policy_rule_user_id_fk')),
sa.Column('group_id', sa.Integer, sa.ForeignKey('tg_group.group_id',
name='system_access_policy_rule_group_id_fk')),
sa.Column('permission', sa.Enum('edit_policy', 'edit_system',
'loan_any', 'loan_self', 'control_system', 'reserve')),
mysql_engine='InnoDB'
)
op.execute("""
INSERT INTO system_access_policy (system_id)
SELECT id FROM system
WHERE NOT EXISTS (SELECT 1 FROM system_access_policy
WHERE system_id = system.id)
""")
op.execute("""
INSERT INTO system_access_policy_rule
(policy_id, user_id, group_id, permission)
SELECT system_access_policy.id, NULL, NULL, 'control_system'
FROM system_access_policy
INNER JOIN system ON system_access_policy.system_id = system.id
WHERE NOT EXISTS (SELECT 1 FROM system_access_policy_rule
WHERE policy_id = system_access_policy.id
AND user_id IS NULL
AND group_id IS NULL
AND permission = 'control_system')
""")
op.execute("""
INSERT INTO system_access_policy_rule
(policy_id, user_id, group_id, permission)
SELECT system_access_policy.id, NULL, NULL, 'reserve'
FROM system_access_policy
INNER JOIN system ON system_access_policy.system_id = system.id
WHERE shared = TRUE
AND NOT EXISTS (SELECT 1 FROM system_group
WHERE system_id = system.id)
AND NOT EXISTS (SELECT 1 FROM system_access_policy_rule
WHERE policy_id = system_access_policy.id
AND user_id IS NULL
AND group_id IS NULL
AND permission = 'reserve')
""")
op.execute("""
INSERT INTO system_access_policy_rule
(policy_id, user_id, group_id, permission)
SELECT system_access_policy.id, NULL, system_group.group_id, 'reserve'
FROM system_access_policy
INNER JOIN system ON system_access_policy.system_id = system.id
INNER JOIN system_group ON system_group.system_id = system.id
WHERE shared = TRUE
AND system_group.admin = FALSE
AND NOT EXISTS (SELECT 1 FROM system_access_policy_rule
WHERE policy_id = system_access_policy.id
AND user_id IS NULL
AND group_id = system_group.group_id
AND permission = 'reserve')
""")
op.execute("""
INSERT INTO system_access_policy_rule
(policy_id, user_id, group_id, permission)
SELECT system_access_policy.id, NULL, system_group.group_id, permission.p
FROM system_access_policy
INNER JOIN system ON system_access_policy.system_id = system.id
INNER JOIN system_group ON system_group.system_id = system.id
JOIN (SELECT 'edit_policy' p
UNION SELECT 'edit_system' p
UNION SELECT 'loan_any' p
UNION SELECT 'loan_self' p
UNION SELECT 'control_system' p
UNION SELECT 'reserve' p) permission
WHERE system_group.admin = TRUE
AND NOT EXISTS (SELECT 1 FROM system_access_policy_rule
WHERE policy_id = system_access_policy.id
AND user_id IS NULL
AND group_id = system_group.group_id
AND permission = permission.p)
""")
# TurboGears Visit framework
# These don't contain any important data, just transient login sessions, so
# we can safely drop them during upgrade, and re-create them empty during
# downgrade.
op.drop_table('visit')
op.drop_table('visit_identity')
# Group name length
op.alter_column('tg_group', 'group_name',
type_=sa.Unicode(255), nullable=False)
# Task RPM filename
op.alter_column('task', 'rpm', type_=sa.Unicode(255))
op.create_unique_constraint('rpm', 'task', ['rpm'])
def downgrade():
# System access policies
op.drop_table('system_access_policy_rule')
op.drop_table('system_access_policy')
# TurboGears Visit framework
op.create_table('visit',
sa.Column('visit_key', sa.String(40), primary_key=True),
sa.Column('created', sa.DateTime, nullable=False),
sa.Column('expiry', sa.DateTime),
mysql_engine='InnoDB'
)
op.create_table('visit_identity',
sa.Column('visit_key', sa.String(40), primary_key=True),
sa.Column('user_id', sa.Integer,
sa.ForeignKey('tg_user.user_id'), nullable=False),
sa.Column('proxied_by_user_id', sa.Integer,
sa.ForeignKey('tg_user.user_id')),
mysql_engine='InnoDB'
)
# Group name length
op.alter_column('tg_group', 'group_name',
type_=sa.Unicode(16), nullable=False)
# Task RPM filename
op.drop_index('rpm', 'task')
op.alter_column('task', 'rpm', type_=sa.Unicode(2048))
|
jtoppins/beaker
|
Server/bkr/server/alembic/versions/49a4a1e3779a_014_to_015.py
|
Python
|
gpl-2.0
| 6,218
|
[
"VisIt"
] |
722f87319e6be93aab21448947030c5b03dfc51f972b2529f28e9aa59365e84a
|
#pylint: disable=no-name-in-module
from __future__ import (absolute_import, division, print_function)
from mantid.simpleapi import CrystalFieldEnergies
import numpy as np
import warnings
def _unpack_complex_matrix(packed, n_rows, n_cols):
unpacked = np.ndarray(n_rows * n_cols, dtype=complex).reshape((n_rows, n_cols))
for i in range(n_rows):
for j in range(n_cols):
k = 2 * (i * n_cols + j)
value = complex(packed[k], packed[k + 1])
unpacked[i, j] = value
return unpacked
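# --- Illustrative note (hand-worked example, not Mantid output) ---
# The packed input is row-major with interleaved real and imaginary parts, so
# element (i, j) of an n_rows x n_cols matrix sits at packed[2*(i*n_cols + j)]
# (real) and packed[2*(i*n_cols + j) + 1] (imaginary). For example,
#     _unpack_complex_matrix([1, 0, 0, 2, 3, 0, 0, 4], 2, 2)
# returns [[1+0j, 0+2j], [3+0j, 0+4j]].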
def energies(nre, **kwargs):
"""
Calculate the crystal field energies and wavefunctions.
Args:
nre: a number denoting a rare earth ion
|1=Ce|2=Pr|3=Nd|4=Pm|5=Sm|6=Eu|7=Gd|8=Tb|9=Dy|10=Ho|11=Er|12=Tm|13=Yb|
kwargs: the keyword arguments for crystal field parameters.
They can be:
B20, B22, B40, B42, B44, ... : real parts of the crystal field parameters.
IB20, IB22, IB40, IB42, IB44, ... : imaginary parts of the crystal field parameters.
BmolX, BmolY, BmolZ: 3 molecular field parameters
BextX, BextY, BextZ: 3 external field parameters
Return:
a tuple of energies (1D numpy array), wavefunctions (2D numpy array)
and the hamiltonian (2D numpy array).
"""
warnings.warn('This function is under development and can be changed/removed in the future',
FutureWarning)
# Do the calculations
res = CrystalFieldEnergies(nre, **kwargs)
# Unpack the results
eigenvalues = res[0]
dim = len(eigenvalues)
eigenvectors = _unpack_complex_matrix(res[1], dim, dim)
hamiltonian = _unpack_complex_matrix(res[2], dim, dim)
return eigenvalues, eigenvectors, hamiltonian
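# --- Illustrative usage sketch (parameter values are placeholders, not a physical fit) ---
# energies() takes the rare-earth index plus crystal field and field parameters as
# keyword arguments, e.g. for Ce (nre=1) with a single real B20 term:
#     eigenvalues, eigenvectors, hamiltonian = energies(1, B20=0.37)
# eigenvalues is a 1D numpy array of level energies; eigenvectors and hamiltonian
# are square complex arrays whose dimension equals len(eigenvalues).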
|
wdzhou/mantid
|
scripts/Inelastic/CrystalField/energies.py
|
Python
|
gpl-3.0
| 1,767
|
[
"CRYSTAL"
] |
6f80281c45b495dbf454fb7d85799df86a364745869fdaff0732e8f4238ec296
|
# -*- coding: utf-8 -*-
{
'!=': '!=',
'!langcode!': 'nl',
'!langname!': 'Nederlands',
'"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN': '"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN',
'%(nrows)s records found': '%(nrows)s records gevonden',
'%d days ago': '%d dagen geleden',
'%d weeks ago': '%d weken gelden',
'%s %%{row} deleted': '%s rijen verwijderd',
'%s %%{row} updated': '%s rijen geupdate',
'%s selected': '%s geselecteerd',
'%Y-%m-%d': '%Y-%m-%d',
'%Y-%m-%d %H:%M:%S': '%Y-%m-%d %H:%M:%S',
'(something like "it-it")': '(zoiets als "nl-nl")',
'+ And': '+ And',
'+ Or': '+ Or',
'1 day ago': '1 dag geleden',
'1 week ago': '1 week gelden',
'<': '<',
'<=': '<=',
'=': '=',
'>': '>',
'>=': '>=',
'@markmin\x01An error occured, please [[reload %s]] the page': 'An error occured, please [[reload %s]] the page',
'A new version of web2py is available': 'Een nieuwe versie van web2py is beschikbaar',
'A new version of web2py is available: %s': 'Een nieuwe versie van web2py is beschikbaar: %s',
'About': 'Over',
'about': 'over',
'About application': 'Over applicatie',
'Access Control': 'Toegangscontrole',
'Active From': 'Active From',
'Active To': 'Active To',
'Add': 'Toevoegen',
'Add Record': 'Add Record',
'Add record to database': 'Add record to database',
'Add this to the search as an AND term': 'Add this to the search as an AND term',
'Add this to the search as an OR term': 'Add this to the search as an OR term',
'additional code for your application': 'additionele code voor je applicatie',
'admin disabled because no admin password': 'admin is uitgezet omdat er geen admin wachtwoord is',
'admin disabled because not supported on google app engine': 'admin is uitgezet omdat dit niet ondersteund wordt op google app engine',
'admin disabled because unable to access password file': 'admin is uitgezet omdat het wachtwoordbestand niet geopend kan worden',
'Admin is disabled because insecure channel': 'Admin is uitgezet om het kanaal onveilig is',
'Admin is disabled because unsecure channel': 'Admin is uitgezet om het kanaal onveilig is',
'Administration': 'Administratie',
'Administrative Interface': 'Administratieve Interface',
'Administrator Password:': 'Administrator Wachtwoord',
'Ajax Recipes': 'Ajax Recepten',
'And': 'En',
'and rename it (required):': 'en hernoem deze (vereist)',
'and rename it:': 'en hernoem:',
'appadmin': 'appadmin',
'appadmin is disabled because insecure channel': 'appadmin is uitgezet vanwege een onveilig kanaal',
'application "%s" uninstalled': 'applicatie "%s" gedeïnstalleerd',
'application compiled': 'applicatie gecompileerd',
'application is compiled and cannot be designed': 'applicatie is gecompileerd en kan niet worden ontworpen',
'Apply changes': 'Apply changes',
'Are you sure you want to delete file "%s"?': 'Weet je zeker dat je bestand "%s" wilt verwijderen?',
'Are you sure you want to delete this object?': 'Weet je zeker dat je dit object wilt verwijderen?',
'Are you sure you want to uninstall application "%s"?': 'Weet je zeker dat je applicatie "%s" wilt deïnstalleren?',
'ATTENTION: Login requires a secure (HTTPS) connection or running on localhost.': 'LET OP: Login vereist een beveiligde (HTTPS) connectie of moet draaien op localhost.',
'ATTENTION: TESTING IS NOT THREAD SAFE SO DO NOT PERFORM MULTIPLE TESTS CONCURRENTLY.': 'LET OP: TESTEN IS NIET THREAD SAFE, PROBEER NIET GELIJKTIJDIG MEERDERE TESTS TE DOEN.',
'ATTENTION: you cannot edit the running application!': 'LET OP: je kan de applicatie die nu draait niet editen!',
'Authentication': 'Authenticatie',
'Available Databases and Tables': 'Beschikbare databases en tabellen',
'Back': 'Terug',
'Buy this book': 'Koop dit boek',
'Cache': 'Cache',
'cache': 'cache',
'Cache Keys': 'Cache Keys',
'cache, errors and sessions cleaned': 'cache, errors en sessies geleegd',
'Cannot be empty': 'Mag niet leeg zijn',
'Cannot compile: there are errors in your app. Debug it, correct errors and try again.': 'Kan niet compileren: er bevinden zich fouten in je app. Debug, corrigeer de fouten en probeer opnieuw.',
'cannot create file': 'kan bestand niet maken',
'cannot upload file "%(filename)s"': 'kan bestand "%(filename)s" niet uploaden',
'Change Password': 'Wijzig wachtwoord',
'Change password': 'Wijzig Wachtwoord',
'change password': 'wijzig wachtwoord',
'check all': 'vink alles aan',
'Check to delete': 'Vink aan om te verwijderen',
'clean': 'leeg',
'Clear': 'Leeg',
'Clear CACHE?': 'Leeg CACHE?',
'Clear DISK': 'Leeg DISK',
'Clear RAM': 'Clear RAM',
'click to check for upgrades': 'Klik om voor upgrades te controleren',
'Client IP': 'Client IP',
'Client side': 'Client side',
'Close': 'Close',
'Comma-separated export including columns not shown; fields from other tables are exported as raw values for faster export': 'Comma-separated export including columns not shown; fields from other tables are exported as raw values for faster export',
'Comma-separated export of visible columns. Fields from other tables are exported as they appear on-screen but this may be slow for many rows': 'Comma-separated export of visible columns. Fields from other tables are exported as they appear on-screen but this may be slow for many rows',
'Community': 'Community',
'compile': 'compileren',
'compiled application removed': 'gecompileerde applicatie verwijderd',
'Components and Plugins': 'Components en Plugins',
'contains': 'bevat',
'Controller': 'Controller',
'Controllers': 'Controllers',
'controllers': 'controllers',
'Copyright': 'Copyright',
'create file with filename:': 'maak bestand met de naam:',
'Create new application': 'Maak nieuwe applicatie:',
'create new application:': 'maak nieuwe applicatie',
'Created By': 'Gemaakt Door',
'Created On': 'Gemaakt Op',
'crontab': 'crontab',
'CSV': 'CSV',
'CSV (hidden cols)': 'CSV (hidden cols)',
'Current request': 'Huidige request',
'Current response': 'Huidige response',
'Current session': 'Huidige sessie',
'currently saved or': 'op het moment opgeslagen of',
'customize me!': 'pas me aan!',
'data uploaded': 'data geupload',
'Database': 'Database',
'Database %s select': 'Database %s select',
'database administration': 'database administratie',
'Database Administration (appadmin)': 'Database Administration (appadmin)',
'Date and Time': 'Datum en Tijd',
'db': 'db',
'DB Model': 'DB Model',
'defines tables': 'definieer tabellen',
'Delete': 'Verwijder',
'delete': 'verwijder',
'delete all checked': 'verwijder alle aangevinkten',
'Delete:': 'Verwijder:',
'Demo': 'Demo',
'Deploy on Google App Engine': 'Deploy op Google App Engine',
'Deployment Recipes': 'Deployment Recepten',
'Description': 'Beschrijving',
'design': 'design',
'DESIGN': 'DESIGN',
'Design for': 'Design voor',
'Destination': 'Destination',
'DISK': 'DISK',
'Disk Cache Keys': 'Disk Cache Keys',
'Disk Cleared': 'Disk Geleegd',
'Documentation': 'Documentatie',
"Don't know what to do?": 'Weet je niet wat je moet doen?',
'done!': 'gereed!',
'Download': 'Download',
'E-mail': 'E-mail',
'E-mail invalid': 'E-mail ongeldig',
'edit': 'bewerk',
'EDIT': 'BEWERK',
'Edit': 'Bewerk',
'Edit application': 'Bewerk applicatie',
'edit controller': 'bewerk controller',
'Edit current record': 'Bewerk huidig record',
'Edit Profile': 'Bewerk Profiel',
'edit profile': 'bewerk profiel',
'Edit This App': 'Bewerk Deze App',
'Editing file': 'Bewerk bestand',
'Editing file "%s"': 'Bewerk bestand "%s"',
'Email and SMS': 'E-mail en SMS',
'enter a number between %(min)g and %(max)g': 'geef een getal tussen %(min)g en %(max)g',
'Enter an integer between %(min)g and %(max)g': 'Enter an integer between %(min)g and %(max)g',
'enter an integer between %(min)g and %(max)g': 'geef een integer tussen %(min)g en %(max)g',
'Error logs for "%(app)s"': 'Error logs voor "%(app)s"',
'errors': 'errors',
'Errors': 'Errors',
'Export': 'Export',
'export as csv file': 'exporteer als csv-bestand',
'Export:': 'Export:',
'exposes': 'stelt bloot',
'extends': 'extends',
'failed to reload module': 'niet gelukt om module te herladen',
'False': 'Onwaar',
'FAQ': 'FAQ',
'file "%(filename)s" created': 'bestand "%(filename)s" gemaakt',
'file "%(filename)s" deleted': 'bestand "%(filename)s" verwijderd',
'file "%(filename)s" uploaded': 'bestand "%(filename)s" geupload',
'file "%(filename)s" was not deleted': 'bestand "%(filename)s" was niet verwijderd',
'file "%s" of %s restored': 'bestand "%s" van %s hersteld',
'file changed on disk': 'bestand aangepast op schijf',
'file does not exist': 'bestand bestaat niet',
'file saved on %(time)s': 'bestand bewaard op %(time)s',
'file saved on %s': 'bestand bewaard op %s',
'First name': 'Voornaam',
'Forbidden': 'Verboden',
'Forgot username?': 'Forgot username?',
'Forms and Validators': 'Formulieren en Validators',
'Free Applications': 'Gratis Applicaties',
'Functions with no doctests will result in [passed] tests.': 'Functies zonder doctests zullen resulteren in [passed] tests.',
'Group %(group_id)s created': 'Groep %(group_id)s gemaakt',
'Group ID': 'Groep ID',
'Group uniquely assigned to user %(id)s': 'Groep is uniek toegekend aan gebruiker %(id)s',
'Groups': 'Groepen',
'Hello World': 'Hallo Wereld',
'help': 'help',
'Home': 'Home',
'How did you get here?': 'Hoe ben je hier gekomen?',
'HTML': 'HTML',
'HTML export of visible columns': 'HTML export of visible columns',
'htmledit': 'Bewerk HTML',
'Id': 'Id',
'import': 'import',
'Import/Export': 'Import/Export',
'in': 'in',
'includes': 'includes',
'Index': 'Index',
'insert new': 'voeg nieuwe',
'insert new %s': 'voeg nieuwe %s',
'Installed applications': 'Geïnstalleerde applicaties',
'internal error': 'interne error',
'Internal State': 'Interne State',
'Introduction': 'Introductie',
'Invalid action': 'Ongeldige actie',
'Invalid email': 'Ongeldig emailadres',
'Invalid login': 'Invalid login',
'invalid password': 'ongeldig wachtwoord',
'Invalid password': 'Ongeldig wachtwoord',
'Invalid Query': 'Ongeldige Query',
'invalid request': 'ongeldige request',
'invalid ticket': 'ongeldige ticket',
'Invalid username': 'Invalid username',
'Is Active': 'Is Actief',
'JSON': 'JSON',
'JSON export of visible columns': 'JSON export of visible columns',
'Key': 'Key',
'language file "%(filename)s" created/updated': 'taalbestand "%(filename)s" gemaakt/geupdate',
'Language files (static strings) updated': 'Taalbestanden (statische strings) geupdate',
'languages': 'talen',
'Languages': 'Talen',
'languages updated': 'talen geupdate',
'Last name': 'Achternaam',
'Last saved on:': 'Laatst bewaard op:',
'Layout': 'Layout',
'Layout Plugins': 'Layout Plugins',
'Layouts': 'Layouts',
'License for': 'Licentie voor',
'Live Chat': 'Live Chat',
'loading...': 'laden...',
'Log In': 'Log In',
'Log Out': 'Log Out',
'Logged in': 'Ingelogd',
'Logged out': 'Uitgelogd',
'Login': 'Login',
'login': 'login',
'Login to the Administrative Interface': 'Inloggen op de Administratieve Interface',
'logout': 'logout',
'Logout': 'Logout',
'Lost Password': 'Wachtwoord Kwijt',
'Lost password?': 'Wachtwoord kwijt?',
'Main Menu': 'Hoofdmenu',
'Manage Cache': 'Beheer Cache',
'Menu Model': 'Menu Model',
'merge': 'samenvoegen',
'Method': 'Method',
'Models': 'Modellen',
'models': 'modellen',
'Modified By': 'Aangepast Door',
'Modified On': 'Aangepast Op',
'Modules': 'Modules',
'modules': 'modules',
'My Sites': 'Mijn Sites',
'Name': 'Naam',
'New': 'Nieuw',
'new application "%s" created': 'nieuwe applicatie "%s" gemaakt',
'New password': 'Nieuw wachtwoord',
'New Record': 'Nieuw Record',
'new record inserted': 'nieuw record ingevoegd',
'New Search': 'New Search',
'next 100 rows': 'volgende 100 rijen',
'NO': 'NEE',
'No databases in this application': 'Geen database in deze applicatie',
'No records found': 'No records found',
'None': 'None',
'not authorized': 'not authorized',
'not in': 'not in',
'Note': 'Note',
'Object or table name': 'Object of tabelnaam',
'Old password': 'Oude wachtwoord',
'Online examples': 'Online voorbeelden',
'Or': 'Of',
'or import from csv file': 'of importeer van csv-bestand',
'or provide application url:': 'of geef een applicatie url:',
'Origin': 'Bron',
'Original/Translation': 'Oorspronkelijk/Vertaling',
'Other Plugins': 'Andere Plugins',
'Other Recipes': 'Andere Recepten',
'Overview': 'Overzicht',
'pack all': 'pack all',
'pack compiled': 'pack compiled',
'Password': 'Wachtwoord',
'Password changed': 'Password changed',
"Password fields don't match": 'Wachtwoordvelden komen niet overeen',
'Peeking at file': 'Naar bestand aan het gluren',
'please input your password again': 'geef alstublieft nogmaals uw wachtwoord',
'Plugins': 'Plugins',
'Powered by': 'Powered by',
'Preface': 'Inleiding',
'previous 100 rows': 'vorige 100 rijen',
'Profile': 'Profiel',
'Profile updated': 'Profile updated',
'Python': 'Python',
'Query': 'Query',
'Query:': 'Query:',
'Quick Examples': 'Snelle Voorbeelden',
'RAM': 'RAM',
'RAM Cache Keys': 'RAM Cache Keys',
'Ram Cleared': 'Ram Geleegd',
'Recipes': 'Recepten',
'Record': 'Record',
'record does not exist': 'record bestaat niet',
'Record ID': 'Record ID',
'Record id': 'Record id',
'register': 'registreer',
'Register': 'Registreer',
'Registration identifier': 'Registratie identifier',
'Registration key': 'Registratie sleutel',
'Registration successful': 'Registratie succesvol',
'Remember me (for 30 days)': 'Onthoudt mij (voor 30 dagen)',
'remove compiled': 'verwijder gecompileerde',
'Request reset password': 'Vraag een wachtwoord reset aan',
'Reset Password key': 'Reset Wachtwoord sleutel',
'Resolve Conflict file': 'Los Conflictbestand op',
'restore': 'herstel',
'revert': 'herstel',
'Role': 'Rol',
'Rows in Table': 'Rijen in tabel',
'Rows selected': 'Rijen geselecteerd',
'save': 'bewaar',
'Save profile': 'Bewaar profiel',
'Saved file hash:': 'Opgeslagen file hash:',
'Search': 'Zoek',
'Semantic': 'Semantisch',
'Services': 'Services',
'session expired': 'sessie verlopen',
'shell': 'shell',
'Sign Up': 'Sign Up',
'site': 'site',
'Size of cache:': 'Grootte van cache:',
'Slug': 'Slug',
'some files could not be removed': 'sommige bestanden konden niet worden verwijderd',
'Spreadsheet-optimised export of tab-separated content including hidden columns. May be slow': 'Spreadsheet-optimised export of tab-separated content including hidden columns. May be slow',
'Spreadsheet-optimised export of tab-separated content, visible columns only. May be slow.': 'Spreadsheet-optimised export of tab-separated content, visible columns only. May be slow.',
'Start building a new search': 'Start building a new search',
'starts with': 'begint met',
'state': 'state',
'static': 'statisch',
'Static files': 'Statische bestanden',
'Statistics': 'Statistieken',
'Stylesheet': 'Stylesheet',
'Submit': 'Submit',
'submit': 'submit',
'Support': 'Support',
'Sure you want to delete this object?': 'Weet je zeker dat je dit object wilt verwijderen?',
'Table': 'Tabel',
'Table name': 'Tabelnaam',
'test': 'test',
'Testing application': 'Applicatie testen',
'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.': 'De "query" is een conditie zoals "db.tabel1.veld1==\'waarde\'". Zoiets als "db.tabel1.veld1==db.tabel2.veld2" resulteert in een SQL JOIN.',
'the application logic, each URL path is mapped in one exposed function in the controller': 'the applicatie logica, elk URL pad is gemapped in een blootgestelde functie in de controller',
'The Core': 'De Core',
'the data representation, define database tables and sets': 'de data representatie, definieert database tabellen en sets',
'The output of the file is a dictionary that was rendered by the view %s': 'De output van het bestand is een dictionary die gerenderd werd door de view %s',
'the presentations layer, views are also known as templates': 'de presentatie laag, views zijn ook bekend als templates',
'The Views': 'De Views',
'There are no controllers': 'Er zijn geen controllers',
'There are no models': 'Er zijn geen modellen',
'There are no modules': 'Er zijn geen modules',
'There are no static files': 'Er zijn geen statische bestanden',
'There are no translators, only default language is supported': 'Er zijn geen vertalingen, alleen de standaard taal wordt ondersteund.',
'There are no views': 'Er zijn geen views',
'these files are served without processing, your images go here': 'Deze bestanden worden geserveerd zonder verdere verwerking, je afbeeldingen horen hier',
'This App': 'Deze App',
'This email already has an account': 'This email already has an account',
'This is a copy of the scaffolding application': 'Dit is een kopie van de steiger-applicatie',
'This is the %(filename)s template': 'Dit is de %(filename)s template',
'Ticket': 'Ticket',
'Time in Cache (h:m:s)': 'Tijd in Cache (h:m:s)',
'Timestamp': 'Timestamp (timestamp)',
'to previous version.': 'naar vorige versie.',
'too short': 'te kort',
'translation strings for the application': 'vertaalstrings voor de applicatie',
'True': 'Waar',
'try': 'probeer',
'try something like': 'probeer zoiets als',
'TSV (Spreadsheets)': 'TSV (Spreadsheets)',
'TSV (Spreadsheets, hidden cols)': 'TSV (Spreadsheets, hidden cols)',
'Twitter': 'Twitter',
'Unable to check for upgrades': 'Niet mogelijk om te controleren voor upgrades',
'unable to create application "%s"': 'niet mogelijk om applicatie "%s" te maken',
'unable to delete file "%(filename)s"': 'niet mogelijk om bestand "%(filename)s" te verwijderen',
'Unable to download': 'Niet mogelijk om te downloaden',
'Unable to download app': 'Niet mogelijk om app te downloaden',
'unable to parse csv file': 'niet mogelijk om csv-bestand te parsen',
'unable to uninstall "%s"': 'niet mogelijk om "%s" te deïnstalleren',
'uncheck all': 'vink alles uit',
'uninstall': ' deïnstalleer',
'update': 'update',
'update all languages': 'update alle talen',
'Update:': 'Update:',
'upload application:': 'upload applicatie:',
'Upload existing application': 'Upload bestaande applicatie',
'upload file:': 'upload bestand',
'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.': 'Gebruik (...)&(...) voor AND, (...)|(...) voor OR, en ~(...) voor NOT om meer complexe queries te maken.',
'User %(id)s Logged-in': 'Gebruiker %(id)s Logged-in',
'User %(id)s Logged-out': 'Gebruiker %(id)s Logged-out',
'User %(id)s Password changed': 'Wachtwoord van gebruiker %(id)s is veranderd',
'User %(id)s Password reset': 'Wachtwoord van gebruiker %(id)s is gereset',
'User %(id)s Profile updated': 'Profiel van Gebruiker %(id)s geupdate',
'User %(id)s Registered': 'Gebruiker %(id)s Geregistreerd',
'User ID': 'User ID',
'Username': 'Username',
'Username already taken': 'Username already taken',
'value already in database or empty': 'waarde al in database of leeg',
'Verify Password': 'Verifieer Wachtwoord',
'versioning': 'versionering',
'Videos': 'Videos',
'View': 'View',
'view': 'view',
'Views': 'Views',
'views': 'views',
'web2py is up to date': 'web2py is up to date',
'web2py Recent Tweets': 'web2py Recente Tweets',
'Welcome': 'Welkom',
'Welcome %s': 'Welkom %s',
'Welcome to web2py': 'Welkom bij web2py',
'Welcome to web2py!': 'Welkom bij web2py!',
'Which called the function %s located in the file %s': 'Die functie %s aanriep en zich bevindt in het bestand %s',
'Working...': 'Working...',
'XML': 'XML',
'XML export of columns shown': 'XML export of columns shown',
'YES': 'JA',
'You are successfully running web2py': 'Je draait web2py succesvol',
'You can modify this application and adapt it to your needs': 'Je kan deze applicatie aanpassen naar je eigen behoeften',
'You visited the url %s': 'Je bezocht de url %s',
}
|
remcoboerma/w2p_redirector
|
languages/nl.py
|
Python
|
lgpl-3.0
| 19,638
|
[
"Elk"
] |
14202c20b2355c75307e38f9d2efe2793863278b870c9e7a5fd34fbc564f5c64
|
#!/usr/bin/env python
#===============================================================================
# Copyright 2015 Geoscience Australia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#===============================================================================
"""
modis_ingester.py - Ingester script for Modis datasets.
"""
from __future__ import absolute_import
import os
import sys
import logging
from os.path import basename
from osgeo import gdal
from EOtools.execute import execute
from ..abstract_ingester import SourceFileIngester
from .modis_dataset import ModisDataset
#
# Set up root logger
#
# Note that the logging level of the root logger will be reset to DEBUG
# if the --debug flag is set (by AbstractIngester.__init__). To receive
# DEBUG level messages from a module do two things:
# 1) set the logging level for the module you are interested in to DEBUG,
# 2) use the --debug flag when running the script.
#
logging.basicConfig(stream=sys.stdout,
format='%(message)s',
level=logging.INFO)
#
# Set up logger (for this module).
#
LOGGER = logging.getLogger(__name__)
def _is_modis_file(filename):
"""
Does the given file match a Modis NetCDF file?
(we could make this more extensive in the future, but it's directly derived from the old find_files() logic.)
:type filename: str
:rtype: bool
>>> d = '/g/data/u39/public/data/modis/datacube/mod09-swath/terra/2010/12/31'
>>> f = 'MOD09_L2.2010365.2300.20130130162407.remapped_swath_500mbands_0.005deg.nc'
>>> _is_modis_file(f)
True
>>> _is_modis_file(os.path.join(d, f))
True
>>> _is_modis_file(d)
False
"""
basename = os.path.basename(filename).lower()
return basename.startswith('mod') and filename.endswith(".nc")
class ModisIngester(SourceFileIngester):
"""Ingester class for Modis datasets."""
def __init__(self, datacube=None, collection=None):
super(ModisIngester, self).__init__(_is_modis_file, datacube, collection)
def open_dataset(self, dataset_path):
"""Create and return a dataset object.
dataset_path: points to the dataset to be opened and have
its metadata read.
"""
return ModisDataset(dataset_path)
def filter_dataset(self, path, row, date):
"""Return True if the dataset should be included, False otherwise.
Overridden to allow NULLS for row
"""
(start_date, end_date) = self.get_date_range()
(min_path, max_path) = self.get_path_range()
(min_row, max_row) = self.get_row_range()
include = ((max_path is None or path is None or int(path) <= int(max_path)) and
(min_path is None or path is None or int(path) >= int(min_path)) and
(end_date is None or date is None or date <= end_date) and
(start_date is None or date is None or date >= start_date))
return include
def preprocess_dataset(self, dataset_list):
"""Performs pre-processing on the dataset_list object.
dataset_list: list of datasets to be opened and have
its metadata read.
"""
temp_dir = self.collection.get_temp_tile_directory()
vrt_list = []
for dataset_path in dataset_list:
fname = os.path.splitext(basename(dataset_path))[0]
dataset_dir = os.path.split(dataset_path)[0]
mod09_fname = temp_dir + '/' + fname + '.vrt'
rbq500_fname = temp_dir + '/' + fname + '_RBQ500.vrt'
dataset = gdal.Open(dataset_path, gdal.GA_ReadOnly)
subDataSets = dataset.GetSubDatasets()
command_string = 'gdalbuildvrt -separate -overwrite '
command_string += mod09_fname
command_string += ' ' + subDataSets[1][0] # band 1
command_string += ' ' + subDataSets[2][0] # band 2
command_string += ' ' + subDataSets[3][0] # band 3
command_string += ' ' + subDataSets[4][0] # band 4
command_string += ' ' + subDataSets[5][0] # band 5
command_string += ' ' + subDataSets[6][0] # band 6
command_string += ' ' + subDataSets[7][0] # band 7
result = execute(command_string=command_string)
if result['returncode'] != 0:
raise DatasetError('Unable to perform gdalbuildvrt on bands: ' +
'"%s" failed: %s'\
% (command_string, result['stderr']))
vrt_list.append(mod09_fname)
command_string = 'gdalbuildvrt -separate -overwrite '
command_string += rbq500_fname
command_string += ' ' + subDataSets[0][0] # 500m PQA
result = execute(command_string=command_string)
if result['returncode'] != 0:
raise DatasetError('Unable to perform gdalbuildvrt on rbq: ' +
'"%s" failed: %s'\
% (command_string, result['stderr']))
vrt_list.append(rbq500_fname)
return vrt_list
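# --- Illustrative note (assumed paths shown for shape only, not a real dataset) ---
# For one MOD09 NetCDF input, preprocess_dataset() builds two VRTs in the temporary
# tile directory, roughly:
#     gdalbuildvrt -separate -overwrite <tmp>/<fname>.vrt <subdataset 1> ... <subdataset 7>
#     gdalbuildvrt -separate -overwrite <tmp>/<fname>_RBQ500.vrt <subdataset 0>
# i.e. one stack of the seven 500 m reflectance bands and one single-band VRT for
# the 500 m pixel-quality subdataset.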
if __name__ == '__main__':
import doctest
doctest.testmod()
|
ama-jharrison/agdc
|
agdc/agdc/modis_ingester/__init__.py
|
Python
|
apache-2.0
| 5,726
|
[
"NetCDF"
] |
0dcb14d0af5c9791ec4a71e2ee64bdfd7f0b2862a80b5a2358ad80c58c66550d
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Radim Rehurek <me@radimrehurek.com>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Deep learning via word2vec's "skip-gram and CBOW models", using either
hierarchical softmax or negative sampling [1]_ [2]_.
The training algorithms were originally ported from the C package https://code.google.com/p/word2vec/
and extended with additional functionality.
For a blog tutorial on gensim word2vec, with an interactive web app trained on GoogleNews, visit http://radimrehurek.com/2014/02/word2vec-tutorial/
**Make sure you have a C compiler before installing gensim, to use optimized (compiled) word2vec training**
(70x speedup compared to plain NumPy implementation [3]_).
Initialize a model with e.g.::
>>> model = Word2Vec(sentences, size=100, window=5, min_count=5, workers=4)
Persist a model to disk with::
>>> model.save(fname)
>>> model = Word2Vec.load(fname) # you can continue training with the loaded model!
The model can also be instantiated from an existing file on disk in the word2vec C format::
>>> model = Word2Vec.load_word2vec_format('/tmp/vectors.txt', binary=False) # C text format
>>> model = Word2Vec.load_word2vec_format('/tmp/vectors.bin', binary=True) # C binary format
You can perform various syntactic/semantic NLP word tasks with the model. Some of them
are already built-in::
>>> model.most_similar(positive=['woman', 'king'], negative=['man'])
[('queen', 0.50882536), ...]
>>> model.doesnt_match("breakfast cereal dinner lunch".split())
'cereal'
>>> model.similarity('woman', 'man')
0.73723527
>>> model['computer'] # raw numpy vector of a word
array([-0.00449447, -0.00310097, 0.02421786, ...], dtype=float32)
and so on.
If you're finished training a model (=no more updates, only querying), you can do
>>> model.init_sims(replace=True)
to trim unneeded model memory = use (much) less RAM.
Note that there is a :mod:`gensim.models.phrases` module which lets you automatically
detect phrases longer than one word. Using phrases, you can learn a word2vec model
where "words" are actually multiword expressions, such as `new_york_times` or `financial_crisis`:
>>> bigram_transformer = gensim.models.Phrases(sentences)
>>> model = Word2Vec(bigram_transformer[sentences], size=100, ...)
.. [1] Tomas Mikolov, Kai Chen, Greg Corrado, and Jeffrey Dean. Efficient Estimation of Word Representations in Vector Space. In Proceedings of Workshop at ICLR, 2013.
.. [2] Tomas Mikolov, Ilya Sutskever, Kai Chen, Greg Corrado, and Jeffrey Dean. Distributed Representations of Words and Phrases and their Compositionality.
In Proceedings of NIPS, 2013.
.. [3] Optimizing word2vec in gensim, http://radimrehurek.com/2013/09/word2vec-in-python-part-two-optimizing/
"""
from __future__ import division # py3 "true division"
import logging
import sys
import os
import heapq
from timeit import default_timer
from copy import deepcopy
from collections import defaultdict
import threading
import itertools
from gensim.utils import keep_vocab_item
try:
from queue import Queue, Empty
except ImportError:
from Queue import Queue, Empty
from numpy import exp, log, dot, zeros, outer, random, dtype, float32 as REAL,\
uint32, seterr, array, uint8, vstack, fromstring, sqrt, newaxis,\
ndarray, empty, sum as np_sum, prod, ones
from gensim import utils, matutils # utility fnc for pickling, common scipy operations etc
from six import iteritems, itervalues, string_types
from six.moves import xrange
from types import GeneratorType
logger = logging.getLogger("gensim.models.word2vec")
try:
from gensim.models.word2vec_inner import train_sentence_sg, train_sentence_cbow, FAST_VERSION
except ImportError:
# failed... fall back to plain numpy (20-80x slower training than the above)
FAST_VERSION = -1
def train_sentence_sg(model, sentence, alpha, work=None):
"""
Update skip-gram model by training on a single sentence.
The sentence is a list of string tokens, which are looked up in the model's
vocab dictionary. Called internally from `Word2Vec.train()`.
This is the non-optimized, Python version. If you have cython installed, gensim
will use the optimized version from word2vec_inner instead.
"""
word_vocabs = [model.vocab[w] for w in sentence if w in model.vocab and
model.vocab[w].sample_int > model.random.rand() * 2**32]
for pos, word in enumerate(word_vocabs):
reduced_window = model.random.randint(model.window) # `b` in the original word2vec code
# now go over all words from the (reduced) window, predicting each one in turn
start = max(0, pos - model.window + reduced_window)
for pos2, word2 in enumerate(word_vocabs[start:(pos + model.window + 1 - reduced_window)], start):
# don't train on the `word` itself
if pos2 != pos:
train_sg_pair(model, model.index2word[word.index], word2.index, alpha)
return len(word_vocabs)
def train_sentence_cbow(model, sentence, alpha, work=None, neu1=None):
"""
Update CBOW model by training on a single sentence.
The sentence is a list of string tokens, which are looked up in the model's
vocab dictionary. Called internally from `Word2Vec.train()`.
This is the non-optimized, Python version. If you have cython installed, gensim
will use the optimized version from word2vec_inner instead.
"""
word_vocabs = [model.vocab[w] for w in sentence if w in model.vocab and
model.vocab[w].sample_int > model.random.rand() * 2**32]
for pos, word in enumerate(word_vocabs):
reduced_window = model.random.randint(model.window) # `b` in the original word2vec code
start = max(0, pos - model.window + reduced_window)
window_pos = enumerate(word_vocabs[start:(pos + model.window + 1 - reduced_window)], start)
word2_indices = [word2.index for pos2, word2 in window_pos if (word2 is not None and pos2 != pos)]
l1 = np_sum(model.syn0[word2_indices], axis=0) # 1 x vector_size
if word2_indices and model.cbow_mean:
l1 /= len(word2_indices)
train_cbow_pair(model, word, word2_indices, l1, alpha)
return len(word_vocabs)
def train_sg_pair(model, word, context_index, alpha, learn_vectors=True, learn_hidden=True,
context_vectors=None, context_locks=None):
if context_vectors is None:
context_vectors = model.syn0
if context_locks is None:
context_locks = model.syn0_lockf
if word not in model.vocab:
return
predict_word = model.vocab[word] # target word (NN output)
l1 = context_vectors[context_index] # input word (NN input/projection layer)
lock_factor = context_locks[context_index]
neu1e = zeros(l1.shape)
if model.hs:
# work on the entire tree at once, to push as much work into numpy's C routines as possible (performance)
l2a = deepcopy(model.syn1[predict_word.point]) # 2d matrix, codelen x layer1_size
fa = 1.0 / (1.0 + exp(-dot(l1, l2a.T))) # propagate hidden -> output
ga = (1 - predict_word.code - fa) * alpha # vector of error gradients multiplied by the learning rate
if learn_hidden:
model.syn1[predict_word.point] += outer(ga, l1) # learn hidden -> output
neu1e += dot(ga, l2a) # save error
if model.negative:
# use this word (label = 1) + `negative` other random words not from this sentence (label = 0)
word_indices = [predict_word.index]
while len(word_indices) < model.negative + 1:
w = model.cum_table.searchsorted(model.random.randint(model.cum_table[-1]))
if w != predict_word.index:
word_indices.append(w)
l2b = model.syn1neg[word_indices] # 2d matrix, k+1 x layer1_size
fb = 1. / (1. + exp(-dot(l1, l2b.T))) # propagate hidden -> output
gb = (model.neg_labels - fb) * alpha # vector of error gradients multiplied by the learning rate
if learn_hidden:
model.syn1neg[word_indices] += outer(gb, l1) # learn hidden -> output
neu1e += dot(gb, l2b) # save error
if learn_vectors:
l1 += neu1e * lock_factor # learn input -> hidden (mutates model.syn0[word2.index], if that is l1)
return neu1e
def train_cbow_pair(model, word, input_word_indices, l1, alpha, learn_vectors=True, learn_hidden=True):
neu1e = zeros(l1.shape)
if model.hs:
l2a = model.syn1[word.point] # 2d matrix, codelen x layer1_size
fa = 1. / (1. + exp(-dot(l1, l2a.T))) # propagate hidden -> output
ga = (1. - word.code - fa) * alpha # vector of error gradients multiplied by the learning rate
if learn_hidden:
model.syn1[word.point] += outer(ga, l1) # learn hidden -> output
neu1e += dot(ga, l2a) # save error
if model.negative:
# use this word (label = 1) + `negative` other random words not from this sentence (label = 0)
word_indices = [word.index]
while len(word_indices) < model.negative + 1:
w = model.cum_table.searchsorted(model.random.randint(model.cum_table[-1]))
if w != word.index:
word_indices.append(w)
l2b = model.syn1neg[word_indices] # 2d matrix, k+1 x layer1_size
fb = 1. / (1. + exp(-dot(l1, l2b.T))) # propagate hidden -> output
gb = (model.neg_labels - fb) * alpha # vector of error gradients multiplied by the learning rate
if learn_hidden:
model.syn1neg[word_indices] += outer(gb, l1) # learn hidden -> output
neu1e += dot(gb, l2b) # save error
if learn_vectors:
# learn input -> hidden, here for all words in the window separately
if not model.cbow_mean and input_word_indices:
neu1e /= len(input_word_indices)
for i in input_word_indices:
model.syn0[i] += neu1e * model.syn0_lockf[i]
return neu1e
# could move this import up to where train_* is imported,
# but for now just do it separately in case there are unforeseen bugs in score_
try:
from gensim.models.word2vec_inner import score_sentence_sg, score_sentence_cbow
except ImportError:
def score_sentence_sg(model, sentence, work=None):
"""
Obtain likelihood score for a single sentence in a fitted skip-gram representation.
The sentence is a list of Vocab objects (or None, when the corresponding
word is not in the vocabulary). Called internally from `Word2Vec.score()`.
This is the non-optimized, Python version. If you have cython installed, gensim
will use the optimized version from word2vec_inner instead.
"""
log_prob_sentence = 0.0
if model.negative:
raise RuntimeError("scoring is only available for HS=True")
word_vocabs = [model.vocab[w] for w in sentence if w in model.vocab]
for pos, word in enumerate(word_vocabs):
if word is None:
continue # OOV word in the input sentence => skip
# now go over all words from the window, predicting each one in turn
start = max(0, pos - model.window)
for pos2, word2 in enumerate(sentence[start:(pos + model.window + 1)], start):
# don't train on OOV words and on the `word` itself
if word2 and not (pos2 == pos):
log_prob_sentence += score_sg_pair(model, word, word2)
return log_prob_sentence
def score_sentence_cbow(model, sentence, alpha, work=None, neu1=None):
"""
Obtain likelihood score for a single sentence in a fitted CBOW representation.
The sentence is a list of Vocab objects (or None, where the corresponding
word is not in the vocabulary). Called internally from `Word2Vec.score()`.
This is the non-optimized, Python version. If you have cython installed, gensim
will use the optimized version from word2vec_inner instead.
"""
log_prob_sentence = 0.0
if model.negative:
raise RuntimeError("scoring is only available for HS=True")
word_vocabs = [model.vocab[w] for w in sentence if w in model.vocab]
for pos, word in enumerate(word_vocabs):
if word is None:
continue # OOV word in the input sentence => skip
start = max(0, pos - model.window)
window_pos = enumerate(sentence[start:(pos + model.window + 1)], start)
word2_indices = [word2.index for pos2, word2 in window_pos if (word2 is not None and pos2 != pos)]
l1 = np_sum(model.syn0[word2_indices], axis=0) # 1 x layer1_size
if word2_indices and model.cbow_mean:
l1 /= len(word2_indices)
log_prob_sentence += score_cbow_pair(model, word, word2_indices, l1)
return log_prob_sentence
def score_sg_pair(model, word, word2):
l1 = model.syn0[word2.index]
l2a = deepcopy(model.syn1[word.point]) # 2d matrix, codelen x layer1_size
sgn = (-1.0)**word.code # ch function, 0 -> 1, 1 -> -1 (parentheses matter: -1.0**x is -(1.0**x))
lprob = -log(1.0 + exp(-sgn*dot(l1, l2a.T)))
return sum(lprob)
def score_cbow_pair(model, word, word2_indices, l1):
l2a = model.syn1[word.point] # 2d matrix, codelen x layer1_size
sgn = (-1.0)**word.code # ch function, 0 -> 1, 1 -> -1 (parentheses matter: -1.0**x is -(1.0**x))
lprob = -log(1.0 + exp(-sgn*dot(l1, l2a.T)))
return sum(lprob)
class Vocab(object):
"""
A single vocabulary item, used internally for collecting per-word frequency/sampling info,
and for constructing binary trees (incl. both word leaves and inner nodes).
"""
def __init__(self, **kwargs):
self.count = 0
self.__dict__.update(kwargs)
def __lt__(self, other): # used for sorting in a priority queue
return self.count < other.count
def __str__(self):
vals = ['%s:%r' % (key, self.__dict__[key]) for key in sorted(self.__dict__) if not key.startswith('_')]
return "%s(%s)" % (self.__class__.__name__, ', '.join(vals))
class Word2Vec(utils.SaveLoad):
"""
Class for training, using and evaluating neural networks described in https://code.google.com/p/word2vec/
The model can be stored/loaded via its `save()` and `load()` methods, or stored/loaded in a format
compatible with the original word2vec implementation via `save_word2vec_format()` and `load_word2vec_format()`.
"""
def __init__(
self, sentences=None, size=100, alpha=0.025, window=5, min_count=5,
max_vocab_size=None, sample=0, seed=1, workers=1, min_alpha=0.0001,
sg=1, hs=1, negative=0, cbow_mean=0, hashfxn=hash, iter=1, null_word=0,
trim_rule=None):
"""
Initialize the model from an iterable of `sentences`. Each sentence is a
list of words (unicode strings) that will be used for training.
The `sentences` iterable can be simply a list, but for larger corpora,
consider an iterable that streams the sentences directly from disk/network.
See :class:`BrownCorpus`, :class:`Text8Corpus` or :class:`LineSentence` in
this module for such examples.
If you don't supply `sentences`, the model is left uninitialized -- use if
you plan to initialize it in some other way.
`sg` defines the training algorithm. By default (`sg=1`), skip-gram is used.
Otherwise, `cbow` is employed.
`size` is the dimensionality of the feature vectors.
`window` is the maximum distance between the current and predicted word within a sentence.
`alpha` is the initial learning rate (will linearly drop to zero as training progresses).
`seed` = for the random number generator. Initial vectors for each
word are seeded with a hash of the concatenation of word + str(seed).
`min_count` = ignore all words with total frequency lower than this.
`max_vocab_size` = limit RAM during vocabulary building; if there are more unique
words than this, then prune the infrequent ones. Every 10 million word types
need about 1GB of RAM. Set to `None` for no limit (default).
`sample` = threshold for configuring which higher-frequency words are randomly downsampled;
default is 0 (off), useful value is 1e-5.
`workers` = use this many worker threads to train the model (=faster training with multicore machines).
`hs` = if 1 (default), hierarchical softmax will be used for model training (else set to 0).
`negative` = if > 0, negative sampling will be used, the int for negative
specifies how many "noise words" should be drawn (usually between 5-20).
`cbow_mean` = if 0 (default), use the sum of the context word vectors. If 1, use the mean.
Only applies when cbow is used.
`hashfxn` = hash function to use to randomly initialize weights, for increased
training reproducibility. Default is Python's rudimentary built in hash function.
`iter` = number of iterations (epochs) over the corpus.
`trim_rule` = vocabulary trimming rule, specifies whether certain words should remain
in the vocabulary, be trimmed away, or handled using the default (discard if word count < min_count).
Can be None (min_count will be used), or a callable that accepts parameters (word, count, min_count) and
returns either util.RULE_DISCARD, util.RULE_KEEP or util.RULE_DEFAULT.
Note: The rule, if given, is only used to prune the vocabulary during build_vocab() and is not stored as part
of the model.
"""
self.vocab = {} # mapping from a word (string) to a Vocab object
self.index2word = [] # map from a word's matrix index (int) to word (string)
self.sg = int(sg)
self.cum_table = None # for negative sampling
self.vector_size = int(size)
self.layer1_size = int(size)
if size % 4 != 0:
logger.warning("consider setting layer size to a multiple of 4 for greater performance")
self.alpha = float(alpha)
self.window = int(window)
self.max_vocab_size = max_vocab_size
self.seed = seed
self.random = random.RandomState(seed)
self.min_count = min_count
self.sample = sample
self.workers = workers
self.min_alpha = min_alpha
self.hs = hs
self.negative = negative
self.cbow_mean = int(cbow_mean)
self.hashfxn = hashfxn
self.iter = iter
self.null_word = null_word
self.train_count = 0
self.total_train_time = 0
if sentences is not None:
if isinstance(sentences, GeneratorType):
raise TypeError("You can't pass a generator as the sentences argument. Try an iterator.")
self.build_vocab(sentences, trim_rule=trim_rule)
self.train(sentences)
def make_cum_table(self, power=0.75, domain=2**31 - 1):
"""
Create a cumulative-distribution table using stored vocabulary word counts for
drawing random words in the negative-sampling training routines.
To draw a word index, choose a random integer up to the maximum value in the
table (cum_table[-1]), then find that integer's sorted insertion point
(as if by bisect_left or ndarray.searchsorted()). That insertion point is the
drawn index, coming up in proportion equal to the increment at that slot.
Called internally from 'build_vocab()'.
"""
vocab_size = len(self.index2word)
self.cum_table = zeros(vocab_size, dtype=uint32)
# compute sum of all power (Z in paper)
train_words_pow = float(sum([self.vocab[word].count**power for word in self.vocab]))
cumulative = 0.0
for word_index in range(vocab_size):
cumulative += self.vocab[self.index2word[word_index]].count**power / train_words_pow
self.cum_table[word_index] = round(cumulative * domain)
if len(self.cum_table) > 0:
assert self.cum_table[-1] == domain
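# Illustrative sketch of the drawing procedure described in the docstring above
# (`model` is a placeholder for any Word2Vec instance whose cum_table has been built,
# i.e. one using negative sampling):
#
#     draw = model.cum_table.searchsorted(model.random.randint(model.cum_table[-1]))
#     sampled_word = model.index2word[draw]
#
# More frequent words occupy larger increments of the table, so they come up
# proportionally more often (counts raised to the 0.75 power, as above).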
def create_binary_tree(self):
"""
Create a binary Huffman tree using stored vocabulary word counts. Frequent words
will have shorter binary codes. Called internally from `build_vocab()`.
"""
logger.info("constructing a huffman tree from %i words", len(self.vocab))
# build the huffman tree
heap = list(itervalues(self.vocab))
heapq.heapify(heap)
for i in xrange(len(self.vocab) - 1):
min1, min2 = heapq.heappop(heap), heapq.heappop(heap)
heapq.heappush(heap, Vocab(count=min1.count + min2.count, index=i + len(self.vocab), left=min1, right=min2))
# recurse over the tree, assigning a binary code to each vocabulary word
if heap:
max_depth, stack = 0, [(heap[0], [], [])]
while stack:
node, codes, points = stack.pop()
if node.index < len(self.vocab):
# leaf node => store its path from the root
node.code, node.point = codes, points
max_depth = max(len(codes), max_depth)
else:
# inner node => continue recursion
points = array(list(points) + [node.index - len(self.vocab)], dtype=uint32)
stack.append((node.left, array(list(codes) + [0], dtype=uint8), points))
stack.append((node.right, array(list(codes) + [1], dtype=uint8), points))
logger.info("built huffman tree with maximum node depth %i", max_depth)
def build_vocab(self, sentences, keep_raw_vocab=False, trim_rule=None):
"""
Build vocabulary from a sequence of sentences (can be a once-only generator stream).
Each sentence must be a list of unicode strings.
"""
self.scan_vocab(sentences, trim_rule=trim_rule) # initial survey
self.scale_vocab(keep_raw_vocab, trim_rule=trim_rule) # trim by min_count & precalculate downsampling
self.finalize_vocab() # build tables & arrays
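# Minimal sketch of the explicit two-step setup that the constructor performs when
# `sentences` is passed (names are placeholders; `corpus` must be re-iterable):
#
#     model = Word2Vec(size=100, min_count=5)   # left uninitialized
#     model.build_vocab(corpus)                  # scan_vocab + scale_vocab + finalize_vocab
#     model.train(corpus)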
def scan_vocab(self, sentences, progress_per=10000, trim_rule=None):
"""Do an initial scan of all words appearing in sentences."""
logger.info("collecting all words and their counts")
sentence_no = -1
total_words = 0
min_reduce = 1
vocab = defaultdict(int)
for sentence_no, sentence in enumerate(sentences):
if sentence_no % progress_per == 0:
logger.info("PROGRESS: at sentence #%i, processed %i words, keeping %i word types",
sentence_no, sum(itervalues(vocab)) + total_words, len(vocab))
for word in sentence:
vocab[word] += 1
if self.max_vocab_size and len(vocab) > self.max_vocab_size:
total_words += utils.prune_vocab(vocab, min_reduce, trim_rule=trim_rule)
min_reduce += 1
total_words += sum(itervalues(vocab))
logger.info("collected %i word types from a corpus of %i raw words and %i sentences",
len(vocab), total_words, sentence_no + 1)
self.corpus_count = sentence_no + 1
self.raw_vocab = vocab
def scale_vocab(self, min_count=None, sample=None, dry_run=False, keep_raw_vocab=False, trim_rule=None):
"""
Apply vocabulary settings for `min_count` (discarding less-frequent words)
and `sample` (controlling the downsampling of more-frequent words).
Calling with `dry_run=True` will only simulate the provided settings and
report the size of the retained vocabulary, effective corpus length, and
estimated memory requirements. Results are both printed via logging and
returned as a dict.
Delete the raw vocabulary after the scaling is done to free up RAM,
unless `keep_raw_vocab` is set.
"""
min_count = min_count or self.min_count
sample = sample or self.sample
# Discard words less-frequent than min_count
if not dry_run:
self.index2word = []
# make stored settings match these applied settings
self.min_count = min_count
self.sample = sample
self.vocab = {}
drop_unique, drop_total, retain_total, original_total = 0, 0, 0, 0
retain_words = []
for word, v in iteritems(self.raw_vocab):
if keep_vocab_item(word, v, min_count, trim_rule=trim_rule):
retain_words.append(word)
retain_total += v
original_total += v
if not dry_run:
self.vocab[word] = Vocab(count=v, index=len(self.index2word))
self.index2word.append(word)
else:
drop_unique += 1
drop_total += v
original_total += v
logger.info("min_count=%d retains %i unique words (drops %i)",
min_count, len(retain_words), drop_unique)
logger.info("min_count leaves %i word corpus (%i%% of original %i)",
retain_total, retain_total * 100 / max(original_total, 1), original_total)
# Precalculate each vocabulary item's threshold for sampling
if not sample:
# no words downsampled
threshold_count = retain_total
elif sample < 1.0:
# traditional meaning: set parameter as proportion of total
threshold_count = sample * retain_total
else:
# new shorthand: sample >= 1 means downsample all words with higher count than sample
threshold_count = int(sample * (3 + sqrt(5)) / 2)
downsample_total, downsample_unique = 0, 0
for w in retain_words:
v = self.raw_vocab[w]
word_probability = (sqrt(v / threshold_count) + 1) * (threshold_count / v)
if word_probability < 1.0:
downsample_unique += 1
downsample_total += word_probability * v
else:
word_probability = 1.0
downsample_total += v
if not dry_run:
self.vocab[w].sample_int = int(round(word_probability * 2**32))
if not dry_run and not keep_raw_vocab:
logger.info("deleting the raw counts dictionary of %i items", len(self.raw_vocab))
self.raw_vocab = defaultdict(int)
logger.info("sample=%g downsamples %i most-common words", sample, downsample_unique)
logger.info("downsampling leaves estimated %i word corpus (%.1f%% of prior %i)",
downsample_total, downsample_total * 100.0 / max(retain_total, 1), retain_total)
# return from each step: words-affected, resulting-corpus-size
report_values = {'drop_unique': drop_unique, 'retain_total': retain_total,
'downsample_unique': downsample_unique, 'downsample_total': int(downsample_total)}
# print extra memory estimates
report_values['memory'] = self.estimate_memory(vocab_size=len(retain_words))
return report_values
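# Sketch of the dry_run usage described in the docstring above (placeholder names);
# assumes build_vocab()/scan_vocab() has already populated raw_vocab:
#
#     report = model.scale_vocab(min_count=10, sample=1e-5, dry_run=True)
#     report['retain_total'], report['downsample_total'], report['memory']['total']
#
# Nothing in the model is modified, so different settings can be compared cheaply.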
def finalize_vocab(self):
"""Build tables and model weights based on final vocabulary settings."""
if not self.index2word:
self.scale_vocab()
if self.hs:
# add info about each word's Huffman encoding
self.create_binary_tree()
if self.negative:
# build the table for drawing random words (for negative sampling)
self.make_cum_table()
if self.null_word:
# create null pseudo-word for padding when using concatenative L1 (run-of-words)
# this word is only ever input – never predicted – so count, huffman-point, etc doesn't matter
word, v = '\0', Vocab(count=1, sample_int=0)
v.index = len(self.vocab)
self.index2word.append(word)
self.vocab[word] = v
# set initial input/projection and hidden weights
self.reset_weights()
def reset_from(self, other_model):
"""
Borrow shareable pre-built structures (like vocab) from the other_model. Useful
if testing multiple models in parallel on the same corpus.
"""
self.vocab = other_model.vocab
self.index2word = other_model.index2word
self.cum_table = other_model.cum_table
self.corpus_count = other_model.corpus_count
self.reset_weights()
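# Sketch of the pattern this supports (placeholder names): survey the corpus once,
# then try several hyper-parameter settings without re-scanning it:
#
#     base = Word2Vec(min_count=5)
#     base.build_vocab(corpus)
#     variant = Word2Vec(size=300, window=10)
#     variant.reset_from(base)      # borrows vocab, index2word, cum_table, corpus_count
#     variant.train(corpus)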
def _do_train_job(self, job, alpha, inits):
work, neu1 = inits
tally = 0
raw_tally = 0
for sentence in job:
if self.sg:
tally += train_sentence_sg(self, sentence, alpha, work)
else:
tally += train_sentence_cbow(self, sentence, alpha, work, neu1)
raw_tally += len(sentence)
return (tally, raw_tally)
def _raw_word_count(self, items):
return sum(len(item) for item in items)
def train(self, sentences, total_words=None, word_count=0, chunksize=100, total_examples=None, queue_factor=2, report_delay=1):
"""
Update the model's neural weights from a sequence of sentences (can be a once-only generator stream).
For Word2Vec, each sentence must be a list of unicode strings. (Subclasses may accept other examples.)
To support linear learning-rate decay from (initial) alpha to min_alpha, either total_examples
(count of sentences) or total_words (count of raw words in sentences) should be provided, unless the
sentences are the same as those that were used to initially build the vocabulary.
"""
if FAST_VERSION < 0:
import warnings
warnings.warn("C extension not loaded for Word2Vec, training will be slow. "
"Install a C compiler and reinstall gensim for fast training.")
self.neg_labels = []
if self.negative > 0:
# precompute negative labels optimization for pure-python training
self.neg_labels = zeros(self.negative + 1)
self.neg_labels[0] = 1.
logger.info(
"training model with %i workers on %i vocabulary and %i features, "
"using sg=%s hs=%s sample=%s and negative=%s",
self.workers, len(self.vocab), self.layer1_size, self.sg,
self.hs, self.sample, self.negative)
if not self.vocab:
raise RuntimeError("you must first build vocabulary before training the model")
if not hasattr(self, 'syn0'):
raise RuntimeError("you must first finalize vocabulary before training the model")
if total_words is None and total_examples is None:
if self.corpus_count:
total_examples = self.corpus_count
logger.info("expecting %i examples, matching count from corpus used for vocabulary survey", total_examples)
else:
raise ValueError("you must provide either total_words or total_examples, to enable alpha and progress calculations")
if self.iter > 1:
sentences = utils.RepeatCorpusNTimes(sentences, self.iter)
total_words = total_words and total_words * self.iter
total_examples = total_examples and total_examples * self.iter
def worker_init():
work = matutils.zeros_aligned(self.layer1_size, dtype=REAL) # per-thread private work memory
neu1 = matutils.zeros_aligned(self.layer1_size, dtype=REAL)
return (work, neu1)
def worker_one_job(job, inits):
items, alpha = job
if items is None: # signal to finish
return False
# train & return tally
tally, raw_tally = self._do_train_job(items, alpha, inits)
progress_queue.put((len(items), tally, raw_tally)) # report progress
return True
def worker_loop():
"""Train the model, lifting lists of sentences from the jobs queue."""
init = worker_init()
while True:
job = job_queue.get()
if not worker_one_job(job, init):
break
start, next_report = default_timer(), 1.0
# buffer ahead only a limited number of jobs.. this is the reason we can't simply use ThreadPool :(
if self.workers > 0:
job_queue = Queue(maxsize=queue_factor * self.workers)
else:
job_queue = FakeJobQueue(worker_init, worker_one_job)
progress_queue = Queue(maxsize=(queue_factor + 1) * self.workers)
workers = [threading.Thread(target=worker_loop) for _ in xrange(self.workers)]
for thread in workers:
thread.daemon = True # make interrupting the process with ctrl+c easier
thread.start()
pushed_words = 0
pushed_examples = 0
example_count = 0
trained_word_count = 0
raw_word_count = word_count
push_done = False
done_jobs = 0
next_alpha = self.alpha
jobs_source = enumerate(utils.grouper(sentences, chunksize))
# fill jobs queue with (sentence, alpha) job tuples
while True:
try:
job_no, items = next(jobs_source)
logger.debug("putting job #%i in the queue at alpha %.05f", job_no, next_alpha)
job_queue.put((items, next_alpha))
# update the learning rate before every next job
if self.min_alpha < next_alpha:
if total_examples:
# examples-based decay
pushed_examples += len(items)
next_alpha = self.alpha - (self.alpha - self.min_alpha) * (pushed_examples / total_examples)
else:
# words-based decay
pushed_words += self._raw_word_count(items)
next_alpha = self.alpha - (self.alpha - self.min_alpha) * (pushed_words / total_words)
next_alpha = max(next_alpha, self.min_alpha)
except StopIteration:
logger.info(
"reached end of input; waiting to finish %i outstanding jobs",
job_no - done_jobs + 1)
for _ in xrange(self.workers):
job_queue.put((None, 0)) # give the workers heads up that they can finish -- no more work!
push_done = True
try:
while done_jobs < (job_no+1) or not push_done:
examples, trained_words, raw_words = progress_queue.get(push_done) # only block after all jobs pushed
example_count += examples
trained_word_count += trained_words # only words in vocab & sampled
raw_word_count += raw_words
done_jobs += 1
elapsed = default_timer() - start
if elapsed >= next_report:
if total_examples:
# examples-based progress %
logger.info(
"PROGRESS: at %.2f%% examples, %.0f words/s",
100.0 * example_count / total_examples, trained_word_count / elapsed)
else:
# words-based progress %
logger.info(
"PROGRESS: at %.2f%% words, %.0f words/s",
100.0 * raw_word_count / total_words, trained_word_count / elapsed)
next_report = elapsed + report_delay # don't flood log, wait report_delay seconds
else:
# loop ended by job count; really done
break
except Empty:
pass # already out of loop; continue to next push
elapsed = default_timer() - start
logger.info(
"training on %i raw words took %.1fs, %.0f trained words/s",
raw_word_count, elapsed, trained_word_count / elapsed if elapsed else 0.0)
if total_examples and total_examples != example_count:
logger.warn("supplied example count (%i) did not equal expected count (%i)", example_count, total_examples)
if total_words and total_words != raw_word_count:
logger.warn("supplied raw word count (%i) did not equal expected count (%i)", raw_word_count, total_words)
self.train_count += 1 # number of times train() has been called
self.total_train_time += elapsed
self.clear_sims()
return trained_word_count
def _score_job_words(self, sentence, inits):
work, neu1 = inits
if self.sg:
return score_sentence_sg(self, sentence, work)
else:
return score_sentence_cbow(self, sentence, work, neu1)
# basics copied from the train() function
def score(self, sentences, total_sentences=int(1e9), chunksize=100, queue_factor=2, report_delay=1):
"""
Score the log probability for a sequence of sentences (can be a once-only generator stream).
Each sentence must be a list of unicode strings.
This does not change the fitted model in any way (see Word2Vec.train() for that)
Note that you should specify `total_sentences`; scoring more sentences than that limit (default: 1e9) will run into problems, since the results array is pre-allocated to hold that many scores.
See the article by Taddy [taddy]_ for examples of how to use such scores in document classification.
.. [taddy] Taddy, Matt. Document Classification by Inversion of Distributed Language Representations, in Proceedings of the 2015 Conference of the Association of Computational Linguistics.
"""
if FAST_VERSION < 0:
import warnings
warnings.warn("C extension compilation failed, scoring will be slow. "
"Install a C compiler and reinstall gensim for fastness.")
logger.info(
"scoring sentences with %i workers on %i vocabulary and %i features, "
"using sg=%s hs=%s sample=%s and negative=%s",
self.workers, len(self.vocab), self.layer1_size, self.sg, self.hs, self.sample, self.negative)
if not self.vocab:
raise RuntimeError("you must first build vocabulary before scoring new data")
if not self.hs:
raise RuntimeError("we have only implemented score for hs")
def worker_init():
work = zeros(1, dtype=REAL) # for sg hs, we actually only need one memory loc (running sum)
neu1 = matutils.zeros_aligned(self.layer1_size, dtype=REAL)
return (work, neu1)
def worker_one_job(job, inits):
if job is None: # signal to finish
return False
ns = 0
for (id, sentence) in job:
sentence_scores[id] = self._score_job_words(sentence, inits)
ns += 1
progress_queue.put(ns) # report progress
return True
def worker_loop():
"""Train the model, lifting lists of sentences from the jobs queue."""
init = worker_init()
while True:
job = job_queue.get()
if not worker_one_job(job, init):
break
start, next_report = default_timer(), 1.0
# buffer ahead only a limited number of jobs.. this is the reason we can't simply use ThreadPool :(
if self.workers > 0:
job_queue = Queue(maxsize=queue_factor * self.workers)
else:
job_queue = FakeJobQueue(worker_init, worker_one_job)
progress_queue = Queue(maxsize=(queue_factor + 1) * self.workers)
workers = [threading.Thread(target=worker_loop) for _ in xrange(self.workers)]
for thread in workers:
thread.daemon = True # make interrupting the process with ctrl+c easier
thread.start()
sentence_count = 0
sentence_scores = matutils.zeros_aligned(total_sentences, dtype=REAL)
push_done = False
done_jobs = 0
jobs_source = enumerate(utils.grouper(enumerate(sentences), chunksize))
# fill jobs queue with (id, sentence) job items
while True:
try:
job_no, items = next(jobs_source)
logger.debug("putting job #%i in the queue", job_no)
job_queue.put(items)
except StopIteration:
logger.info(
"reached end of input; waiting to finish %i outstanding jobs",
job_no - done_jobs + 1)
for _ in xrange(self.workers):
job_queue.put(None) # give the workers heads up that they can finish -- no more work!
push_done = True
try:
while done_jobs < (job_no+1) or not push_done:
ns = progress_queue.get(push_done) # only block after all jobs pushed
sentence_count += ns
done_jobs += 1
elapsed = default_timer() - start
if elapsed >= next_report:
logger.info(
"PROGRESS: at %.2f%% sentences, %.0f sentences/s",
100.0 * sentence_count, sentence_count / elapsed)
next_report = elapsed + report_delay # don't flood log, wait report_delay seconds
else:
# loop ended by job count; really done
break
except Empty:
pass # already out of loop; continue to next push
elapsed = default_timer() - start
self.clear_sims()
logger.info("scoring %i sentences took %.1fs, %.0f sentences/s"
% (sentence_count, elapsed, sentence_count / elapsed if elapsed else 0.0))
return sentence_scores[:sentence_count]
def clear_sims(self):
self.syn0norm = None
def reset_weights(self):
"""Reset all projection weights to an initial (untrained) state, but keep the existing vocabulary."""
logger.info("resetting layer weights")
self.syn0 = empty((len(self.vocab), self.vector_size), dtype=REAL)
# randomize weights vector by vector, rather than materializing a huge random matrix in RAM at once
for i in xrange(len(self.vocab)):
# construct deterministic seed from word AND seed argument
self.syn0[i] = self.seeded_vector(self.index2word[i] + str(self.seed))
if self.hs:
self.syn1 = zeros((len(self.vocab), self.layer1_size), dtype=REAL)
if self.negative:
self.syn1neg = zeros((len(self.vocab), self.layer1_size), dtype=REAL)
self.syn0norm = None
self.syn0_lockf = ones(len(self.vocab), dtype=REAL) # zeros suppress learning
def seeded_vector(self, seed_string):
"""Create one 'random' vector (but deterministic by seed_string)"""
# Note: built-in hash() may vary by Python version or even (in Py3.x) per launch
once = random.RandomState(uint32(self.hashfxn(seed_string)))
return (once.rand(self.vector_size) - 0.5) / self.vector_size
def save_word2vec_format(self, fname, fvocab=None, binary=False):
"""
Store the input-hidden weight matrix in the same format used by the original
C word2vec-tool, for compatibility.
"""
if fvocab is not None:
logger.info("storing vocabulary in %s" % (fvocab))
with utils.smart_open(fvocab, 'wb') as vout:
for word, vocab in sorted(iteritems(self.vocab), key=lambda item: -item[1].count):
vout.write(utils.to_utf8("%s %s\n" % (word, vocab.count)))
logger.info("storing %sx%s projection weights into %s" % (len(self.vocab), self.vector_size, fname))
assert (len(self.vocab), self.vector_size) == self.syn0.shape
with utils.smart_open(fname, 'wb') as fout:
fout.write(utils.to_utf8("%s %s\n" % self.syn0.shape))
# store in sorted order: most frequent words at the top
for word, vocab in sorted(iteritems(self.vocab), key=lambda item: -item[1].count):
row = self.syn0[vocab.index]
if binary:
fout.write(utils.to_utf8(word) + b" " + row.tostring())
else:
fout.write(utils.to_utf8("%s %s\n" % (word, ' '.join("%f" % val for val in row))))
@classmethod
def load_word2vec_format(cls, fname, fvocab=None, binary=False, norm_only=True, encoding='utf8'):
"""
Load the input-hidden weight matrix from the original C word2vec-tool format.
Note that the information stored in the file is incomplete (the binary tree is missing),
so while you can query for word similarity etc., you cannot continue training
with a model loaded this way.
`binary` is a boolean indicating whether the data is in binary word2vec format.
`norm_only` is a boolean indicating whether to only store normalised word2vec vectors in memory.
Word counts are read from `fvocab` filename, if set (this is the file generated
by `-save-vocab` flag of the original C tool).
If you trained the C model using non-utf8 encoding for words, specify that
encoding in `encoding`.
"""
counts = None
if fvocab is not None:
logger.info("loading word counts from %s" % (fvocab))
counts = {}
with utils.smart_open(fvocab) as fin:
for line in fin:
word, count = utils.to_unicode(line).strip().split()
counts[word] = int(count)
logger.info("loading projection weights from %s" % (fname))
with utils.smart_open(fname) as fin:
header = utils.to_unicode(fin.readline(), encoding=encoding)
vocab_size, vector_size = map(int, header.split()) # throws for invalid file format
result = cls(size=vector_size)
result.syn0 = zeros((vocab_size, vector_size), dtype=REAL)
if binary:
binary_len = dtype(REAL).itemsize * vector_size
for line_no in xrange(vocab_size):
# mixed text and binary: read text first, then binary
word = []
while True:
ch = fin.read(1)
if ch == b' ':
break
if ch != b'\n': # ignore newlines in front of words (some binary files have them)
word.append(ch)
word = utils.to_unicode(b''.join(word), encoding=encoding)
if counts is None:
result.vocab[word] = Vocab(index=line_no, count=vocab_size - line_no)
elif word in counts:
result.vocab[word] = Vocab(index=line_no, count=counts[word])
else:
logger.warning("vocabulary file is incomplete")
result.vocab[word] = Vocab(index=line_no, count=None)
result.index2word.append(word)
result.syn0[line_no] = fromstring(fin.read(binary_len), dtype=REAL)
else:
for line_no, line in enumerate(fin):
parts = utils.to_unicode(line.rstrip(), encoding=encoding).split(" ")
if len(parts) != vector_size + 1:
raise ValueError("invalid vector on line %s (is this really the text format?)" % (line_no))
word, weights = parts[0], list(map(REAL, parts[1:]))
if counts is None:
result.vocab[word] = Vocab(index=line_no, count=vocab_size - line_no)
elif word in counts:
result.vocab[word] = Vocab(index=line_no, count=counts[word])
else:
logger.warning("vocabulary file is incomplete")
result.vocab[word] = Vocab(index=line_no, count=None)
result.index2word.append(word)
result.syn0[line_no] = weights
logger.info("loaded %s matrix from %s" % (result.syn0.shape, fname))
result.init_sims(norm_only)
return result
def intersect_word2vec_format(self, fname, binary=False, encoding='utf8'):
"""
Merge the input-hidden weight matrix from the original C word2vec-tool format
given, where it intersects with the current vocabulary. (No words are added to the
existing vocabulary, but intersecting words adopt the file's weights, and
non-intersecting words are left alone.)
`binary` is a boolean indicating whether the data is in binary word2vec format.
"""
overlap_count = 0
logger.info("loading projection weights from %s" % (fname))
with utils.smart_open(fname) as fin:
header = utils.to_unicode(fin.readline(), encoding=encoding)
vocab_size, vector_size = map(int, header.split()) # throws for invalid file format
if not vector_size == self.vector_size:
raise ValueError("incompatible vector size %d in file %s" % (vector_size, fname))
# TOCONSIDER: maybe mismatched vectors still useful enough to merge (truncating/padding)?
if binary:
binary_len = dtype(REAL).itemsize * vector_size
for line_no in xrange(vocab_size):
# mixed text and binary: read text first, then binary
word = []
while True:
ch = fin.read(1)
if ch == b' ':
break
if ch != b'\n': # ignore newlines in front of words (some binary files have them)
word.append(ch)
word = utils.to_unicode(b''.join(word), encoding=encoding)
weights = fromstring(fin.read(binary_len), dtype=REAL)
if word in self.vocab:
overlap_count += 1
self.syn0[self.vocab[word].index] = weights
self.syn0_lockf[self.vocab[word].index] = 0.0 # lock it
else:
for line_no, line in enumerate(fin):
parts = utils.to_unicode(line.rstrip(), encoding=encoding).split(" ")
if len(parts) != vector_size + 1:
raise ValueError("invalid vector on line %s (is this really the text format?)" % (line_no))
word, weights = parts[0], list(map(REAL, parts[1:]))
if word in self.vocab:
overlap_count += 1
self.syn0[self.vocab[word].index] = weights
logger.info("merged %d vectors into %s matrix from %s" % (overlap_count, self.syn0.shape, fname))
def most_similar(self, positive=[], negative=[], topn=10):
"""
Find the top-N most similar words. Positive words contribute positively towards the
similarity, negative words negatively.
This method computes cosine similarity between a simple mean of the projection
weight vectors of the given words and the vectors for each word in the model.
The method corresponds to the `word-analogy` and `distance` scripts in the original
word2vec implementation.
If topn is False, most_similar returns the vector of similarity scores.
Example::
>>> trained_model.most_similar(positive=['woman', 'king'], negative=['man'])
[('queen', 0.50882536), ...]
"""
self.init_sims()
if isinstance(positive, string_types) and not negative:
# allow calls like most_similar('dog'), as a shorthand for most_similar(['dog'])
positive = [positive]
# add weights for each word, if not already present; default to 1.0 for positive and -1.0 for negative words
positive = [
(word, 1.0) if isinstance(word, string_types + (ndarray,)) else word
for word in positive
]
negative = [
(word, -1.0) if isinstance(word, string_types + (ndarray,)) else word
for word in negative
]
# compute the weighted average of all words
all_words, mean = set(), []
for word, weight in positive + negative:
if isinstance(word, ndarray):
mean.append(weight * word)
elif word in self.vocab:
mean.append(weight * self.syn0norm[self.vocab[word].index])
all_words.add(self.vocab[word].index)
else:
raise KeyError("word '%s' not in vocabulary" % word)
if not mean:
raise ValueError("cannot compute similarity with no input")
mean = matutils.unitvec(array(mean).mean(axis=0)).astype(REAL)
dists = dot(self.syn0norm, mean)
if not topn:
return dists
best = matutils.argsort(dists, topn=topn + len(all_words), reverse=True)
# ignore (don't return) words from the input
result = [(self.index2word[sim], float(dists[sim])) for sim in best if sim not in all_words]
return result[:topn]
def most_similar_cosmul(self, positive=[], negative=[], topn=10):
"""
Find the top-N most similar words, using the multiplicative combination objective
proposed by Omer Levy and Yoav Goldberg in [4]_. Positive words still contribute
positively towards the similarity, negative words negatively, but with less
susceptibility to one large distance dominating the calculation.
In the common analogy-solving case, of two positive and one negative examples,
this method is equivalent to the "3CosMul" objective (equation (4)) of Levy and Goldberg.
Additional positive or negative examples contribute to the numerator or denominator,
respectively – a potentially sensible but untested extension of the method. (With
a single positive example, rankings will be the same as in the default most_similar.)
Example::
>>> trained_model.most_similar_cosmul(positive=['baghdad', 'england'], negative=['london'])
[(u'iraq', 0.8488819003105164), ...]
.. [4] Omer Levy and Yoav Goldberg. Linguistic Regularities in Sparse and Explicit Word Representations, 2014.
"""
self.init_sims()
if isinstance(positive, string_types) and not negative:
# allow calls like most_similar_cosmul('dog'), as a shorthand for most_similar_cosmul(['dog'])
positive = [positive]
all_words = set()
def word_vec(word):
if isinstance(word, ndarray):
return word
elif word in self.vocab:
all_words.add(self.vocab[word].index)
return self.syn0norm[self.vocab[word].index]
else:
raise KeyError("word '%s' not in vocabulary" % word)
positive = [word_vec(word) for word in positive]
negative = [word_vec(word) for word in negative]
if not positive:
raise ValueError("cannot compute similarity with no input")
# equation (4) of Levy & Goldberg "Linguistic Regularities...",
# with distances shifted to [0,1] per footnote (7)
pos_dists = [((1 + dot(self.syn0norm, term)) / 2) for term in positive]
neg_dists = [((1 + dot(self.syn0norm, term)) / 2) for term in negative]
dists = prod(pos_dists, axis=0) / (prod(neg_dists, axis=0) + 0.000001)
if not topn:
return dists
best = matutils.argsort(dists, topn=topn + len(all_words), reverse=True)
# ignore (don't return) words from the input
result = [(self.index2word[sim], float(dists[sim])) for sim in best if sim not in all_words]
return result[:topn]
def doesnt_match(self, words):
"""
Which word from the given list doesn't go with the others?
Example::
>>> trained_model.doesnt_match("breakfast cereal dinner lunch".split())
'cereal'
"""
self.init_sims()
words = [word for word in words if word in self.vocab] # filter out OOV words
logger.debug("using words %s" % words)
if not words:
raise ValueError("cannot select a word from an empty list")
vectors = vstack(self.syn0norm[self.vocab[word].index] for word in words).astype(REAL)
mean = matutils.unitvec(vectors.mean(axis=0)).astype(REAL)
dists = dot(vectors, mean)
return sorted(zip(dists, words))[0][1]
def __getitem__(self, words):
"""
Accept a single word or a list of words as input.
If a single word: returns the word's representations in vector space, as
a 1D numpy array.
Multiple words: return the words' representations in vector space, as a
2d numpy array: #words x #vector_size. Matrix rows are in the same order
as in input.
Example::
>>> trained_model['office']
array([ -1.40128313e-02, ...])
>>> trained_model[['office', 'products']]
array([ -1.40128313e-02, ...]
[ -1.70425311e-03, ...]
...)
"""
if isinstance(words, string_types):
# allow calls like trained_model['office'], as a shorthand for trained_model[['office']]
return self.syn0[self.vocab[words].index]
return vstack([self.syn0[self.vocab[word].index] for word in words])
def __contains__(self, word):
return word in self.vocab
def similarity(self, w1, w2):
"""
Compute cosine similarity between two words.
Example::
>>> trained_model.similarity('woman', 'man')
0.73723527
>>> trained_model.similarity('woman', 'woman')
1.0
"""
return dot(matutils.unitvec(self[w1]), matutils.unitvec(self[w2]))
def n_similarity(self, ws1, ws2):
"""
Compute cosine similarity between two sets of words.
Example::
>>> trained_model.n_similarity(['sushi', 'shop'], ['japanese', 'restaurant'])
0.61540466561049689
>>> trained_model.n_similarity(['restaurant', 'japanese'], ['japanese', 'restaurant'])
1.0000000000000004
>>> trained_model.n_similarity(['sushi'], ['restaurant']) == trained_model.similarity('sushi', 'restaurant')
True
"""
v1 = [self[word] for word in ws1]
v2 = [self[word] for word in ws2]
return dot(matutils.unitvec(array(v1).mean(axis=0)), matutils.unitvec(array(v2).mean(axis=0)))
def init_sims(self, replace=False):
"""
Precompute L2-normalized vectors.
If `replace` is set, forget the original vectors and only keep the normalized
ones = saves lots of memory!
Note that you **cannot continue training** after doing a replace. The model becomes
effectively read-only = you can call `most_similar`, `similarity` etc., but not `train`.
"""
if getattr(self, 'syn0norm', None) is None or replace:
logger.info("precomputing L2-norms of word weight vectors")
if replace:
for i in xrange(self.syn0.shape[0]):
self.syn0[i, :] /= sqrt((self.syn0[i, :] ** 2).sum(-1))
self.syn0norm = self.syn0
if hasattr(self, 'syn1'):
del self.syn1
else:
self.syn0norm = (self.syn0 / sqrt((self.syn0 ** 2).sum(-1))[..., newaxis]).astype(REAL)
def estimate_memory(self, vocab_size=None, report=None):
"""Estimate required memory for a model using current settings and provided vocabulary size."""
vocab_size = vocab_size or len(self.vocab)
report = report or {}
report['vocab'] = vocab_size * (700 if self.hs else 500)
report['syn0'] = vocab_size * self.vector_size * dtype(REAL).itemsize
if self.hs:
report['syn1'] = vocab_size * self.layer1_size * dtype(REAL).itemsize
if self.negative:
report['syn1neg'] = vocab_size * self.layer1_size * dtype(REAL).itemsize
report['total'] = sum(report.values())
logger.info("estimated required memory for %i words and %i dimensions: %i bytes",
vocab_size, self.vector_size, report['total'])
return report
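# Sketch (placeholder names): the estimate can also be requested up front for a
# hypothetical vocabulary size, e.g. to size hardware before a full build:
#
#     model.estimate_memory(vocab_size=1000000)   # dict with 'vocab', 'syn0', ..., 'total' in bytes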
@staticmethod
def log_accuracy(section):
correct, incorrect = len(section['correct']), len(section['incorrect'])
if correct + incorrect > 0:
logger.info("%s: %.1f%% (%i/%i)" %
(section['section'], 100.0 * correct / (correct + incorrect),
correct, correct + incorrect))
def accuracy(self, questions, restrict_vocab=30000, most_similar=most_similar):
"""
Compute accuracy of the model. `questions` is a filename where lines are
4-tuples of words, split into sections by ": SECTION NAME" lines.
See https://code.google.com/p/word2vec/source/browse/trunk/questions-words.txt for an example.
The accuracy is reported (=printed to log and returned as a list) for each
section separately, plus there's one aggregate summary at the end.
Use `restrict_vocab` to ignore all questions containing a word whose frequency
is not in the top-N most frequent words (default top 30,000).
This method corresponds to the `compute-accuracy` script of the original C word2vec.
"""
ok_vocab = dict(sorted(iteritems(self.vocab),
key=lambda item: -item[1].count)[:restrict_vocab])
ok_index = set(v.index for v in itervalues(ok_vocab))
sections, section = [], None
for line_no, line in enumerate(utils.smart_open(questions)):
# TODO: use level3 BLAS (=evaluate multiple questions at once), for speed
line = utils.to_unicode(line)
if line.startswith(': '):
# a new section starts => store the old section
if section:
sections.append(section)
self.log_accuracy(section)
section = {'section': line.lstrip(': ').strip(), 'correct': [], 'incorrect': []}
else:
if not section:
raise ValueError("missing section header before line #%i in %s" % (line_no, questions))
try:
a, b, c, expected = [word.lower() for word in line.split()] # TODO assumes vocabulary preprocessing uses lowercase, too...
except:
logger.info("skipping invalid line #%i in %s" % (line_no, questions))
if a not in ok_vocab or b not in ok_vocab or c not in ok_vocab or expected not in ok_vocab:
logger.debug("skipping line #%i with OOV words: %s" % (line_no, line.strip()))
continue
ignore = set(self.vocab[v].index for v in [a, b, c]) # indexes of words to ignore
predicted = None
# find the most likely prediction, ignoring OOV words and input words
sims = most_similar(self, positive=[b, c], negative=[a], topn=False)
for index in matutils.argsort(sims, reverse=True):
if index in ok_index and index not in ignore:
predicted = self.index2word[index]
if predicted != expected:
logger.debug("%s: expected %s, predicted %s", line.strip(), expected, predicted)
break
if predicted == expected:
section['correct'].append((a, b, c, expected))
else:
section['incorrect'].append((a, b, c, expected))
if section:
# store the last section, too
sections.append(section)
self.log_accuracy(section)
total = {
'section': 'total',
'correct': sum((s['correct'] for s in sections), []),
'incorrect': sum((s['incorrect'] for s in sections), []),
}
self.log_accuracy(total)
sections.append(total)
return sections
def __str__(self):
return "%s(vocab=%s, size=%s, alpha=%s)" % (self.__class__.__name__, len(self.index2word), self.vector_size, self.alpha)
def save(self, *args, **kwargs):
# don't bother storing the cached normalized vectors, recalculable table
kwargs['ignore'] = kwargs.get('ignore', ['syn0norm', 'table', 'cum_table'])
super(Word2Vec, self).save(*args, **kwargs)
save.__doc__ = utils.SaveLoad.save.__doc__
@classmethod
def load(cls, *args, **kwargs):
model = super(Word2Vec, cls).load(*args, **kwargs)
# update older models
if hasattr(model, 'table'):
delattr(model, 'table') # discard in favor of cum_table
if model.negative and hasattr(model, 'index2word'):
model.make_cum_table() # rebuild cum_table from vocabulary
if not hasattr(model, 'corpus_count'):
model.corpus_count = None
for v in model.vocab.values():
if hasattr(v, 'sample_int'):
break # already 0.12.0+ style int probabilities
else:
v.sample_int = int(round(v.sample_probability * 2**32))
del v.sample_probability
if not hasattr(model, 'syn0_lockf') and hasattr(model, 'syn0'):
model.syn0_lockf = ones(len(model.syn0), dtype=REAL)
if not hasattr(model, 'random'):
model.random = random.RandomState(model.seed)
if not hasattr(model, 'train_count'):
model.train_count = 0
model.total_train_time = 0
return model
class FakeJobQueue(object):
"""Pretends to be a Queue; does equivalent of work_loop in calling thread."""
def __init__(self, init_fn, job_fn):
self.inits = init_fn()
self.job_fn = job_fn
def put(self, job):
self.job_fn(job, self.inits)
class BrownCorpus(object):
"""Iterate over sentences from the Brown corpus (part of NLTK data)."""
def __init__(self, dirname):
self.dirname = dirname
def __iter__(self):
for fname in os.listdir(self.dirname):
fname = os.path.join(self.dirname, fname)
if not os.path.isfile(fname):
continue
for line in utils.smart_open(fname):
line = utils.to_unicode(line)
# each file line is a single sentence in the Brown corpus
# each token is WORD/POS_TAG
token_tags = [t.split('/') for t in line.split() if len(t.split('/')) == 2]
# ignore words with non-alphabetic tags like ",", "!" etc (punctuation, weird stuff)
words = ["%s/%s" % (token.lower(), tag[:2]) for token, tag in token_tags if tag[:2].isalpha()]
if not words: # don't bother sending out empty sentences
continue
yield words
class Text8Corpus(object):
"""Iterate over sentences from the "text8" corpus, unzipped from http://mattmahoney.net/dc/text8.zip ."""
def __init__(self, fname, max_sentence_length=1000):
self.fname = fname
self.max_sentence_length = max_sentence_length
def __iter__(self):
# the entire corpus is one gigantic line -- there are no sentence marks at all
# so just split the sequence of tokens arbitrarily: 1 sentence = 1000 tokens
sentence, rest = [], b''
with utils.smart_open(self.fname) as fin:
while True:
text = rest + fin.read(8192) # avoid loading the entire file (=1 line) into RAM
if text == rest: # EOF
sentence.extend(rest.split()) # return the last chunk of words, too (may be shorter/longer)
if sentence:
yield sentence
break
last_token = text.rfind(b' ') # last token may have been split in two... keep for next iteration
words, rest = (utils.to_unicode(text[:last_token]).split(),
text[last_token:].strip()) if last_token >= 0 else ([], text)
sentence.extend(words)
while len(sentence) >= self.max_sentence_length:
yield sentence[:self.max_sentence_length]
sentence = sentence[self.max_sentence_length:]
class LineSentence(object):
"""
Simple format: one sentence = one line; words already preprocessed and separated by whitespace.
"""
def __init__(self, source, max_sentence_length=10000, limit=None):
"""
`source` can be either a string or a file object. Clip the file to the first
`limit` lines (or not clipped at all if `limit` is None, the default).
Example::
sentences = LineSentence('myfile.txt')
Or for compressed files::
sentences = LineSentence('compressed_text.txt.bz2')
sentences = LineSentence('compressed_text.txt.gz')
"""
self.source = source
self.max_sentence_length = max_sentence_length
self.limit = limit
def __iter__(self):
"""Iterate through the lines in the source."""
try:
# Assume it is a file-like object and try treating it as such
# Things that don't have seek will trigger an exception
self.source.seek(0)
for line in itertools.islice(self.source, self.limit):
line = utils.to_unicode(line).split()
i = 0
while i < len(line):
yield line[i : i + self.max_sentence_length]
i += self.max_sentence_length
except AttributeError:
# If it didn't work like a file, use it as a string filename
with utils.smart_open(self.source) as fin:
for line in itertools.islice(fin, self.limit):
line = utils.to_unicode(line).split()
i = 0
while i < len(line):
yield line[i : i + self.max_sentence_length]
i += self.max_sentence_length
# Example: ./word2vec.py ~/workspace/word2vec/text8 ~/workspace/word2vec/questions-words.txt ./text8
if __name__ == "__main__":
logging.basicConfig(
format='%(asctime)s : %(threadName)s : %(levelname)s : %(message)s',
level=logging.INFO)
logging.info("running %s", " ".join(sys.argv))
logging.info("using optimization %s", FAST_VERSION)
# check and process cmdline input
program = os.path.basename(sys.argv[0])
if len(sys.argv) < 2:
print(globals()['__doc__'] % locals())
sys.exit(1)
infile = sys.argv[1]
from gensim.models.word2vec import Word2Vec # avoid referencing __main__ in pickle
seterr(all='raise') # don't ignore numpy errors
# model = Word2Vec(LineSentence(infile), size=200, min_count=5, workers=4)
model = Word2Vec(Text8Corpus(infile), size=200, min_count=5, workers=1)
if len(sys.argv) > 3:
outfile = sys.argv[3]
model.save(outfile + '.model')
model.save_word2vec_format(outfile + '.model.bin', binary=True)
model.save_word2vec_format(outfile + '.model.txt', binary=False)
if len(sys.argv) > 2:
questions_file = sys.argv[2]
model.accuracy(sys.argv[2])
logging.info("finished running %s", program)
|
kmike/gensim
|
gensim/models/word2vec.py
|
Python
|
gpl-3.0
| 72,370
|
[
"VisIt"
] |
66122b0b4e7a42f09504b532068c07dbb2921efeb92aae1535f350dc13a653b8
|
#!/software/python/3.3.3/bin/python3.3
import sys
import os
import argparse
import locations
import subprocess
import pickle
import pwd
from redhawk import *
# Second version of the script for testing RAIDER.
# Generates runs only; will use a separate script for testing.
#######################
# Globals: So we don't have to pass everything through parameters.
args = None # Command line arguments object
Locations = locations.Locations
progress_fp = None
job_log_dir = None
seed_map = {} # Maps seed to (index,short rep.) pairs
seed_list = None # Sorted list of seeds
#######################
# Defaults
walltime_default = "00:30:00"
blast_walltime_default = "4:00:00"
rs_walltime_default = "4:00:00"
rm_walltime_default = "4:00:00"
delay_default = 300 # Number of seconds to sleep when cycling on a redhawk wait
tool_prefix = {'phRAIDER' : 'phRA', 'RepeatScout' : 'RS', 'RAIDER': 'RA', 'pre-phRAIDER' : 'prephRA', 'naive':'na'}
num_jobs = 0
#######################
# Useful utilitiy functions
user_id = pwd.getpwuid(os.getuid())[0]  # login name of the current user
def tmp_dir():
global num_jobs
name = "$TMPDIR/" + user_id + "." + str(num_jobs)
num_jobs += 1
return name
def file_base(file):
"""Extract the name of a file froma directory"""
return os.path.basename(file)
def file_dir(file):
"""Extract the directory a file is contained in"""
return file.rstrip(file_base(file)).rstrip("/")
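# For example: file_base("/data/hg18/chr1.fa") -> "chr1.fa" and
# file_dir("/data/hg18/chr1.fa") -> "/data/hg18" (the paths are illustrative).
# Note that file_dir() relies on str.rstrip with a character set, so it assumes the
# directory part does not end in characters that also occur in the file name.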
def make_dir(DIR):
if not os.path.exists(DIR):
os.makedirs(DIR)
return DIR
def convert_seed(seed):
"""Convert an abriviated seed to a full seed (e.g. "1{2}0{3}1{2}" => "1100011" """
i = 0
while (i < len(seed)-1):
if seed[i+1] == '^':
j = i+2
assert seed[j] == "{"
k = j+1
while seed[k] != '}':
k += 1
n = int(seed[j+1:k])
seed = seed[:i] + seed[i]*n + seed[k+1:]
i += 1
return seed
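# Worked example for convert_seed(), following the expansion loop above:
#
#     convert_seed("1^{2}0^{3}1^{2}")  ->  "1100011"
#
# i.e. every "x^{n}" run is replaced by n copies of the character x.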
def seed_weight(seed):
return sum([x == '1' for x in seed])  # seed is a string, so count the '1' characters
def cmp_seed(s1, s2):
d = len(s1) - len(s2)
if (d != 0):
return d < 0
d = seed_weight(s1) - seed_weight(s2)
if d != 0:
return d < 0
return s1 < s2
#######################
# Command line parsing
def parse_params():
"""Parse command line arguments, set them equal to the global args, and set certain global variables"""
parser = argparse.ArgumentParser(description = "Evaluate RAIDER against RepeatScout")
# TOOL SELECTION
parser_tools = parser.add_argument_group("tool selection (all on by default)")
parser_tools.add_argument('-R', '--raider_on', dest = 'run_raider', action = 'store_true', help = 'Turn RAIDER on', default = False)
parser_tools.add_argument('--R2', '--phR', '--phraider_on', dest = 'run_phraider', action = 'store_true', help = 'Turn phRAIDER on', default = False)
parser_tools.add_argument('--RS', '--repscout_on', dest = 'run_repscout', action = 'store_true', help = 'Turn RepScout on', default = False)
    parser_tools.add_argument('--PR', '--preraider_on', dest = 'run_prephraider', action = 'store_true', help = 'Turn pre-phRAIDER on', default = False)
parser_tools.add_argument('-N', '--naive_on', dest = 'run_naive', action = 'store_true', help = 'Turn on naive tool', default = False)
#parser_tools.add_argument('--AR', '--araider_on', dest = 'run_araider', action = 'store_true', help = 'Turn ARAIDER on', default = False)
#parser_tools.add_argument('-B', '--bigfoot_on', dest = 'run_bigfoot', action = 'store_true', help = 'Turn BIGFOOT on', default = False)
#parser_tools.add_argument('-P', '--piler_on', dest = 'run_piler', action = 'store_true', help = 'Turn PILER on', default = False)
#parser_tools.add_argument('-A', '--all_tools', dest = 'all_tools', action = 'store_true', help = 'Turn all tools on (overide all other tool arguments)', default = False)
#parser_tools.add_argument('--A2', '--all_tools2', dest = 'all_tools2', action = 'store_true', help = 'Turn all tools on except araider (overide all other tool arguments)', default = False)
#parser_tools.add_argument('--tl', '--time_limit', dest = 'time_limit', help = 'Redhawk time limit (max: 400:00:00 default: 4:00:00)', default = walltime_default)
# I/O ARGUMENTs
parser_io = parser.add_argument_group("i/o arguments")
parser_io.add_argument('-r', '--results_dir', dest = "results_dir", help = "Directory containing all results", default = "EVAL")
#parser_io.add_argument('--nuke', dest ='nuke', action = "store_true", help = "Nuke the results directory", default = False)
# RAIDER ARGUMENTS
raider_argument = parser.add_argument_group("RAIDER parameters")
raider_argument.add_argument('-f', nargs = "+", type = int, help = "E.R. occurrence threshold", default = [2])
raider_argument.add_argument('--pre', '--pre_scan', action = 'store_true', help = "Use pre-scan version of raider", default = False)
raider_argument.add_argument('--mem', action = 'store_true', help = "Use large memory-nodes", default = False);
    raider_argument.add_argument('--rnn', type = int, dest = 'raider_num_nodes', help = "Number of nodes to use", default = 1);
raider_argument.add_argument("--mn", '--max_nodes', dest = "max_nodes", action="store_true", help="Reserve all nodes of a processor for each tool (disabled by default).", default=False)
consensus_group = raider_argument.add_mutually_exclusive_group(required = False)
consensus_group.add_argument('--sc', '--standard_consensus', dest = "consensus_type", action = 'store_const', const = 1, help = "Run standard consensus tool", default = 0)
consensus_group.add_argument('--cd', '--composite_discover', dest = "consensus_type", action = 'store_const', const = 2, help = "Run composite discover tool")
seed_group = raider_argument.add_mutually_exclusive_group(required = False)
seed_group.add_argument('-s', '--seed', dest = "seed", help = "Spaced seed string", default = "111111111111111111111111111111")
seed_group.add_argument('--sf', '--seed_file', dest = 'seed_file', help = 'File containing raider seeds', default = None)
# RAIDER2 ARGUMENTS
raider2_argument = parser.add_argument_group("RAIDER2 parameters")
raider2_argument.add_argument('--age', type = int, help="Use older version of raider2", default=1)
raider2_argument.add_argument('--aa', '--all_ages', dest="all_ages", action="store_true", help="Run all ages of raider2", default=False) # type = int, help="Use older version of raider", default=0)
#raider2_argument.add_argument('--multi', '--multi_seed', dest="multi_seed", action="store_true", help="Run all seeds in seed file concurrently",default=False)
raider2_argument.add_argument('--na', '--no_family_array', dest="family_array", action="store_false", help="Disable family array in Raider2", default=True)
raider2_argument.add_argument('--ex', '--excise', dest="excising", action="store_true", help="Enable excising in RAIDER2", default=False)
raider2_argument.add_argument('--no', '--no_overlaps', dest="overlaps", action="store_false", help="Do not require overlaps in RAIDER2", default=True)
raider2_argument.add_argument('--tu', '--tie_up', dest="tieup", action="store_true", help="Enable alternative tie ups", default=False)
raider2_argument.add_argument('--ps', '--prosplit', dest="prosplit", action="store_true", help="Enable proactive splitting(disabled by default).", default=False)
raider2_argument.add_argument("--pf", '--prevfam', dest="prevfam", action="store_true", help="Enable pointers to prev family (disabled by default).", default=False)
# REPSCOUT ARGUMENTS
repscout_argument = parser.add_argument_group("REPSCOUT parameters")
#repscout_argument.add_argument('--rs_min', type = int, help = "Minimum repeat length for repscout.", default = 10)
repscout_argument.add_argument('--rs_min_freq', type = int, help = "Minimum repeat frequency for repscout.", default = 3)
repscout_argument.add_argument('--rs_walltime', help = "RepeatScout walltime.", default = rs_walltime_default)
repscout_argument.add_argument('--rs_filters', type = int, help = "0: no filters, 1: 1 filter, 2: both filters.", default = 0)
# REPEAT MASKER ARGUMENTS
repeatmasker_arguments = parser.add_argument_group("RepeatMasker parameters")
repeatmasker_arguments.add_argument('--rm', '--run_rm', dest = 'run_rm', action="store_true", help = "run RepeatMasker", default = False)
repeatmasker_arguments.add_argument('--masker_dir', help = "Repeat masker output directory", default = None)
    repeatmasker_arguments.add_argument('-p', '--pa', type = int, help = "Number of processors to use", default = 1)
repeatmasker_arguments.add_argument('--rwt', '--rm_walltime', dest = "rm_walltime", help = "Wall time limit for repeat masker", default = rm_walltime_default)
repeatmasker_arguments.add_argument('--co', '--cutoff', dest = "rm_cutoff", type = int, help = "RepeatMasker cutoff parameter", default = None);
# BLAST ARGUMENTS
blast_arguments = parser.add_argument_group("BLAST parameters")
blast_arguments.add_argument('--bl', '--run_blast', dest = 'run_blast', action = "store_true", help = "run BLAST", default = False)
    blast_arguments.add_argument('--evalue', dest = 'evalue', help = "BLAST evalue", default = "0.000001");
blast_arguments.add_argument('--short', dest = 'short', action = 'store_false', help = "Turn off blast-short on blast run", default = True)
blast_arguments.add_argument('--max_target', dest = 'max_target', action = 'store', help = "BLAST --max_target option", default = '999999999') # HACK!!! Need to fix this
blast_arguments.add_argument('--nt', '--num_threads', dest = 'num_threads', action = 'store', type = int, help = "BLAST --num_threads argument", default = 1)
# DEBUGGING ARGUMENTS
debug_group = parser.add_argument_group(title = "debugging")
debug_group.add_argument('--sp', '--show_progress', dest = 'show_progress', action = 'store_true', help = "Print reports on program progress to stderr", default = False)
debug_group.add_argument('--dry', '--dry_run', dest = 'dry_run', action = 'store_true', help = "Dry run -- don't actually launch jobs", default = False)
# Positional arguments
parser.add_argument('data_files', nargs = '+', help = "Data files to process")
global args;
    args = parser.parse_args()
def print_progress(L):
"""Print s to progress_fp. Also print it to stdout if args.sp is true."""
progress_fp.write("\n".join(L) + "\n\n");
progress_fp.flush()
if args.show_progress:
sys.stdout.write("\n".join(L) + "\n\n")
sys.stdout.flush()
def launch_job(cmd, title, base_dir, walltime = walltime_default, ppn = 1, bigmem = False, depend = None, modules = None, attrs = None):
"""Launch a redhawk jobs.
* cmd: Command to be launched
* title: Used as the bases for creating a job name and redhawk-related files
* base_dir: Directory to place all redhawk-related files
* walltime: walltime limit
* ppn: Number of processors requested
* bigmem: If true, run only on a large-memory node
    * depend: A list of job objects which must terminate before this job starts
* attrs: A map of new attribute/value pairs to add to the PBS object after creation (before pickling)
Returns:
    * The unpickled pbs object if the job has already been launched and is still showing up in qstat.
    * None if the job has finished and is no longer showing up in qstat.
    * A newly created pbs object otherwise. (The job had never been launched, or crashed in the middle.)
PROBLEM: will still screw up if the job launched, failed, and created a (bogus) ofile.
"""
log_file = job_log_dir + "/" + title
if os.path.exists(log_file):
with open(log_file, "br") as fp:
p = loadPBS(fp)[0]
if p.checkJobState():
return p
if p.rfile_exists():
return None
if p.efile_exists() and os.stat(p.efile_name()).st_size > 0:
return None
batch_file = base_dir + "/" + title + ".job"
job_name = title;
stdout_file = base_dir + "/" + title + ".stdout"
stderr_file = base_dir + "/" + title + ".stderr"
res_file = base_dir + "/" + title + ".res"
print_progress(["# " + batch_file, "# " + stdout_file, "# " + stderr_file, cmd]);
p = pbsJobHandler(batch_file = batch_file, executable = cmd, job_name = job_name,
stdout_file = stdout_file, stderr_file = stderr_file, res_file = res_file,
walltime = walltime, depends = depend,
mem = Locations['high_mem_arch'] if bigmem else False,
RHmodules = modules, ppn = ppn, mail = "ae")
if attrs:
for key,value in attrs.items():
setattr(p, key, value)
if (not args.dry_run):
p.submit(preserve = True, delay = delay_default, job_limit = 1000)
with open(log_file, "wb") as fp:
storePBS([p], fp)
return p
def setup():
global args
global job_log_dir;
job_log_dir = args.results_dir + "/job_log/"
global debug_file
debug_file = args.results_dir + "/debug.txt"
#if args.nuke and os.path.exists(args.results_dir):
# subprocess.call("rm -r %s" % args.results_dir, shell = True)
if not os.path.exists(args.results_dir):
os.makedirs(args.results_dir)
if not os.path.exists(job_log_dir):
os.makedirs(job_log_dir)
if os.path.exists(args.results_dir + "/f.txt"):
S = {int(f.rstrip()) for f in open(args.results_dir + "/f.txt")}
args.f = list(S | set(args.f))
open(args.results_dir + "/f.txt", "w").write("\n".join([str(x) for x in args.f]) + "\n")
global progress_fp
progress_fp = open(debug_file, "w")
global seed_map
if os.path.exists(args.results_dir + "/seed_file.txt"):
with open(args.results_dir + "/seed_file.txt") as fp:
seed_map = {convert_seed(seed):(int(i),seed) for line in fp for i,seed in [re.split("\t+", line.strip())]}
else:
seed_map = {}
seed_map2 = {}
offset = len(seed_map)
if args.seed_file:
for line in open(args.seed_file):
seed = convert_seed(line.rstrip())
if seed not in seed_map:
seed_map[seed] = (offset,line.rstrip())
seed_map2[seed] = (offset,line.rstrip())
offset += 1
else:
seed_map2[seed] = (seed_map[seed][0],line.rstrip())
else:
if args.seed not in seed_map:
seed_map[convert_seed(args.seed)] = (offset,args.seed)
seed_map2[convert_seed(args.seed)] = (offset,args.seed)
with open(args.results_dir + "/seed_file.txt", "w") as fp:
fp.write("\n".join([str(x[0]) + "\t" + x[1] for x in sorted(seed_map.values(), key = lambda x: x[0])]))
global seed_list
seed_list = sorted(seed_map2.keys(), key = lambda v: seed_map2[v][0])
# Create data_files.txt
data_files = args.results_dir + "/data_files.txt"
if os.path.exists(data_files):
S = {re.split("\s+", line.rstrip())[1] for line in open(data_files)}
else:
        S = set()
with open(data_files, "a") as fp:
for file in args.data_files:
if file not in S:
fp.write(file_base(file).rstrip(".fa") + "\t" + file + "\n")
raider_cmd = "mkdir {TMPDIR}; {time} {raider} -q -c {f} -s {seed} {input_file} {TMPDIR}; ls {TMPDIR}; cp {TMPDIR}/elements {output_dir}/; rm -r -f {TMPDIR}"
consensus_cmd = "{python} consensus_seq.py -s {data_file} -e {elements_file} {consensus_txt} {consensus_fa}"
repeat_masker_cmd = "mkdir {TMPDIR}; cd {TMPDIR}; {RepeatMasker} -nolow -lib $PBS_O_WORKDIR/{library_file} {cutoff} -pa {pa} -dir {TMPDIR} $PBS_O_WORKDIR/{seq_file}; mv {TMPDIR}/{seq_file_base}.out $PBS_O_WORKDIR/{output_dir}/; rm -r -f {TMPDIR}"
blast_format = "6 qseqid sseqid qstart qend qlen sstart send slen"
blast_cmd = "mkdir {TMPDIR}; cd {TMPDIR}; {blast} -out {TMPDIR}/{blast_file} -outfmt \"{blast_format}\" -query $PBS_O_WORKDIR/{consensus_file} -db $PBS_O_WORKDIR/{db_file} -evalue {evalue} {short} -max_target_seqs {max_target} -num_threads {num_threads}; " + \
"bzip2 {TMPDIR}/{blast_file}; " + "cp {TMPDIR}/{blast_file}.bz2 $PBS_O_WORKDIR/{blast_dir}/; rm -r -f {TMPDIR}"
composite_cmd = "mkdir {TMPDIR}; {time} {composite_discover} {elements_file} {seq_file} {TMPDIR}/{output_file}; mv {TMPDIR}/{output_file} {consensus_file}; rm -r -f {TMPDIR}"
def raider_pipeline(raider_exe, input_file, seed, f):
##########################
# SETUP
input_base = file_base(input_file).strip(".fa")
raider_dir = make_dir(args.results_dir + "/" + raider_exe.upper()); # Directory of RAIDER results
title = "{prefix}.{file}.s{seed_index}.f{f}".format(prefix = tool_prefix[raider_exe], file=input_base, seed_index=seed_map[seed][0], f=f)
seed_index = seed_map[seed][0];
consensus_name = input_base + ".s" + str(seed_index) + ".f" + str(f)
elements_dir = make_dir(raider_dir + "/" + consensus_name.upper())
consensus_txt = consensus_name + ".consensus.txt"
consensus_fa = consensus_name + ".consensus.fa"
database_file = input_file.rstrip(".fa") + ".rptseq.fa"
blast_dir = elements_dir
blast_file = consensus_name + ".blast.6.txt"
rm_dir = elements_dir
##########################
# Step 1: Run phRAIDER
cmd1 = raider_cmd.format(raider=Locations[raider_exe], time=Locations['time_cmd'], f=f, seed=seed,
input_file=input_file, output_dir=elements_dir, TMPDIR = tmp_dir())
if raider_exe == "RAIDER": # Hack -- I should modify the raider code
cmd1 = re.sub("-s", " ", cmd1)
title1 = title;
    p1 = launch_job(cmd=cmd1, title=title1, base_dir=elements_dir, ppn = Locations['proc_per_node'] if args.max_nodes else args.raider_num_nodes, bigmem = args.mem, attrs = {'elements': elements_dir + '/elements'})
if args.consensus_type == 0:
return None
if args.consensus_type == 1:
cmd2 = consensus_cmd.format(python=Locations['python'], data_file=input_file,
elements_file=elements_dir + "/elements",
consensus_txt=elements_dir + "/" + consensus_txt,
consensus_fa=elements_dir + "/" + consensus_fa)
else:
cmd2 = composite_cmd.format(time = Locations['time_cmd'], TMPDIR = tmp_dir(), composite_discover = Locations['CompositeDiscover'], elements_file = elements_dir + "/elements", seq_file = input_file, output_dir = elements_dir, output_file = consensus_fa, consensus_file = elements_dir + "/" + consensus_fa)
title2 = "cd." + title;
p2 = launch_job(cmd=cmd2, title=title2, base_dir=elements_dir, depend=[p1], bigmem = args.mem, ppn = Locations['proc_per_node'] if args.max_nodes else 1, attrs = {'consensus':consensus_fa})
# Step 3: Apply repeat masker consensus output
if args.run_rm:
cmd3 = repeat_masker_cmd.format(RepeatMasker = Locations['RepeatMasker'],
library_file = elements_dir + "/" + consensus_fa, pa = args.pa,
output_dir = rm_dir,
seq_file = input_file, seq_file_base = file_base(input_file), TMPDIR = tmp_dir(),
cutoff = "" if args.rm_cutoff is None else "-cutoff " + str(args.rm_cutoff))
title3 = "rm." + title
p3 = launch_job(cmd=cmd3, title=title3, base_dir=rm_dir, walltime = args.rm_walltime, ppn = args.pa, bigmem = False,
modules = Locations['rm_modules'], depend=[p2], attrs={'rm_output':rm_dir + '/' + input_base + ".fa.out"})
# Step 4: Apply blast to consensus sequences:
if args.run_blast:
cmd4 = blast_cmd.format(blast = Locations['blast'], blast_format = blast_format, blast_dir = blast_dir, blast_file = blast_file, consensus_file = elements_dir + "/" + consensus_fa,
db_file = database_file, evalue = args.evalue, short = "-task blastn-short" if args.short else "", max_target = args.max_target,
num_threads = args.num_threads, TMPDIR = tmp_dir())
title4 = "bl." + title
p4 = launch_job(cmd=cmd4, title=title4, base_dir=elements_dir, modules=Locations['blast_modules'], depend=[p2], ppn = args.num_threads, walltime = blast_walltime_default)
naive_cmd = "mkdir {TMPDIR}; {time} {naive} {input_file} {seed} {f} {TMPDIR}/na.elements; ls {TMPDIR}; cp {TMPDIR}/na.elements {output_dir}/; rm -r -f {TMPDIR}"
def naive_pipeline(input_file, seed, f):
##########################
# SETUP
input_base = file_base(input_file).strip(".fa")
naive_dir = make_dir(args.results_dir + "/NAIVE"); # Directory of NAIVE results
title = "{prefix}.{file}.s{seed_index}.f{f}".format(prefix = tool_prefix['naive'], file=input_base, seed_index=seed_map[seed][0], f=f)
seed_index = seed_map[seed][0];
consensus_name = input_base + ".s" + str(seed_index) + ".f" + str(f)
elements_dir = make_dir(naive_dir + "/" + consensus_name.upper())
consensus_txt = consensus_name + ".consensus.txt"
consensus_fa = consensus_name + ".consensus.fa"
database_file = input_file.rstrip(".fa") + ".rptseq.fa"
blast_dir = elements_dir
blast_file = consensus_name + ".blast.6.txt"
rm_dir = elements_dir
##########################
# Step 1: Run naive
cmd1 = naive_cmd.format(naive=Locations['naive'], time=Locations['time_cmd'], f=f, seed=seed,
input_file=input_file, output_dir=elements_dir, TMPDIR = tmp_dir())
title1 = title;
    p1 = launch_job(cmd=cmd1, title=title1, base_dir=elements_dir, ppn = Locations['proc_per_node'] if args.max_nodes else 1, bigmem = args.mem, attrs = {'elements': elements_dir + '/na.elements'})
if args.consensus_type == 0:
return None
if args.consensus_type == 1:
cmd2 = consensus_cmd.format(python=Locations['python'], data_file=input_file,
elements_file=elements_dir + "/na.elements",
consensus_txt=elements_dir + "/" + consensus_txt,
consensus_fa=elements_dir + "/" + consensus_fa)
else:
        cmd2 = composite_cmd.format(time = Locations['time_cmd'], TMPDIR = tmp_dir(), composite_discover = Locations['CompositeDiscover'], elements_file = elements_dir + "/na.elements", seq_file = input_file, output_dir = elements_dir, output_file = consensus_fa, consensus_file = elements_dir + "/" + consensus_fa)
    title2 = "cd." + title;
    p2 = launch_job(cmd=cmd2, title=title2, base_dir=elements_dir, depend=[p1], bigmem = args.mem, ppn = Locations['proc_per_node'] if args.max_nodes else 1, attrs = {'consensus':consensus_fa})
# Step 3: Apply repeat masker consensus output
if args.run_rm:
cmd3 = repeat_masker_cmd.format(RepeatMasker = Locations['RepeatMasker'],
library_file = elements_dir + "/" + consensus_fa, pa = args.pa,
output_dir = rm_dir,
seq_file = input_file, seq_file_base = file_base(input_file), TMPDIR = tmp_dir(),
cutoff = "" if args.rm_cutoff is None else "-cutoff " + str(args.rm_cutoff))
title3 = "rm." + title
p3 = launch_job(cmd=cmd3, title=title3, base_dir=rm_dir, walltime = args.rm_walltime, ppn = args.pa, bigmem = False,
modules = Locations['rm_modules'], depend=[p2], attrs={'rm_output':rm_dir + '/' + input_base + ".fa.out"})
# Step 4: Apply blast to consensus sequences:
if args.run_blast:
cmd4 = blast_cmd.format(blast = Locations['blast'], blast_format = blast_format, blast_dir = blast_dir, blast_file = blast_file, consensus_file = elements_dir + "/" + consensus_fa,
db_file = database_file, evalue = args.evalue, short = "-task blastn-short" if args.short else "", max_target = args.max_target,
num_threads = args.num_threads, TMPDIR = tmp_dir())
title4 = "bl." + title
p4 = launch_job(cmd=cmd4, title=title4, base_dir=elements_dir, modules=Locations['blast_modules'], depend=[p2], ppn = args.num_threads, walltime = blast_walltime_default)
#################################
build_lrm_cmd = "{time} {build_lmer_table_exe} -min {min} -sequence {seq_file} -freq {lmer_output}"
rptscout_cmd = "mkdir {TMPDIR}; cd {TMPDIR}; {time} $PBS_O_WORKDIR/{RptScout_exe} -sequence $PBS_O_WORKDIR/{seq_file} -freq $PBS_O_WORKDIR/{lmer_output} -output {TMPDIR}/{REPOUT}; cp {TMPDIR}/{REPOUT} $PBS_O_WORKDIR/{OUTDIR}/; rm -r -f {TMPDIR}"
filter1_cmd = "{filter} {input} > {filter_output}"
filter2_cmd = "cat {filtered} | {filter} --cat={rm_output} --thresh={thresh} > {filter_output}"
def rptscout_pipeline(input_file, f):
input_base = file_base(input_file).rstrip(".fa")
rptscout_dir = make_dir(args.results_dir + "/RPT_SCT")
title = "{prefix}.{file}.s0.f{f}".format(prefix = tool_prefix['RepeatScout'], file=input_base, f=f)
output_dir = make_dir((rptscout_dir + "/" + input_base + ".s0.f" + str(f)).upper())
if args.rs_filters > 0:
rptscout_dir1 = make_dir(args.results_dir + "/RPT_SCT1")
output_dir1 = make_dir((rptscout_dir1 + "/" + input_base + ".s0.f" + str(f)).upper())
else:
rptscout_dir1 = ""
output_dir1 = ""
if args.rs_filters > 1:
rptscout_dir2 = make_dir(args.results_dir + "/RPT_SCT2")
output_dir2 = make_dir((rptscout_dir2 + "/" + input_base + ".s0.f" + str(f)).upper())
else:
rptscout_dir2 = ""
output_dir2 = ""
lmer_output = output_dir + "/" + input_base + ".freq.fa"
#output_dir1 = (output_dir1 + "/" + input_base + ".s0.f" + str(f)).upper()
#output_dir2 = (output_dir2 + "/" + input_base + ".s0.f" + str(f)).upper()
rpt_sct_out = input_base + ".s0.f" + str(f) + ".repscout.fa"
output = output_dir
output1 = output_dir1 if output_dir1 else ""
output2 = output_dir2 if output_dir2 else ""
database_file = input_file.rstrip(".fa") + ".rptseq.fa"
blast_dir = output_dir
blast_dir1 = output_dir1
blast_dir2 = output_dir2
blast_file = input_base + ".s0.f" + str(f) + ".RS.blast.6.txt"
#blast_file1 = input_base + ".s0.f" + str(f) + ".RS.blast.6.txt"
#blast_file2 = input_base + ".s0.f" + str(f) + ".RS.blast.6.txt"
filter1_output = output_dir1 + "/" + input_base + ".s0.f" + str(f) + ".rptsct.filtered1.fa"
filter2_output = output_dir2 + "/" + input_base + ".s0.f" + str(f) + ".rptsct.filtered2.fa"
rm_dir = output_dir
rm_dir1 = output_dir1
rm_dir2 = output_dir2
# Step 1: Run build_lmer_table
cmd1 = build_lrm_cmd.format(time=Locations['time_cmd'], build_lmer_table_exe=Locations['build_lmer_table'], min=f, seq_file=input_file, lmer_output=lmer_output)
title1 = "lmer." + title
p1 = launch_job(cmd=cmd1, title=title1, base_dir=output_dir, ppn = 4)
# Step 2: Run RepeatScout
cmd2 = rptscout_cmd.format(time=Locations['time_cmd'], RptScout_exe=Locations['RptScout'], seq_file=input_file, lmer_output=lmer_output, TMPDIR = tmp_dir(), REPOUT = rpt_sct_out, OUTDIR = output)
title2 = title
p2 = launch_job(cmd=cmd2, title=title2, base_dir=output_dir, walltime = args.rs_walltime, bigmem = args.mem, depend=[p1], ppn = Locations['proc_per_node'] if args.max_nodes else 1)
# Step 3: Run repeatmasker
if (args.run_rm):
cmd3 = repeat_masker_cmd.format(RepeatMasker=Locations['RepeatMasker'], library_file=output +"/" + rpt_sct_out, output_dir=rm_dir, seq_file=input_file, seq_file_base = file_base(input_file), pa = args.pa, TMPDIR = tmp_dir(), cutoff = "" if args.rm_cutoff is None else "-cutoff " + str(args.rm_cutoff))
title3 = "rm." + title
p3 = launch_job(cmd=cmd3, title=title3, base_dir=output_dir, walltime = args.rm_walltime, ppn = args.pa, bigmem = False, modules = Locations['rm_modules'], depend=[p2])
else:
p3 = None
# Step 4: Apply blast
if args.run_blast:
cmd4 = blast_cmd.format(blast = Locations['blast'], blast_format = blast_format, blast_dir = blast_dir, blast_file = blast_file, consensus_file = output + "/" + rpt_sct_out,
db_file = database_file, evalue = args.evalue, short = "-task blastn-short" if args.short else "", max_target = args.max_target,
num_threads = args.num_threads, TMPDIR = tmp_dir())
title4 = "bl." + title
p4 = launch_job(cmd=cmd4, title=title4, base_dir=output_dir, modules=Locations['blast_modules'], depend=[p2], ppn = args.num_threads, walltime = blast_walltime_default)
else:
p4 = None
# Filter 1
if args.rs_filters >= 1:
sys.stderr("Filter 1 not working")
exit(1)
cmd5 = filter1_cmd.format(filter=Locations['filter_stage-1'], input=output, filter_output = filter1_output)
title5 = "f1." + title
p5 = launch_job(cmd=cmd5, title=title5, base_dir=output_dir1, depend=[p2])
if args.run_rm or args.rs_filters >= 2:
cmd6 = repeat_masker_cmd.format(RepeatMasker=Locations['RepeatMasker'], library_file=filter1_output, output_dir=rm_dir1, seq_file=input_file, seq_file_base = file_base(input_file), pa = args.pa, TMPDIR = tmp_dir(), cutoff = "" if args.rm_cutoff is None else "-cutoff " + str(args.rm_cutoff))
title6 = "rm1." + title
p6 = launch_job(cmd=cmd6, title=title6, base_dir=output_dir2, walltime = args.rm_walltime, ppn = args.pa, bigmem = False, modules = Locations['rm_modules'], depend=[p2])
else:
p6 = None
if args.run_blast:
cmd7 = blast_cmd.format(blast = Locations['blast'], blast_format = blast_format, blast_dir = blast_dir1, blast_file = blast_file,
consensus_file = filter1_output, db_file = database_file, evalue = args.evalue,
short = "-task blastn-short" if args.short else "", max_target = args.max_target, num_threads = args.num_threads,
TMPDIR = tmp_dir())
title7 = "l1." + title
p7 = launch_job(cmd=cmd7, title=title7, base_dir=output_dir1, modules=Locations['blast_modules'], depend=[p6], ppn = args.num_threads, walltime = blast_walltime_default)
else:
p7 = None
# Step 4: Filter 2
if args.rs_filters >= 2:
# Now: Run second filter
cmd8 = filter2_cmd.format(filtered=filter1_output, filter = Locations['filter_stage-2'], rm_output=rm_dir1 + "/" + input_base + ".fa.out", thresh="5",
filter_output=filter2_output)
title8 = "f2." + title
p8 = launch_job(cmd=cmd8, title=title8, base_dir=output_dir2, depend=[p6])
if (args.run_rm):
cmd9 = repeat_masker_cmd.format(RepeatMasker=Locations['RepeatMasker'], library_file=filter2_output, output_dir=rm_dir2, seq_file=input_file, seq_file_base = file_base(input_file), pa = args.pa, TMPDIR = tmp_dir(), cutoff = "" if args.rm_cutoff is None else "-cutoff " + str(args.rm_cutoff))
title9 = "m2." + title
p9 = launch_job(cmd=cmd9, title=title9, base_dir=output_dir2, walltime = args.rm_walltime, ppn = args.pa, bigmem = False, modules = Locations['rm_modules'], depend=[p8])
else:
p9 = None
if args.run_blast:
cmd10 = blast_cmd.format(blast = Locations['blast'], blast_format = blast_format, blast_dir = blast_dir2, blast_file=blast_file,
consensus_file = filter2_output, db_file = database_file, evalue = args.evalue,
short = "-task blastn-short" if args.short else "", max_target = args.max_target, num_threads = args.num_threads,
TMPDIR = tmp_dir())
title10 = "bl2." + title
p10 = launch_job(cmd=cmd10, title=title10, base_dir=output_dir2, modules=Locations['blast_modules'], depend=[p8], ppn = args.num_threads, walltime = blast_walltime_default)
else:
p10 = None
####################################################
if __name__ == "__main__":
parse_params()
setup()
for file in args.data_files:
for f in args.f:
if args.run_repscout:
rptscout_pipeline(file, f)
if args.run_raider:
for seed in seed_list:
raider_pipeline('RAIDER', file, seed, f)
if args.run_phraider:
for seed in seed_list:
raider_pipeline('phRAIDER', file, seed, f)
if args.run_prephraider:
for seed in seed_list:
raider_pipeline('pre-phRAIDER', file, seed, f)
if args.run_naive:
for seed in seed_list:
naive_pipeline(file, seed, f);
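# Example invocation (hypothetical file names; flags correspond to the argparse options defined above):
#   python testing_pipeline.py --R2 --RS --rm --bl -f 2 5 --sf seeds.txt -r EVAL chr22.fa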
|
karroje/RAIDER_eval
|
testing_pipeline.py
|
Python
|
gpl-3.0
| 33,390
|
[
"BLAST"
] |
0e272d5df23979f8d4c3407ea637dfaa0895e1571371e57fcddca88710a9c841
|
"""
Write a function that takes an unsigned integer and
returns the number of '1' bits it has
(also known as the Hamming weight).
For example, the 32-bit integer '11' has binary
representation 00000000000000000000000000001011,
so the function should return 3.
T(n)- O(k) : k is the number of 1s present in binary representation.
NOTE: this complexity is better than O(log n).
e.g. for n = 00010100000000000000000000000000
only 2 iterations are required.
Number of loops is
equal to the number of 1s in the binary representation."""
def count_ones_recursive(n):
    """Using Brian Kernighan’s Algorithm. (Recursive Approach)"""
    if not n: return 0
    return 1 + count_ones_recursive(n & (n - 1))
def count_ones_iterative(n):
    """Using Brian Kernighan’s Algorithm. (Iterative Approach)"""
    count = 0
    while n:
        n &= (n - 1)
        count += 1
    return count
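# Illustrative usage (matches the example in the module docstring): 11 == 0b1011,
# so both variants report three set bits.
#   count_ones_recursive(11)  ->  3
#   count_ones_iterative(11)  ->  3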
|
marcosfede/algorithms
|
bit/count_ones.py
|
Python
|
gpl-3.0
| 867
|
[
"Brian"
] |
96f538fe735ce4bd0af147cb872183ffdb10ffc7b39f2c630c585c300b5d02de
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Lenovo, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# Module to send Conditional CLI commands to Lenovo Switches
# Lenovo Networking
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cnos_conditional_command
author: "Dave Kasberg (@dkasberg)"
short_description: Execute a single command based on condition on devices running Lenovo CNOS
description:
- This module allows you to modify the running configuration of a switch. It provides a way to
execute a single CNOS command on a network device by evaluating the current running configuration
and executing the command only if the specific settings have not been already configured.
The CNOS command is passed as an argument of the method.
This module functions the same as the cnos_command module.
The only exception is that the following inventory variable can be specified
[“condition = <flag string>”]
When this inventory variable is specified as the variable of a task, the command is executed for
the network element that matches the flag string. Usually, commands are executed across a group
of network devices. When there is a requirement to skip the execution of the command on one or
more devices, it is recommended to use this module.
This module uses SSH to manage network device configuration.
      For more information about this module from Lenovo and customizing its usage for your
use cases, please visit U(http://systemx.lenovofiles.com/help/index.jsp?topic=%2Fcom.lenovo.switchmgt.ansible.doc%2Fcnos_conditional_command.html)
version_added: "2.3"
extends_documentation_fragment: cnos
options:
clicommand:
description:
- This specifies the CLI command as an attribute to this method. The command is passed using
double quotes. The variables can be placed directly on to the CLI commands or can be invoked
from the vars directory.
required: true
default: Null
condition:
description:
- If you specify condition=false in the inventory file against any device, the command execution
is skipped for that device.
required: true
default: Null
flag:
description:
- If a task needs to be executed, you have to set the flag the same as it is specified in the
inventory for that device.
required: true
default: Null
'''
EXAMPLES = '''
Tasks : The following are examples of using the module cnos_conditional_command. These are written in the main.yml file of the tasks directory.
---
- name: Applying CLI template on VLAG Tier1 Leaf Switch1
cnos_conditional_command:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['username'] }}"
password: "{{ hostvars[inventory_hostname]['password'] }}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
enablePassword: "{{ hostvars[inventory_hostname]['enablePassword'] }}"
outputfile: "./results/test_conditional_command_{{ inventory_hostname }}_output.txt"
condition: "{{ hostvars[inventory_hostname]['condition']}}"
flag: leaf_switch2
command: "spanning-tree mode enable"
enablePassword: "anil"
'''
RETURN = '''
msg:
description: Success or failure message
returned: always
type: string
sample: "Command Applied"
'''
import sys
import paramiko
import time
import argparse
import socket
import array
import json
import re
try:
from ansible.module_utils import cnos
HAS_LIB = True
except:
HAS_LIB = False
from ansible.module_utils.basic import AnsibleModule
from collections import defaultdict
def main():
module = AnsibleModule(
argument_spec=dict(
clicommand=dict(required=True),
outputfile=dict(required=True),
condition=dict(required=True),
flag=dict(required=True),
host=dict(required=True),
deviceType=dict(required=True),
username=dict(required=True),
password=dict(required=True, no_log=True),
enablePassword=dict(required=False, no_log=True), ), supports_check_mode=False)
username = module.params['username']
password = module.params['password']
enablePassword = module.params['enablePassword']
condition = module.params['condition']
flag = module.params['flag']
cliCommand = module.params['clicommand']
outputfile = module.params['outputfile']
deviceType = module.params['deviceType']
hostIP = module.params['host']
output = ""
if (condition != flag):
module.exit_json(changed=True, msg="Command Skipped for this value")
return " "
# Create instance of SSHClient object
remote_conn_pre = paramiko.SSHClient()
# Automatically add untrusted hosts (make sure okay for security policy in your environment)
remote_conn_pre.set_missing_host_key_policy(paramiko.AutoAddPolicy())
# initiate SSH connection with the switch
remote_conn_pre.connect(hostIP, username=username, password=password)
time.sleep(2)
# Use invoke_shell to establish an 'interactive session'
remote_conn = remote_conn_pre.invoke_shell()
time.sleep(2)
    # Enable and enter configure terminal then send command
output = output + cnos.waitForDeviceResponse("\n", ">", 2, remote_conn)
output = output + cnos.enterEnableModeForDevice(enablePassword, 3, remote_conn)
# Make terminal length = 0
output = output + cnos.waitForDeviceResponse("terminal length 0\n", "#", 2, remote_conn)
# Go to config mode
output = output + cnos.waitForDeviceResponse("configure d\n", "(config)#", 2, remote_conn)
# Send the CLi command
output = output + cnos.waitForDeviceResponse(cliCommand + "\n", "(config)#", 2, remote_conn)
# Save it into the file
file = open(outputfile, "a")
file.write(output)
file.close()
# Logic to check when changes occur or not
errorMsg = cnos.checkOutputForError(output)
if(errorMsg is None):
module.exit_json(changed=True, msg="CLI Command executed and results saved in file ")
else:
module.fail_json(msg=errorMsg)
if __name__ == '__main__':
main()
|
andreaso/ansible
|
lib/ansible/modules/network/lenovo/cnos_conditional_command.py
|
Python
|
gpl-3.0
| 7,085
|
[
"VisIt"
] |
c90376116936e9cc298be1bae2a89b2b2365f029428af91406de817764d8747e
|