id stringlengths 1 265 | text stringlengths 6 5.19M | dataset_id stringclasses 7 values |
|---|---|---|
/Flask-Statics-Helper-1.0.0.tar.gz/Flask-Statics-Helper-1.0.0/flask_statics/static/angular/i18n/angular-locale_uk.js | 'use strict';
angular.module("ngLocale", [], ["$provide", function($provide) {
var PLURAL_CATEGORY = {ZERO: "zero", ONE: "one", TWO: "two", FEW: "few", MANY: "many", OTHER: "other"};
function getDecimals(n) {
  // Number of digits after the decimal point in the string form of n
  // (0 when n has no '.'); used by the CLDR plural-category logic below.
  n = n + '';
  var i = n.indexOf('.');
  return (i == -1) ? 0 : n.length - i - 1;
}
function getVF(n, opt_precision) {
  // Visible-fraction operands for CLDR plural rules:
  //   v = number of visible fraction digits (capped at 3 unless an explicit
  //       precision is supplied), f = those fraction digits as an integer.
  var v = opt_precision;
  if (undefined === v) {
    v = Math.min(getDecimals(n), 3);
  }
  var base = Math.pow(10, v);
  var f = ((n * base) | 0) % base;
  return {v: v, f: f};
}
$provide.value("$locale", {
"DATETIME_FORMATS": {
"AMPMS": [
"\u0434\u043f",
"\u043f\u043f"
],
"DAY": [
"\u043d\u0435\u0434\u0456\u043b\u044f",
"\u043f\u043e\u043d\u0435\u0434\u0456\u043b\u043e\u043a",
"\u0432\u0456\u0432\u0442\u043e\u0440\u043e\u043a",
"\u0441\u0435\u0440\u0435\u0434\u0430",
"\u0447\u0435\u0442\u0432\u0435\u0440",
"\u043f\u02bc\u044f\u0442\u043d\u0438\u0446\u044f",
"\u0441\u0443\u0431\u043e\u0442\u0430"
],
"MONTH": [
"\u0441\u0456\u0447\u043d\u044f",
"\u043b\u044e\u0442\u043e\u0433\u043e",
"\u0431\u0435\u0440\u0435\u0437\u043d\u044f",
"\u043a\u0432\u0456\u0442\u043d\u044f",
"\u0442\u0440\u0430\u0432\u043d\u044f",
"\u0447\u0435\u0440\u0432\u043d\u044f",
"\u043b\u0438\u043f\u043d\u044f",
"\u0441\u0435\u0440\u043f\u043d\u044f",
"\u0432\u0435\u0440\u0435\u0441\u043d\u044f",
"\u0436\u043e\u0432\u0442\u043d\u044f",
"\u043b\u0438\u0441\u0442\u043e\u043f\u0430\u0434\u0430",
"\u0433\u0440\u0443\u0434\u043d\u044f"
],
"SHORTDAY": [
"\u041d\u0434",
"\u041f\u043d",
"\u0412\u0442",
"\u0421\u0440",
"\u0427\u0442",
"\u041f\u0442",
"\u0421\u0431"
],
"SHORTMONTH": [
"\u0441\u0456\u0447.",
"\u043b\u044e\u0442.",
"\u0431\u0435\u0440.",
"\u043a\u0432\u0456\u0442.",
"\u0442\u0440\u0430\u0432.",
"\u0447\u0435\u0440\u0432.",
"\u043b\u0438\u043f.",
"\u0441\u0435\u0440\u043f.",
"\u0432\u0435\u0440.",
"\u0436\u043e\u0432\u0442.",
"\u043b\u0438\u0441\u0442.",
"\u0433\u0440\u0443\u0434."
],
"fullDate": "EEEE, d MMMM y '\u0440'.",
"longDate": "d MMMM y '\u0440'.",
"medium": "d MMM y '\u0440'. HH:mm:ss",
"mediumDate": "d MMM y '\u0440'.",
"mediumTime": "HH:mm:ss",
"short": "dd.MM.yy HH:mm",
"shortDate": "dd.MM.yy",
"shortTime": "HH:mm"
},
"NUMBER_FORMATS": {
"CURRENCY_SYM": "\u20b4",
"DECIMAL_SEP": ",",
"GROUP_SEP": "\u00a0",
"PATTERNS": [
{
"gSize": 3,
"lgSize": 3,
"maxFrac": 3,
"minFrac": 0,
"minInt": 1,
"negPre": "-",
"negSuf": "",
"posPre": "",
"posSuf": ""
},
{
"gSize": 3,
"lgSize": 3,
"maxFrac": 2,
"minFrac": 2,
"minInt": 1,
"negPre": "-",
"negSuf": "\u00a0\u00a4",
"posPre": "",
"posSuf": "\u00a0\u00a4"
}
]
},
"id": "uk",
"pluralCat": function(n, opt_precision) { var i = n | 0; var vf = getVF(n, opt_precision); if (vf.v == 0 && i % 10 == 1 && i % 100 != 11) { return PLURAL_CATEGORY.ONE; } if (vf.v == 0 && i % 10 >= 2 && i % 10 <= 4 && (i % 100 < 12 || i % 100 > 14)) { return PLURAL_CATEGORY.FEW; } if (vf.v == 0 && i % 10 == 0 || vf.v == 0 && i % 10 >= 5 && i % 10 <= 9 || vf.v == 0 && i % 100 >= 11 && i % 100 <= 14) { return PLURAL_CATEGORY.MANY; } return PLURAL_CATEGORY.OTHER;}
});
}]); | PypiClean |
/AnDOviewer-0.1-py3-none-any.whl/script/AnDOviewer.py | import pandas as pd
import argparse
import os
import sys
def show_struct(directory):
    """Print the full directory tree (directories only) of *directory*.

    Args:
        directory (str): path of the directory to show.
    """
    import shlex  # local import so the file's top-level imports stay untouched
    # Fix: quote the path so spaces or shell metacharacters cannot break the
    # command (or inject into the shell) run by os.system.
    os.system("tree -d " + shlex.quote(directory))
def show_experiments(directory):
    """Print only the first directory level (experiments) of *directory*.

    Args:
        directory (str): path of the directory to show.
    """
    import shlex  # local import so the file's top-level imports stay untouched
    # Fix: quote the path against spaces/shell metacharacters (injection-safe).
    os.system("tree -d -L 1 " + shlex.quote(directory))
def show_subjects(directory):
    """Print the first two directory levels (subjects) of *directory*.

    Args:
        directory (str): path of the directory to show.
    """
    import shlex  # local import so the file's top-level imports stay untouched
    # Fix: quote the path against spaces/shell metacharacters (injection-safe).
    os.system("tree -d -L 2 " + shlex.quote(directory))
def show_sessions(directory):
    """Print the first three directory levels (sessions) of *directory*.

    Args:
        directory (str): path of the directory to show.
    """
    import shlex  # local import so the file's top-level imports stay untouched
    # Fix: quote the path against spaces/shell metacharacters (injection-safe).
    os.system("tree -d -L 3 " + shlex.quote(directory))
def main():
    """Command-line entry point.

    usage: AnDO_Viewer.py [-h] [-S] [-Se] [-Su] [-Ss] pathToDir

    positional arguments:
      pathToDir             Path to the folder to show

    optional arguments:
      -h, --help            show this help message and exit
      -S, --show            show dir structure
      -Se, --show_experiments
                            show experiments folder only
      -Su, --show_subjects  show subjects folder only
      -Ss, --show_sessions  show sessions folder only
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-S', '--show', help=' show dir structure ', action='store_true', default=True)
    parser.add_argument('-Se', '--show_experiments',
                        help=' show experiments folder only', action='store_true')
    parser.add_argument('-Su', '--show_subjects',
                        help=' show subjects folder only', action='store_true')
    parser.add_argument('-Ss', '--show_sessions',
                        help=' show sessions folder only', action='store_true')
    parser.add_argument('pathToDir', help='Path to the folder to show')
    args = parser.parse_args()

    # Validate the directory before shelling out to 'tree'.
    if not os.path.isdir(args.pathToDir):
        print('Directory does not exist:', args.pathToDir)
        sys.exit(1)  # fix: was builtin exit(), which is meant for the REPL only

    # NOTE: --show defaults to True, so the full structure is always printed;
    # the narrower flags add extra views on top of it (original behaviour kept).
    if args.show:
        show_struct(args.pathToDir)
    if args.show_experiments:
        show_experiments(args.pathToDir)
    if args.show_subjects:
        show_subjects(args.pathToDir)
    if args.show_sessions:
        show_sessions(args.pathToDir)
# Script entry point (dataset-delimiter residue removed from the call line).
if __name__ == '__main__':
    main()
/LumberMill-0.9.5.7-py3-none-any.whl/lumbermill/modifier/AddDateTime.py | import time
import datetime
from lumbermill.BaseThreadedModule import BaseThreadedModule
from lumbermill.utils.Decorators import ModuleDocstringParser
@ModuleDocstringParser
class AddDateTime(BaseThreadedModule):
    # NOTE: this docstring is machine-parsed by ModuleDocstringParser for the
    # configuration defaults/types — do not reword the template lines.
    """
    Add a field with a datetime.
    If source_fields is not set, datetime will be based on current time.
    If source_fields is set, event will be searched for each source_field.
    If found, all source_formats will be applied, to parse the date.
    First successful conversion will win.
    Configuration template:
    - AddDateTime:
       source_fields: # <default: None; type: None||list; is: optional>
       source_formats: # <default: None; type: None||list; is: required if source_fields is not None else optional>
       target_field: # <default: '@timestamp'; type: string; is: optional>
       target_format: # <default: '%Y-%m-%dT%H:%M:%S'; type: string; is: optional>
       receivers:
        - NextModule
    """

    module_type = "modifier"
    """Set module type"""

    def configure(self, configuration):
        # Call parent configure method
        BaseThreadedModule.configure(self, configuration)
        self.source_fields = self.getConfigurationValue('source_fields')
        self.source_formats = self.getConfigurationValue('source_formats')
        self.target_format = self.getConfigurationValue('target_format')
        self.target_field = self.getConfigurationValue('target_field')
        # When source fields are configured, swap the event handler so dates
        # are parsed out of the event instead of stamped with "now".
        if self.source_fields:
            self.handleEvent = self.handleEventWithSourceFields

    def handleEvent(self, event):
        # Default handler: stamp the event with the current UTC time.
        # NOTE(review): datetime.utcnow() is deprecated in recent Pythons;
        # datetime.now(timezone.utc) is the modern spelling — left unchanged.
        event[self.target_field] = datetime.datetime.utcnow().strftime(self.target_format)
        yield event

    def handleEventWithSourceFields(self, event):
        # Parse a timestamp from the configured source fields of the event.
        # NOTE(review): indentation was lost in this dump; the placement of
        # the final two statements is reconstructed from the docstring's
        # contract ("first successful conversion will win") — verify upstream.
        for source_field in self.source_fields:
            try:
                time_field = event[source_field]
            except KeyError:
                # Field absent from this event: try the next configured field.
                continue
            for source_format in self.source_formats:
                try:
                    date_time = datetime.datetime.strptime(time_field, source_format)
                except ValueError:
                    # Format did not match: try the next one.
                    continue
                event[self.target_field] = date_time.strftime(self.target_format)
        yield event
/AICON-2.0.1-py3-none-any.whl/aicon/myemc.py |
import numpy as np
import sys
import time
class EffectMass(object):
'''This class is used to calculate band effective mass at specific point and store the data. '''
EMC_VERSION = '1.51py'
STENCIL = 5 #3 or 5
Bohr = 0.52917721092
def __init__(self):
    """Build the 3- and 5-point finite-difference stencils and
    zero-initialise the result arrays filled later by cal_effmass()."""
    ###################################################################################################
    #
    # STENCILS for finite difference
    #
    # three-point stencil
    self.st3 = []
    self.st3.append([0.0, 0.0, 0.0]); # 0
    self.st3.append([-1.0, 0.0, 0.0]); self.st3.append([1.0, 0.0, 0.0]); # dx 1-2
    self.st3.append([0.0, -1.0, 0.0]); self.st3.append([0.0, 1.0, 0.0]) # dy 3-4
    self.st3.append([0.0, 0.0, -1.0]); self.st3.append([0.0, 0.0, 1.0]) # dz 5-6
    self.st3.append([-1.0, -1.0, 0.0]); self.st3.append([1.0, 1.0, 0.0]); self.st3.append([1.0, -1.0, 0.0]); self.st3.append([-1.0, 1.0, 0.0]); # dxdy 7-10
    self.st3.append([-1.0, 0.0, -1.0]); self.st3.append([1.0, 0.0, 1.0]); self.st3.append([1.0, 0.0, -1.0]); self.st3.append([-1.0, 0.0, 1.0]); # dxdz 11-14
    self.st3.append([0.0, -1.0, -1.0]); self.st3.append([0.0, 1.0, 1.0]); self.st3.append([0.0, 1.0, -1.0]); self.st3.append([0.0, -1.0, 1.0]); # dydz 15-18
    #
    # five-point stencil: centre + 4 displacements per axis + 16 per mixed
    # plane = 1 + 12 + 48 = 61 points.
    self.st5 = []
    self.st5.append([0.0, 0.0, 0.0])
    #
    a = [-2,-1,1,2]
    for i in range(len(a)): #dx
        self.st5.append([float(a[i]), 0., 0.])
    #
    for i in range(len(a)): #dy
        self.st5.append([0., float(a[i]), 0.])
    #
    for i in range(len(a)): #dz
        self.st5.append([0., 0., float(a[i])])
    #
    for i in range(len(a)):
        i1=float(a[i])
        for j in range(len(a)):
            j1=float(a[j])
            self.st5.append([j1, i1, 0.]) # dxdy
    #
    for i in range(len(a)):
        i1=float(a[i])
        for j in range(len(a)):
            j1=float(a[j])
            self.st5.append([j1, 0., i1,]) # dxdz
    #
    for i in range(len(a)):
        i1=float(a[i])
        for j in range(len(a)):
            j1=float(a[j])
            self.st5.append([0., j1, i1]) # dydz

    # Results (set by cal_effmass): principal masses and the eigenvectors in
    # Cartesian, fractional and max-normalised fractional form.
    self.masses = np.zeros(3)
    self.vecs_cart = np.zeros((3,3))
    self.vecs_frac = np.zeros((3,3))
    self.vecs_n = np.zeros((3,3))
def __get__(self, obj, typ = None):
    # Descriptor-style access returns the three principal effective masses.
    return self.masses

def __str__(self):
    # Human-readable form: the three principal masses to 3 decimals.
    return '%.3f %.3f %.3f' % (self.masses[0], self.masses[1], self.masses[2])

# repr mirrors str for convenient interactive inspection.
__repr__ = __str__
##################################### Class Method #####################################################
def MAT_m_VEC(self, m, v):
    """Return the matrix-vector product of *m* and *v* as a plain list."""
    product = []
    for row in m:
        assert len(v) == len(row), 'Length of the matrix row is not equal to the length of the vector'
        product.append(sum(coeff * comp for coeff, comp in zip(row, v)))
    return product
def T(self, m):
    """Return the transpose of matrix *m* (a list of row lists).

    Fix/generalisation: the original built the inner range from len(m[j]),
    which is only correct for square matrices; zip(*m) transposes any
    rectangular matrix and is identical for the square inputs used here.
    """
    return [list(column) for column in zip(*m)]
def N(self, v):
    """Normalise *v* by its absolutely-largest component (sign preserved)."""
    dominant = 0.
    for component in v:
        # Strict '>' keeps the first of equally-large components, as before.
        if abs(component) > abs(dominant):
            dominant = component
    return [component / dominant for component in v]
def DET_3X3(self, m):
    """Determinant of a 3x3 matrix (rule of Sarrus, accumulated term by term
    in the same order as before so float results are bit-identical)."""
    assert len(m) == 3, 'Matrix should be of the size 3 by 3'
    det = m[0][0]*m[1][1]*m[2][2]
    det = det + m[1][0]*m[2][1]*m[0][2]
    det = det + m[2][0]*m[0][1]*m[1][2]
    det = det - m[0][2]*m[1][1]*m[2][0]
    det = det - m[2][1]*m[1][2]*m[0][0]
    det = det - m[2][2]*m[0][1]*m[1][0]
    return det
def SCALE_ADJOINT_3X3(self, m, s):
    """Return s * adj(m) for a 3x3 matrix *m*.

    adj[j][i] is the (i, j) cofactor; the cyclic index formula below expands
    to exactly the nine products the unrolled original computed.
    """
    adj = [[0.0 for _ in range(3)] for _ in range(3)]
    for i in range(3):
        i1, i2 = (i + 1) % 3, (i + 2) % 3
        for j in range(3):
            j1, j2 = (j + 1) % 3, (j + 2) % 3
            adj[j][i] = (s) * (m[i1][j1] * m[i2][j2] - m[i1][j2] * m[i2][j1])
    return adj
def INVERT_3X3(self, m):
tmp = 1.0/self.DET_3X3(m)
return self.SCALE_ADJOINT_3X3(m, tmp)
def IS_SYMMETRIC(self, m):
    """True when *m* equals its transpose; comparison order and the implicit
    square-shape check (IndexError on ragged input) match the original."""
    return all(
        m[i][j] == m[j][i]
        for i in range(len(m))
        for j in range(len(m[i]))
    )
def jacobi(self, ainput):
    '''
    Diagonalize a real symmetric matrix using the variable threshold cyclic Jacobi method.

    Returns (v, e): eigenvectors as the rows of v and eigenvalues e, sorted
    into increasing eigenvalue order.
    '''
    from math import sqrt
    #
    a = [[ ainput[i][j] for i in range(len( ainput[j] )) ] for j in range(len( ainput )) ] # copymatrix
    n = len(a)
    m = len(a[0])
    if n != m:
        # NOTE(review): raising a plain string is a TypeError on Python 3;
        # should be e.g. raise ValueError(...) — left unchanged.
        raise 'jacobi: Matrix must be square'
    #
    for i in range(n):
        for j in range(m):
            if a[i][j] != a[j][i]:
                # NOTE(review): same Python-3 string-raise issue as above.
                raise 'jacobi: Matrix must be symmetric'
    #
    tolmin = 1e-14
    tol = 1e-4
    #
    # v starts as the identity and accumulates the rotations.
    v = [[0.0 for i in range(n)] for j in range(n)] # zeromatrix
    for i in range(n):
        v[i][i] = 1.0
    #
    maxd = 0.0
    for i in range(n):
        maxd = max(abs(a[i][i]),maxd)
    #
    # Cyclic sweeps with a threshold that tightens each pass, up to 50 passes.
    for iter in range(50):
        nrot = 0
        for i in range(n):
            for j in range(i+1,n):
                aii = a[i][i]
                ajj = a[j][j]
                daij = abs(a[i][j])
                if daij > tol*maxd: # Screen small elements
                    nrot = nrot + 1
                    s = aii - ajj
                    ds = abs(s)
                    if daij > (tolmin*ds): # Check for sufficient precision
                        if (tol*daij) > ds:
                            # Nearly-degenerate diagonal: 45-degree rotation.
                            c = s = 1/sqrt(2.)
                        else:
                            t = a[i][j]/s
                            u = 0.25/sqrt(0.25+t*t)
                            c = sqrt(0.5+u)
                            s = 2.*t*u/c
                        # Apply the rotation to rows i and j of a ...
                        for k in range(n):
                            u = a[i][k]
                            t = a[j][k]
                            a[i][k] = s*t + c*u
                            a[j][k] = c*t - s*u
                        # ... to columns i and j of a ...
                        for k in range(n):
                            u = a[k][i]
                            t = a[k][j]
                            a[k][i] = s*t + c*u
                            a[k][j]= c*t - s*u
                        # ... and accumulate it in the eigenvector rows.
                        for k in range(n):
                            u = v[i][k]
                            t = v[j][k]
                            v[i][k] = s*t + c*u
                            v[j][k] = c*t - s*u
                        #
                        a[j][i] = a[i][j] = 0.0
                        maxd = max(maxd,abs(a[i][i]),abs(a[j][j]))
        #
        if nrot == 0 and tol <= tolmin:
            break
        tol = max(tolmin,tol*0.99e-2)
    #
    if nrot != 0:
        print('jacobi: [WARNING] Jacobi iteration did not converge in 50 passes!')
    #
    # Sort eigenvectors and values into increasing order
    e = [0.0 for i in range(n)] # zerovector
    for i in range(n):
        e[i] = a[i][i]
        for j in range(i):
            if e[j] > e[i]:
                (e[i],e[j]) = (e[j],e[i])
                (v[i],v[j]) = (v[j],v[i])
    #
    return (v,e)
#
def cart2frac(self, basis, v):
return self.MAT_m_VEC( self.T(self.INVERT_3X3(basis)), v )
def fd_effmass_st3(self, e, h):
    """Second-derivative tensor from the 19 three-point-stencil energies *e*
    (ordered as in self.st3) with step *h*, by central finite differences."""
    m = [[0.0 for i in range(3)] for j in range(3)]
    # Diagonal terms: standard central second difference along each axis.
    m[0][0] = (e[1] - 2.0*e[0] + e[2])/h**2
    m[1][1] = (e[3] - 2.0*e[0] + e[4])/h**2
    m[2][2] = (e[5] - 2.0*e[0] + e[6])/h**2

    # Mixed second derivatives from the four corner points of each plane.
    m[0][1] = (e[7] + e[8] - e[9] - e[10])/(4.0*h**2)
    m[0][2] = (e[11] + e[12] - e[13] - e[14])/(4.0*h**2)
    m[1][2] = (e[15] + e[16] - e[17] - e[18])/(4.0*h**2)
    # symmetrize
    m[1][0] = m[0][1]
    m[2][0] = m[0][2]
    m[2][1] = m[1][2]
    #
    # print '-> fd_effmass_st3: Effective mass tensor:\n'
    # for i in range(len(m)):
    # print '%15.8f %15.8f %15.8f' % (m[i][0], m[i][1], m[i][2])
    # print ''
    # #
    return m
def fd_effmass_st5(self, e, h):
    """Second-derivative tensor from the 61 five-point-stencil energies *e*
    (ordered as in self.st5) with step *h*; higher-order differences."""
    m = [[0.0 for i in range(3)] for j in range(3)]
    #
    # Diagonal terms: fourth-order central second difference.
    m[0][0] = (-(e[1]+e[4]) + 16.0*(e[2]+e[3]) - 30.0*e[0])/(12.0*h**2)
    m[1][1] = (-(e[5]+e[8]) + 16.0*(e[6]+e[7]) - 30.0*e[0])/(12.0*h**2)
    m[2][2] = (-(e[9]+e[12]) + 16.0*(e[10]+e[11]) - 30.0*e[0])/(12.0*h**2)
    #
    # Mixed derivatives from the 16 off-axis points of each plane.
    m[0][1] = (-63.0*(e[15]+e[20]+e[21]+e[26]) + 63.0*(e[14]+e[17]+e[27]+e[24]) \
               +44.0*(e[16]+e[25]-e[13]-e[28]) + 74.0*(e[18]+e[23]-e[19]-e[22]))/(600.0*h**2)
    m[0][2] = (-63.0*(e[31]+e[36]+e[37]+e[42]) + 63.0*(e[30]+e[33]+e[43]+e[40]) \
               +44.0*(e[32]+e[41]-e[29]-e[44]) + 74.0*(e[34]+e[39]-e[35]-e[38]))/(600.0*h**2)
    m[1][2] = (-63.0*(e[47]+e[52]+e[53]+e[58]) + 63.0*(e[46]+e[49]+e[59]+e[56]) \
               +44.0*(e[48]+e[57]-e[45]-e[60]) + 74.0*(e[50]+e[55]-e[51]-e[54]))/(600.0*h**2)
    #
    # symmetrize
    m[1][0] = m[0][1]
    m[2][0] = m[0][2]
    m[2][1] = m[1][2]
    #
    # print '-> fd_effmass_st5: Effective mass tensor:\n'
    # for i in range(3):
    # print '%15.8f %15.8f %15.8f' % (m[i][0], m[i][1], m[i][2])
    # print ''
    #
    return m
def generate_kpoints(self, kpt_frac, st, h, prg, basis):
    """Fractional coordinates of all stencil k-points around *kpt_frac*.

    :param kpt_frac: central k-point in reduced reciprocal coordinates.
    :param st: stencil displacement table (self.st3 or self.st5).
    :param h: finite-difference step.
    :param prg: program identifier; for 'V' (VASP) and 'P' (CASTEP) the step
                is rescaled from 1/Bohr to 1/Angstrom.
    :param basis: 3x3 real-space basis vectors.
    """
    from math import pi
    #
    # working in the reciprocal space
    m = self.INVERT_3X3(self.T(basis))
    basis_r = [[ m[i][j]*2.0*pi for j in range(3) ] for i in range(3) ]
    #
    kpt_rec = self.MAT_m_VEC(self.T(basis_r), kpt_frac)
    # print '-> generate_kpoints: K-point in reciprocal coordinates: %5.3f %5.3f %5.3f' % (kpt_rec[0], kpt_rec[1], kpt_rec[2])
    #
    if prg == 'V' or prg == 'P':
        h = h*(1/EffectMass.Bohr) # [1/A]
    #
    kpoints = []
    for i in range(len(st)):
        k_c_ = [ kpt_rec[j] + st[i][j]*h for j in range(3) ] # getting displaced k points in Cartesian coordinates
        k_f = self.cart2frac(basis_r, k_c_)
        kpoints.append( [k_f[0], k_f[1], k_f[2]] )
    #
    return kpoints
def parse_bands_CASTEP(self, eigenval_fh, band, diff2_size, debug=False):
    """Collect the eigenenergy of *band* at every k-point from a CASTEP
    .bands file handle.

    NOTE(review): *diff2_size* and *debug* are accepted for interface parity
    with the other parsers but are unused here.
    """
    # Number of k-points X
    nkpt = int(eigenval_fh.readline().strip().split()[3])
    # Number of spin components X
    spin_components = float(eigenval_fh.readline().strip().split()[4])
    # Number of electrons X.00 Y.00
    tmp = eigenval_fh.readline().strip().split()
    if spin_components == 1:
        nelec = int(float(tmp[3]))
        n_electrons_down = None
    elif spin_components == 2:
        # NOTE(review): inconsistent with the branch above — here nelec
        # becomes a one-element list of float; neither value is used below.
        nelec = [float(tmp[3])]
        n_electrons_down = int(float(tmp[4]))
    # Number of eigenvalues X
    nband = int(eigenval_fh.readline().strip().split()[3])
    energies = []
    # Get eigenenergies and unit cell from .bands file
    while True:
        line = eigenval_fh.readline()
        if not line:
            break
        #
        if 'Spin component 1' in line:
            # One energy per line for each band; keep only the requested one.
            for i in range(1, nband + 1):
                energy = float(eigenval_fh.readline().strip())
                if band == i:
                    energies.append(energy)
    return energies
def parse_EIGENVAL_VASP(self, eigenval_fh, band, diff2_size, debug=False):
    """Energies (converted eV -> Hartree) of *band* at the first
    *diff2_size* k-points of a VASP EIGENVAL file handle.

    Exits the process when *band* exceeds the number of calculated bands.
    """
    ev2h = 1.0/27.21138505
    eigenval_fh.seek(0) # just in case
    # Skip the five header lines preceding the counts line.
    eigenval_fh.readline()
    eigenval_fh.readline()
    eigenval_fh.readline()
    eigenval_fh.readline()
    eigenval_fh.readline()
    #
    nelec, nkpt, nband = [int(s) for s in eigenval_fh.readline().split()]
    # if debug: print 'From EIGENVAL: Number of the valence band is %d (NELECT/2)' % (nelec/2)
    if band > nband:
        print('Requested band (%d) is larger than total number of the calculated bands (%d)!' % (band, nband))
        sys.exit(1)
    energies = []
    for i in range(diff2_size):
        eigenval_fh.readline() # empty line
        eigenval_fh.readline() # k point coordinates
        for j in range(1, nband+1):
            line = eigenval_fh.readline()
            if band == j:
                energies.append(float(line.split()[1])*ev2h)
    # if debug: print ''
    return energies
#
def parse_nscf_PWSCF(self, eigenval_fh, band, diff2_size, debug=False):
    """Energies (converted eV -> Hartree) of *band* at the first
    *diff2_size* k-points of a Quantum ESPRESSO nscf output file handle."""
    ev2h = 1.0/27.21138505
    eigenval_fh.seek(0) # just in case
    engrs_at_k = []  # NOTE(review): unused leftover variable
    energies = []
    #
    while True:
        line = eigenval_fh.readline()
        if not line:
            break
        #
        if "End of band structure calculation" in line:
            for i in range(diff2_size):
                #
                # Scan forward to the next k-point block, which ends at the
                # "occupation numbers" line.
                while True:
                    line = eigenval_fh.readline()
                    if "occupation numbers" in line:
                        break
                    #
                    if "k =" in line:
                        a = [] # energies at a k-point
                        eigenval_fh.readline() # empty line
                        #
                        while True:
                            line = eigenval_fh.readline()
                            if line.strip() == "": # empty line
                                break
                            #
                            a.extend(line.strip().split())
                        #
                        #print a
                        # NOTE(review): the comparison looks inverted — to
                        # guard a[band-1] this should be len(a) >= band;
                        # left unchanged here.
                        assert len(a) <= band, 'Length of the energies array at a k-point is smaller than band param'
                        energies.append(float(a[band-1])*ev2h)
    #
    #print engrs_at_k
    return energies
#
def parse_inpcar(self, inpcar_fh, debug=False):
    """Parse an INPCAR control file handle.

    Returns (kpt, stepsize, band, prg, basis). Exits the process with an
    explanatory message when any expected line is malformed.
    """
    import re
    #
    kpt = [] # k-point at which eff. mass in reciprocal reduced coords (3 floats)
    stepsize = 0.0 # stepsize for finite difference (1 float) in Bohr
    band = 0 # band for which eff. mass is computed (1 int)
    prg = '' # program identifier (1 char)
    basis = [] # basis vectors in cartesian coords (3x3 floats), units depend on the program identifier
    #
    inpcar_fh.seek(0) # just in case
    p = re.search(r'^\s*(-*\d+\.\d+)\s+(-*\d+\.\d+)\s+(-*\d+\.\d+)', inpcar_fh.readline())
    if p:
        kpt = [float(p.group(1)), float(p.group(2)), float(p.group(3))]
        if debug: print("Found k point in the reduced reciprocal space: %5.3f %5.3f %5.3f" % (kpt[0], kpt[1], kpt[2]))
    else:
        print("Was expecting k point on the line 0 (3 floats), didn't get it, exiting...")
        sys.exit(1)

    p = re.search(r'^\s*(\d+\.\d+)', inpcar_fh.readline())
    if p:
        stepsize = float(p.group(1))
        if debug: print("Found stepsize of: %5.3f (1/Bohr)" % stepsize)
    else:
        print("Was expecting a stepsize on line 1 (1 float), didn't get it, exiting...")
        sys.exit(1)

    p = re.search(r'^\s*(\d+)', inpcar_fh.readline())
    if p:
        band = int(p.group(1))
        if debug: print("Requested band is : %5d" % band)
    else:
        print("Was expecting band number on line 2 (1 int), didn't get it, exiting...")
        sys.exit(1)

    p = re.search(r'^\s*(\w)', inpcar_fh.readline())
    if p:
        prg = p.group(1)
        if debug: print("Program identifier is: %5c" % prg)
    else:
        print("Was expecting program identifier on line 3 (1 char), didn't get it, exiting...")
        sys.exit(1)

    # Three basis-vector lines; lines that fail the pattern are silently
    # skipped (original behaviour kept).
    for i in range(3):
        p = re.search(r'^\s*(-*\d+\.\d+)\s+(-*\d+\.\d+)\s+(-*\d+\.\d+)', inpcar_fh.readline())
        if p:
            basis.append([float(p.group(1)), float(p.group(2)), float(p.group(3))])

    if debug:
        print("Real space basis:")
        for i in range(len(basis)):
            print('%9.7f %9.7f %9.7f' % (basis[i][0], basis[i][1], basis[i][2]))
    if debug: print('')
    return kpt, stepsize, band, prg, basis
def get_eff_masses(self, m, basis):
    """Principal effective masses and eigenvectors of tensor *m*.

    Returns (em, vecs_cart, vecs_frac, vecs_n): the reciprocals of the
    eigenvalues, and the eigenvectors in Cartesian, fractional and
    max-normalised fractional form.
    """
    #
    vecs_cart = [[0.0 for i in range(3)] for j in range(3)]
    vecs_frac = [[0.0 for i in range(3)] for j in range(3)]
    vecs_n = [[0.0 for i in range(3)] for j in range(3)]
    #
    eigvec, eigval = self.jacobi(m)
    #
    for i in range(3):
        vecs_cart[i] = eigvec[i]
        vecs_frac[i] = self.cart2frac(basis, eigvec[i])
        vecs_n[i] = self.N(vecs_frac[i])
    #
    # NOTE(review): a zero eigenvalue (flat band) raises ZeroDivisionError.
    em = [ 1.0/eigval[i] for i in range(len(eigval)) ]
    return em, vecs_cart, vecs_frac, vecs_n
#
def cal_effmass(self, kpt, stepsize, band, prg, basis, output_fn):
    """Compute the effective-mass tensor from the energies in *output_fn*
    and store the results on the instance.

    Sets: masses, vecs_cart, vecs_frac, vecs_n, parallelmass, verticalmass,
    condeffmass, doseffmass.

    :param kpt: central k-point (reduced reciprocal coords; not used here,
                kept for interface symmetry with get_kpointsfile()).
    :param stepsize: finite-difference step.
    :param band: band index to read from the output file.
    :param prg: program identifier: 'V'/'C' (VASP), 'Q' (QE), 'P' (CASTEP).
    :param basis: 3x3 real-space basis vectors.
    :param output_fn: path to the electronic-structure output file.
    """
    if EffectMass.STENCIL == 3:
        fd_effmass = self.fd_effmass_st3
        st = self.st3
    elif EffectMass.STENCIL == 5:
        fd_effmass = self.fd_effmass_st5
        st = self.st5
    else:
        print('main: [ERROR] Wrong value for STENCIL, should be 3 or 5.')
        sys.exit(1)

    try:
        output_fh = open(output_fn, 'r')
    except IOError:
        sys.exit("Couldn't open input file "+output_fn+", exiting...\n")

    # Fix: the file handle was never closed; 'with' guarantees cleanup.
    with output_fh:
        prg_id = prg.upper()
        if prg_id in ('V', 'C'):
            energies = self.parse_EIGENVAL_VASP(output_fh, band, len(st))
        elif prg_id == 'Q':
            energies = self.parse_nscf_PWSCF(output_fh, band, len(st))
        elif prg_id == 'P':
            energies = self.parse_bands_CASTEP(output_fh, band, len(st))
        else:
            # Fix: an unknown identifier previously fell through and crashed
            # later with a confusing NameError on 'm'.
            sys.exit("Unknown program identifier '%s', expected V, C, Q or P." % prg)

    m = fd_effmass(energies, stepsize)
    masses, vecs_cart, vecs_frac, vecs_n = self.get_eff_masses(m, basis)
    self.vecs_cart = np.array(vecs_cart)
    self.vecs_frac = np.array(vecs_frac)
    self.vecs_n = np.array(vecs_n)
    self.masses = np.array(masses)

    # Split into the longitudinal mass (largest |m|) and the geometric mean
    # of the two transverse masses; signs follow masses[0] as before.
    maxindx = np.argmax(np.abs(self.masses))
    temp = 1.0
    for i in np.arange(3):
        if i == maxindx:
            self.parallelmass = self.masses[i]
        else:
            temp = temp * self.masses[i]
    self.verticalmass = np.sign(self.masses[0]) * np.sqrt(temp)
    # Conductivity mass: harmonic mean of the three principal masses.
    self.condeffmass = 3.0 / (1/self.masses[0] + 1/self.masses[1] + 1/self.masses[2])
    # Density-of-states mass: geometric mean, sign taken from masses[0].
    self.doseffmass = np.sign(self.masses[0]) * np.abs(self.masses[0] * self.masses[1] * self.masses[2])**(1/3)
    return
def get_kpointsfile(self, kpt, stepsize, prg, basis):
if EffectMass.STENCIL == 3:
st = self.st3
elif EffectMass.STENCIL == 5:
st = self.st5
else:
print('main: [ERROR] Wrong value for STENCIL, should be 3 or 5.')
sys.exit(1)
kpoints = self.generate_kpoints(kpt, st, stepsize, prg, basis)
kpoints_fh = open('KPOINTS', 'w')
kpoints_fh.write("generate with stepsize: "+str(stepsize)+"\n")
kpoints_fh.write("%d\n" % len(st))
kpoints_fh.write("Reciprocal\n")
#
for i, kpt in enumerate(kpoints):
kpoints_fh.write( '%15.10f %15.10f %15.10f 0.01\n' % (kpt[0], kpt[1], kpt[2]) )
#
kpoints_fh.close()
return | PypiClean |
/CDS-1.0.1.tar.gz/CDS-1.0.1/cds/modules/deposit/views.py | from __future__ import absolute_import, print_function
from flask import Blueprint, current_app, url_for, render_template
from flask_security import current_user
from invenio_records_ui.signals import record_viewed
from cds.modules.records.permissions import has_admin_permission, \
has_read_record_eos_path_permission
from .api import CDSDeposit
# Flask blueprint carrying the deposit UI templates and static assets,
# registered under the 'cds_deposit' endpoint namespace.
blueprint = Blueprint(
    'cds_deposit',
    __name__,
    template_folder='templates',
    static_folder='static'
)
def project_view(pid, record, template=None, **kwargs):
    """Edit project view.

    Sends the ``record_viewed`` signal for the record, then renders
    *template* with the pid/record context and record_type 'project'.

    :param pid: persistent identifier of the project.
    :param record: the project record object.
    :param template: template path (supplied by the records-UI config).
    :param kwargs: extra view arguments from the URL rule (unused).
    """
    record_viewed.send(
        current_app._get_current_object(),
        pid=pid,
        record=record,
    )
    return render_template(
        template,
        pid=pid,
        record=record,
        record_type='project',
    )
@blueprint.app_template_filter()
def check_avc_permissions(record):
    """Template filter: check if user has permission to see EOS video
    library path for *record*."""
    return has_read_record_eos_path_permission(current_user, record)
@blueprint.app_template_filter()
def check_if_super_admin(record):
    """Template filter: check if the current user is a super admin for
    *record*."""
    return has_admin_permission(current_user, record)
@blueprint.app_template_filter('tolinksjs')
def to_links_js(pid, deposit=None, dep_type=None):
    """Template filter building the REST API links for a deposit.

    Returns a dict of self/html/bucket/action/file URLs for *pid*, or an
    empty list when *deposit* is not a CDSDeposit (template-safe fallback).
    """
    if not isinstance(deposit, CDSDeposit):
        return []
    # Typed deposits use the endpoint template with a {type} placeholder.
    if dep_type:
        api_endpoint = current_app.config['DEPOSIT_RECORDS_API']
        self_url = api_endpoint.format(pid_value=pid.pid_value, type=dep_type)
    else:
        api_endpoint = current_app.config['DEPOSIT_RECORDS_API_DEFAULT']
        self_url = api_endpoint.format(pid_value=pid.pid_value)
    return {
        'self': self_url,
        'html': url_for(
            'invenio_deposit_ui.{}'.format(dep_type or pid.pid_type),
            pid_value=pid.pid_value),
        'bucket': current_app.config['DEPOSIT_FILES_API'] + '/{0}'.format(
            str(deposit.files.bucket.id)),
        'discard': self_url + '/actions/discard',
        'edit': self_url + '/actions/edit',
        'publish': self_url + '/actions/publish',
        'files': self_url + '/files',
    }
/Attendance%20Module-0.3.tar.gz/Attendance Module-0.3/Attendance/static/scripts/index.js | function getRoute(route, route_prepend = window.location.pathname) {
if (route.startsWith("/")) {
return route_prepend + route.substring(1);
}
else {
return route_prepend + route;
}
}
function getClasslist() {
    // Fetch the class list JSON from the backend.
    // NOTE(review): synchronous XHR blocks the UI thread and is deprecated;
    // consider fetch() + async/await.
    logConsole("Getting Class List");
    const Url = getRoute('api/classlist');
    var xmlHttp = new XMLHttpRequest();
    xmlHttp.open("GET", Url, false); // false for synchronous request
    xmlHttp.send(null);
    return xmlHttp.responseText;
}
function getCalendarEvent() {
    // Fetch the next calendar event JSON (title, dueDate, enterpriseID)
    // from the backend. Synchronous XHR — see note on getClasslist().
    logConsole("Getting Calendar Event");
    const Url = getRoute('/api/calendar');
    var xmlHttp = new XMLHttpRequest();
    xmlHttp.open("GET", Url, false); // false for synchronous request
    xmlHttp.send(null);
    return xmlHttp.responseText;
}
function getSummary() {
    // Fetch the attendance summary (list of attendance ids) from the backend.
    //const Url = 'http://192.168.2.103:5000/api/attendance';//swap IP for class
    const Url = getRoute('/api/attendance');
    var xmlHttp = new XMLHttpRequest();
    xmlHttp.open("GET", Url, false); // false for synchronous request
    xmlHttp.send(null);
    //alert(xmlHttp.responseText);
    return xmlHttp.responseText;
}
function getAttendance(attendanceID) {
    // Fetch one attendance record set (by id) from the backend.
    //const Url = 'http://192.168.2.103:5000/api/attendance/'+ attendanceID;//swap IP for class
    const Url = getRoute('/api/attendance/' + attendanceID);
    var xmlHttp = new XMLHttpRequest();
    xmlHttp.open("GET", Url, false); // false for synchronous request
    xmlHttp.send(null);
    //alert(xmlHttp.responseText);
    return xmlHttp.responseText;
}
function addAttendance(attendance_json) {
    // POST an attendance object ({id, records:[{studentID, isPresent}]})
    // to the backend. Synchronous XHR — see note on getClasslist().
    logConsole("Sending Backend Attendance");
    /*const attendance_json = { //placeholder attendance data
        "id": attendance_ID,
        "records": {
            "studentID": "ABC",
            "isPresent": true
        }
    } */
    //const Url = 'http://192.168.2.103:5000/api/attendance/' + attendance_json.id;
    const Url = getRoute('/api/attendance/' + attendance_json.id); //localhost ip, change for class
    var xmlHttp = new XMLHttpRequest();
    xmlHttp.open("POST", Url, false); // false for synchronous request
    console.log(attendance_json); //log json object for debugging
    xmlHttp.setRequestHeader("Content-Type", "application/json");
    xmlHttp.send(JSON.stringify(attendance_json));
}
function fillAttendanceDropdown() {
    // Populate the past-attendance <select> with one option per known
    // attendance id from the backend summary.
    const dropDown = document.getElementById("select-5c86");
    const attendance_json = JSON.parse(getSummary());
    for (let i = 0; i < attendance_json.ids.length; i++) {
        const newOption = document.createElement("option");
        newOption.innerText = "Attendance " + attendance_json.ids[i];
        newOption.value = attendance_json.ids[i];
        dropDown.appendChild(newOption);
    }
}
function fillStudentList() {
    // Append one table row (name, student number) per student in the class
    // list to the student table.
    const studentTable = document.getElementById("studentList");
    const students = JSON.parse(getClasslist());
    for (let i = 0; i < students.length; i++) {
        const newRow = document.createElement("tr");
        newRow.style = "height: 21px;";
        const newNameCell = document.createElement("td");
        newNameCell.classList.add("u-border-1", "u-border-grey-30", "u-first-column", "u-grey-5", "u-table-cell", "u-table-cell-39");
        newNameCell.innerText = students[i].firstname + " " + students[i].lastname;
        const newNumberCell = document.createElement("td");
        newNumberCell.classList.add("u-border-1", "u-border-grey-30", "u-table-cell");
        newNumberCell.innerText = students[i].studentNumber;
        newRow.appendChild(newNameCell);
        newRow.appendChild(newNumberCell);
        studentTable.appendChild(newRow);
    }
}
function fillNextAttendance() {
    // Build the attendance form for the next calendar event: a title/time
    // header, one Present/Absent radio pair per student, and a submit button.
    const nextAttendance = JSON.parse(getCalendarEvent());
    const students = JSON.parse(getClasslist());
    const title = document.getElementById("nextAttendanceTitle");
    const time = document.getElementById("nextAttendanceTime");
    title.innerText = "Attendance for: " + nextAttendance.title;
    time.innerText = "(ends " + nextAttendance.dueDate + ")";
    const form = document.getElementById("attendanceForm");
    for (let i = 0; i < students.length; i++) {
        //input + label -> inputRows -> wrapper + label -> formGroups ->form
        const row = document.createElement("div");
        row.classList.add("u-form-group", "u-form-input-layout-horizontal", "u-form-radiobutton", "u-label-left", "u-form-group-4");
        const rowLabel = document.createElement("label");
        rowLabel.classList.add("u-label", "u-spacing-10", "u-label-2")
        rowLabel.innerText = students[i].firstname + " " + students[i].lastname + " - " + students[i].studentNumber;
        const buttonWrapper = document.createElement("div");
        buttonWrapper.classList.add("u-form-radio-button-wrapper");
        const rowPresent = document.createElement("div");
        rowPresent.classList.add("u-input-row");
        const presentRadio = document.createElement("input");
        presentRadio.type = "radio";
        presentRadio.value = "Present";
        presentRadio.required = "required";
        presentRadio.checked = "checked"; // default every student to Present
        // NOTE(review): the student number is stored in the radio id and read
        // back by submitNextAttendance(); these ids do not match the
        // htmlFor="radio"+i targets below — verify the intended wiring.
        presentRadio.id = students[i].studentNumber;
        presentRadio.name = "radio" + i;
        const presentLabel = document.createElement("label");
        presentLabel.htmlFor = "radio" + i;
        presentLabel.classList.add("u-label", "u-spacing-10", "u-label-4");
        presentLabel.innerText = "Present";
        const rowAbsent = document.createElement("div");
        rowAbsent.classList.add("u-input-row");
        const absentRadio = document.createElement("input");
        absentRadio.type = "radio";
        absentRadio.value = "Absent";
        absentRadio.required = "required";
        absentRadio.name = "radio" + i;
        const absentLabel = document.createElement("label");
        absentLabel.htmlFor = "radio" + i;
        absentLabel.classList.add("u-label", "u-spacing-10", "u-label-4");
        absentLabel.innerText = "Absent";
        rowPresent.appendChild(presentRadio);
        rowPresent.appendChild(presentLabel);
        rowAbsent.appendChild(absentRadio);
        rowAbsent.appendChild(absentLabel);
        buttonWrapper.appendChild(rowPresent);
        buttonWrapper.appendChild(rowAbsent);
        row.appendChild(rowLabel);
        row.appendChild(buttonWrapper);
        form.appendChild(row);
    }
    // Submit row: styled anchor triggers submitNextAttendance().
    const buttonRow = document.createElement("div");
    buttonRow.classList.add("u-form-group", "u-form-submit", "u-label-left");
    const buttonSpacer = document.createElement("label");
    buttonSpacer.classList.add("u-label", "u-spacing-10", "u-label-17");
    const buttonContainer = document.createElement("div");
    buttonContainer.classList.add("u-align-left", "u-btn-submit-container");
    const buttonInput = document.createElement("input");
    buttonInput.type = "submit";
    buttonInput.value = "submit";
    buttonInput.classList.add("u-form-control-hidden");
    buttonInput.onclick = "addAttendance()"; // NOTE(review): a string assigned to onclick is inert — verify intent
    const buttonMessageSuccess = document.createElement("div");
    buttonMessageSuccess.classList.add("u-form-send-message", "u-form-send-message-success");
    buttonMessageSuccess.innerText = "New Attendance has been submitted, thank you!";
    const buttonMessageFailure = document.createElement("div");
    buttonMessageFailure.classList.add("u-form-send-message", "u-form-send-message-error");
    buttonMessageFailure.innerText = "Attendance was not submitted, please fix errors and try again.";
    const button = document.createElement("a");
    button.classList.add("u-btn", "u-btn-round", "u-btn-submit", "u-btn-style", "u-radius-50", "u-btn-2");
    button.onclick = function () { submitNextAttendance(); };
    button.innerText = "Submit";
    buttonContainer.appendChild(button);
    buttonContainer.appendChild(buttonInput);
    buttonRow.appendChild(buttonSpacer);
    buttonRow.appendChild(buttonContainer);
    form.appendChild(buttonRow);
    //form.appendChild(buttonMessageSuccess); commented out because messages were not hidden
    //form.appendChild(buttonMessageFailure); as intended, will fix later if messages are needed
}
function submitNextAttendance() {
    // Collect the Present/Absent radio selections and POST them as the
    // attendance for the next calendar event.
    //
    // Fix: the previous implementation concatenated a JSON string by hand and
    // left studentID unquoted, which produced invalid JSON (and a crash in
    // JSON.parse/addAttendance) whenever a student number was not purely
    // numeric. Building a plain object and letting addAttendance() serialise
    // it is always well-formed.
    const nextAttendance = JSON.parse(getCalendarEvent());
    const formOptions = document.getElementsByClassName("u-form-radiobutton");
    const records = [];
    for (let i = 0; i < formOptions.length; i++) {
        // The "Present" radio is the first input of the first row in the
        // wrapper and carries the student number as its id.
        const presentRadio = formOptions[i].lastChild.firstChild.firstChild;
        const idText = presentRadio.id;
        // Keep parity with the old payload: numeric ids were sent as numbers.
        const studentID = /^\d+$/.test(idText) ? Number(idText) : idText;
        records.push({ studentID: studentID, isPresent: presentRadio.checked });
    }
    const attendance = { id: nextAttendance.enterpriseID, records: records };
    console.log(JSON.stringify(attendance));
    addAttendance(attendance);
}
/*------------------------------------------------------------------------------------------
 * Function    : fillPastAttendance()
 * Description : Rebuilds the "past attendance" table for the session selected
 *               in the drop-down, one row per attendance record.
 * ------------------------------------------------------------------------------------------*/
function fillPastAttendance() {
    const dropDown = document.getElementById("select-5c86");
    const selected = dropDown.value;
    const attendance = JSON.parse(getAttendance(selected));
    const table = document.getElementById("pastAttendanceTableBody");
    // BUG FIX: the original indexed loop removed children while iterating,
    // which skips every other row because childElementCount shrinks as rows
    // are removed. Draining from the front clears the table completely.
    while (table.firstElementChild) {
        table.firstElementChild.remove();
    }
    for (let i = 0; i < attendance.records.length; i++) {
        const record = attendance.records[i];
        const row = document.createElement("tr");
        row.style = "height: 50px;";
        const nameBox = document.createElement("td");
        nameBox.classList.add("u-border-1", "u-border-black", "u-first-column", "u-grey-5", "u-table-cell", "u-table-cell-4");
        // NOTE(review): both the name and number cells show studentID in the
        // original; presumably the name cell should show a student name —
        // preserved as-is, confirm against the data model.
        nameBox.innerText = record.studentID;
        const numberBox = document.createElement("td");
        numberBox.classList.add("u-border-1", "u-border-grey-30", "u-table-cell");
        numberBox.innerText = record.studentID;
        const presentBox = document.createElement("td");
        presentBox.classList.add("u-border-1", "u-border-grey-30", "u-table-cell");
        presentBox.innerText = record.isPresent ? "Present" : "Absent";
        row.appendChild(nameBox);
        row.appendChild(numberBox);
        row.appendChild(presentBox);
        table.appendChild(row);
    }
}
/*------------------------------------------------------------------------------------------
 * Function    : logConsole()
 * Description : Logs a tagged request/response line (or a page-load marker when
 *               no tag is given) together with the current date-time, and
 *               returns the logged line.
 * Parameters  : String : the request or response to log to the console
 * ------------------------------------------------------------------------------------------*/
function logConsole(loggingValue) {
    const timestamp = Date();
    // Falsy tag (undefined, null, "") means this is the initial page load.
    const tag = loggingValue ? "[" + loggingValue + "]" : "[Page Load]";
    const line = tag + " " + timestamp;
    console.log(line);
    return line;
}
module.exports = {
logConsole, addAttendance, getRoute
}; | PypiClean |
/Menus-0.2.0.tar.gz/Menus-0.2.0/menus/example.py | import logging
from menus.menu import BaseMenu
log = logging.getLogger(__name__)
class Cool(BaseMenu):
    """Example menu exposing a single 'Speak' command."""

    def __init__(self):
        # A command is a ('Display Name', callback) tuple understood by BaseMenu.
        super(Cool, self).__init__(commands=[('Speak', self.speak)])

    def speak(self):
        """Display a short message, pause, then re-enter this menu."""
        # display_msg() renders the text towards the middle of the screen.
        self.display_msg('Cool is speaking')
        # Hold the message on screen for 3 seconds.
        self.pause(seconds=3)
        # Calling the menu instance returns to this menu; without it the
        # user would be sent back to the Main Menu.
        self()
class Hot(BaseMenu):
    """Example menu with a custom display name and a single 'Speak' command."""

    def __init__(self):
        # A command is a ('Display Name', callback) tuple understood by BaseMenu.
        super(Hot, self).__init__(commands=[('Speak', self.speak)],
                                  menu_name='Really Hot')

    def speak(self):
        """Display a short message, pause, then re-enter this menu."""
        # display_msg() renders the text towards the middle of the screen.
        self.display_msg("It's getting hot in here!")
        # Hold the message on screen for 3 seconds.
        self.pause(seconds=3)
        # Calling the menu instance returns to this menu; without it the
        # user would be sent back to the Main Menu.
        self()
class Keys(BaseMenu):
    """Example menu demonstrating a press-enter-to-continue pause."""

    def __init__(self):
        # A command is a ('Display Name', callback) tuple understood by BaseMenu.
        super(Keys, self).__init__(commands=[('Show Public Key', self.show_public_key)])

    def show_public_key(self):
        """Show the (placeholder) public key until the user presses enter."""
        log.debug('Show public key')
        # display_msg() renders the text towards the middle of the screen.
        self.display_msg('thdkalfjl;da;ksfkda;fdkj')
        # Block until the user presses enter.
        self.pause(enter_to_continue=True)
        # Calling the menu instance returns to this menu; without it the
        # user would be sent back to the Main Menu.
        self()
# List of menus to be used when user initializes Engine(example=True)
def load_example_menus():
return [Cool(), Hot(), Keys()] | PypiClean |
/Dragline-2.4.3-py3-none-any.whl/dragline/crawl.py | from dragline import __version__, runtime
import six
import time
from uuid import uuid4
from pytz import timezone
from datetime import datetime
from requests.compat import urlsplit
from six import b
from gevent.lock import BoundedSemaphore
from . import redisds
from .http import Request, RequestError
from .utils import Pickle
class Crawler:
    def __init__(self):
        # All crawler state is redis-backed; load() builds every shared structure.
        self.load()
    def load(self):
        """Build every redis-backed shared structure used by the crawler."""
        # Connection parameters come from the runtime settings.
        redis_args = dict(host=runtime.settings.REDIS_URL,
                          port=runtime.settings.REDIS_PORT,
                          db=runtime.settings.REDIS_DB)
        # Keys are namespaced per spider unless an explicit NAMESPACE is set.
        if hasattr(runtime.settings, 'NAMESPACE'):
            redis_args['namespace'] = runtime.settings.NAMESPACE
        else:
            redis_args['namespace'] = runtime.spider.name
        # Set of request hashes already seen (duplicate filtering).
        self.url_set = redisds.Set('urlset', **redis_args)
        # Queue of pending Request objects, pickled for transport.
        self.url_queue = redisds.Queue('urlqueue', serializer=Pickle(),
                                       **redis_args)
        # Per-process lock; the wildcard Dict lets any process see all runners.
        self.runner = redisds.Lock("runner:%s" % uuid4().hex, **redis_args)
        self.runners = redisds.Dict("runner:*", **redis_args)
        self.publiser = redisds.Publiser(**redis_args)
        runtime.stats = redisds.Hash('stats', **redis_args)
        self.conf = redisds.Hash('conf', **redis_args)
        # Guards running_count across greenlets.
        self.lock = BoundedSemaphore(1)
        self.running_count = 0
        if not hasattr(runtime.spider, 'allowed_domains'):
            runtime.spider.allowed_domains = []
def current_time(self):
tz = timezone(runtime.settings.TIME_ZONE)
return datetime.now(tz).isoformat()
    def start(self):
        """Initialise shared state and seed the queue with the spider's start requests."""
        self.conf['DELAY'] = runtime.settings.MIN_DELAY
        # A fresh (non-resumed) run starts from a clean slate, but only when
        # no other crawler process is currently active.
        if not runtime.settings.RESUME and self.is_inactive():
            self.url_queue.clear()
            self.url_set.clear()
            runtime.stats.clear()
        if isinstance(runtime.spider.start, list):
            requests = runtime.spider.start
        else:
            requests = [runtime.spider.start]
        for request in requests:
            # Plain URL strings are promoted to Request objects.
            if isinstance(request, six.string_types):
                request = Request(request)
            if request.callback is None:
                request.callback = 'parse'
            self.insert(request)
        # The first process to flip the shared status key becomes the
        # "starting" crawler; later processes just join in as supporters.
        if runtime.stats.setnx('status', 'running') or runtime.stats.setifval('status', 'stopped', 'running'):
            runtime.stats['start_time'] = self.current_time()
            runtime.logger.info("Starting spider %s", dict(iter(runtime.stats)))
            self.publiser.publish('status_changed:running')
        else:
            runtime.logger.info("Supporting %s", dict(iter(runtime.stats)))
    def clear(self, finished):
        """Release this runner and, if it was the last one, finalise the crawl.

        finished=True marks a completed crawl (queue and seen-set are wiped);
        finished=False marks an interrupted one (state is kept for resume).
        """
        self.runner.release()
        status = b('finished') if finished else b('stopped')
        # Only the last active runner transitions the shared status key.
        if self.is_inactive() and runtime.stats.setifval('status', b('running'), status):
            runtime.stats['end_time'] = self.current_time()
            if finished:
                self.url_queue.clear()
                self.url_set.clear()
            runtime.logger.info("%s", dict(iter(runtime.stats)))
            self.publiser.publish('status_changed:stopped')
        runtime.request_processor.clear()
    def is_inactive(self):
        # No runner locks held anywhere means no crawler process is active.
        return len(self.runners) == 0
def inc_count(self):
self.lock.acquire()
if self.running_count == 0:
self.runner.acquire()
self.running_count += 1
self.lock.release()
def decr_count(self):
self.lock.acquire()
self.running_count -= 1
if self.running_count == 0:
self.runner.release()
self.lock.release()
    def insert(self, request, check=True):
        """Queue a request for crawling, optionally filtering it first.

        Silently drops anything that is not a Request, has a non-http(s)
        URL, falls outside allowed_domains, or (when UNIQUE_CHECK is on)
        was already seen.
        """
        if not isinstance(request, Request):
            return
        url = urlsplit(request.url)
        if not all((url.scheme in ['http', 'https'], url.hostname)):
            runtime.logger.debug('invalid url %s', url.geturl())
            return
        # Stable hash identifying this request for duplicate detection.
        reqhash = request.get_unique_id(True)
        if check:
            # Requests flagged dont_filter bypass domain and uniqueness checks.
            check = not request.dont_filter
        if check:
            if runtime.spider.allowed_domains and url.hostname not in runtime.spider.allowed_domains:
                runtime.logger.debug('invalid url %s (domain %s not in %s)', url.geturl(), url.hostname, str(runtime.spider.allowed_domains))
                return
            elif runtime.settings.UNIQUE_CHECK:
                # Set.add returns falsy when the hash was already present.
                if not self.url_set.add(reqhash):
                    return
        self.url_queue.put(request)
        del request
    def updatedelay(self, delay):
        """Adapt the crawl delay from an observed response time (autothrottle).

        The new delay is the largest of MIN_DELAY, the observed delay, and
        the average of the current and observed delays — capped at MAX_DELAY.
        """
        self.conf['DELAY'] = min(
            max(runtime.settings.MIN_DELAY, delay,
                (float(self.conf['DELAY']) + delay) / 2.0),
            runtime.settings.MAX_DELAY)
    def process_request(self, request):
        """Fetch one request, record stats, and queue any follow-up requests.

        The response (if any) is always handed back to the request processor
        in the finally block, even when the callback raises.
        """
        response = None
        response_info = dict()
        redirect_info = dict()
        try:
            response = request.send()
            # Autothrottle: fold the observed response time into the delay.
            if runtime.settings.AUTOTHROTTLE:
                self.updatedelay(response.elapsed.seconds)
            time.sleep(float(self.conf['DELAY']))
            runtime.stats.inc('pages_crawled')
            runtime.stats.inc("status_code:" + str(response.status))
            runtime.logger.debug("status_code:%s for %s",str(response.status),request)
            if response:
                response_info['status_code'] = response.status
                response_info['request_headers'] = dict(response.request.headers)
                response_info['request_url'] = response.url
                response_info['request_method'] = response.request.method
                response_info['response_headers'] = dict(response.headers)
                if response.ids:
                    response_info['request_id'] = response.ids.username
                    response_info['session_id'] = response.ids.password
                if response.history:
                    # One info record per redirection hop.
                    for redirection in response.history:
                        redirect_info['status_code'] = redirection.status_code
                        redirect_info['request_headers'] = dict(redirection.request.headers)
                        redirect_info['request_url'] = redirection.url
                        redirect_info['request_method'] = redirection.request.method
                        redirect_info['response_headers'] = dict(redirection.headers)
                        # NOTE(review): raises KeyError if response.ids was
                        # falsy while a redirect history exists — confirm
                        # response.ids is always set for redirected responses.
                        redirect_info['request_id'] = response_info['request_id']
                        redirect_info['session_id'] = response_info['session_id']
                        runtime.logger.info(redirect_info)
                runtime.logger.info(response_info)
                if len(response):
                    runtime.stats.inc('request_bytes', len(response))
            # The callback may yield/return follow-up requests to enqueue.
            requests = request.callback(response)
            if requests:
                for i in requests:
                    self.insert(i)
        except:
            raise
        finally:
            if response is not None:
                runtime.request_processor.put_response(response)
def process_url(self):
while runtime.stats['status'] == b('running'):
request = self.url_queue.get(timeout=2)
if request:
runtime.logger.debug("Processing %s", request)
self.inc_count()
try:
self.process_request(request)
except RequestError:
request.retry += 1
runtime.stats.inc('retry_count')
if request.retry >= runtime.settings.MAX_RETRY:
runtime.logger.warning("Rejecting %s and meta is %s", request, str(request.meta), exc_info=True)
else:
runtime.logger.debug("Retrying %s", request, exc_info=True)
self.insert(request, False)
except KeyboardInterrupt:
self.insert(request, False)
raise KeyboardInterrupt
except:
runtime.logger.exception("Failed to execute callback on %s and meta is %s", request, str(request.meta))
else:
runtime.logger.info("Finished processing %s", request)
finally:
self.decr_count()
else:
if self.is_inactive():
break
runtime.logger.debug("No url to process, active threads: %s", self.running_count) | PypiClean |
/BubbleDet-1.0.0.tar.gz/BubbleDet-1.0.0/examples/derivative_expansion.py | import numpy as np # arrays and maths
import matplotlib.pyplot as plt # plots
from scipy.optimize import curve_fit # fitting
from cosmoTransitions.tunneling1D import SingleFieldInstanton # bounce
from BubbleDet import BubbleConfig, ParticleConfig, BubbleDeterminant
# dimension
dim = 4
print(f"dim = {dim}")
# Lagrangian parameters
msq = 1
lam = -0.15
gsq_list = np.array([2 ** i for i in range(-4, 5)])
c6 = 0.01
# spin of heavy particle
spin = 0
# potential and its derivatives
def V(x, msq, lam, c6):
    """Scalar potential V(phi) = msq/2 phi^2 + lam/2 phi^4 + c6/32 phi^6."""
    quadratic = 1 / 2 * msq * x**2
    quartic = 1 / 2 * lam * x**4
    sextic = 1 / 32 * c6 * x**6
    return quadratic + quartic + sextic
def dV(x, msq, lam, c6):
    """First derivative dV/dphi of the potential V."""
    linear = msq * x
    cubic = 2 * lam * x**3
    quintic = 3 / 16 * c6 * x**5
    return linear + cubic + quintic
def ddV(x, msq, lam, c6):
    """Second derivative d2V/dphi2 of the potential V."""
    quadratic = 6 * lam * x**2
    quartic = 15 / 16 * c6 * x**4
    return msq + quadratic + quartic
# minima
def phi_true(msq, lam, c6):
    """Field value at the true (broken) minimum of the potential."""
    inner = (-4 * lam + np.sqrt(16 * lam**2 - 3 * c6 * msq)) / c6
    return 2 * np.sqrt(inner) / np.sqrt(3)
def phi_false(msq, lam, c6):
    """Field value at the false (symmetric) minimum; always the origin."""
    return 0
# critical mass
def msq_critical(lam, c6):
    """Critical mass-squared above which the broken minimum disappears."""
    lam_sq = lam**2
    return 4 * lam_sq / c6
# for fitting
def line(x, a, b):
    """Straight line a + b*x, used as the model for log-log slope fits."""
    slope_term = b * x
    return a + slope_term
msq_ratio_list = np.zeros(len(gsq_list))
res_list = np.zeros(len(gsq_list))
diff_LO_list = np.zeros(len(gsq_list))
diff_NLO_list = np.zeros(len(gsq_list))
err_list = np.zeros(len(gsq_list))
print(
"%-12s %-12s %-12s %-12s %-12s %-12s %-12s"
% ("gsq", "mH/mW", "S0", "S1_heavy", "diff_LO", "diff_NLO", "err")
)
# CosmoTransitions object
ct_obj = SingleFieldInstanton(
phi_true(msq, lam, c6),
phi_false(msq, lam, c6),
lambda x: V(x, msq, lam, c6),
lambda x: dV(x, msq, lam, c6),
d2V=lambda x: ddV(x, msq, lam, c6),
alpha=(dim - 1),
)
# bounce calculation
profile = ct_obj.findProfile(xtol=1e-9, phitol=1e-9)
# bounce action
S0 = ct_obj.findAction(profile)
# creating bubble config
bub_config = BubbleConfig.fromCosmoTransitions(ct_obj, profile)
# masses that don't depend on g
phi_f = phi_false(msq, lam, c6)
phi_t = phi_true(msq, lam, c6)
mPhi_f = np.sqrt(ddV(phi_f, msq, lam, c6))
mPhi_t = np.sqrt(ddV(phi_t, msq, lam, c6))
# running over parameters: compare the exact heavy-field determinant with
# its derivative expansion at LO and NLO for each coupling gsq
for i in range(len(gsq_list)):
    gsq = gsq_list[i]
    # heavy mass
    mChi_t = np.sqrt(gsq * phi_t**2)
    # creating heavy field particle instance
    # NOTE(review): the lambda closes over the loop variable gsq (late
    # binding); safe here only because it is consumed within this same
    # iteration, before gsq is rebound.
    heavy = ParticleConfig(
        W_Phi=lambda x: gsq * x**2,
        spin=spin,
        dof_internal=1,
        zero_modes="None",
    )
    # creating bubble determinant instance
    bub_det_heavy = BubbleDeterminant(bub_config, heavy)
    # Heavy field fluctuation determinant
    S1_heavy, S1_heavy_err = bub_det_heavy.findDeterminant()
    # derivative expansion
    S1_heavy_LO, S1_heavy_LO_err = bub_det_heavy.findDerivativeExpansion(
        heavy, NLO=False
    )
    S1_heavy_NLO, S1_heavy_NLO_err = bub_det_heavy.findDerivativeExpansion(
        heavy, NLO=True
    )
    # relative deviation of each expansion order from the full result
    diff_LO = (S1_heavy_LO - S1_heavy) / abs(S1_heavy)
    diff_NLO = (S1_heavy_NLO - S1_heavy) / abs(S1_heavy)
    # worst relative numerical error among the three computations
    err = max(
        abs(S1_heavy_err / S1_heavy),
        abs(S1_heavy_LO_err / S1_heavy_LO),
        abs(S1_heavy_NLO_err / S1_heavy_NLO),
    )
    # assigning lists
    msq_ratio_list[i] = (mPhi_t / mChi_t) ** 2
    res_list[i] = S1_heavy
    diff_LO_list[i] = diff_LO
    diff_NLO_list[i] = diff_NLO
    err_list[i] = err
    # printing results
    print(
        "%-12g %-12g %-12g %-12g %-12g %-12g %-12g"
        % (gsq, mPhi_t / mChi_t, S0, S1_heavy, diff_LO, diff_NLO, err)
    )
popt, pcov = curve_fit(
line, np.log(msq_ratio_list[2:]), np.log(abs(diff_LO_list[2:]))
)
fit_curve = np.exp(line(np.log(msq_ratio_list), *popt))
fit_label = "y = %3g + %3g * x" % (popt[0], popt[1])
plt.plot(msq_ratio_list, fit_curve, "-", label=fit_label)
popt, pcov = curve_fit(
line, np.log(msq_ratio_list[2:]), np.log(abs(diff_NLO_list[2:]))
)
fit_curve = np.exp(line(np.log(msq_ratio_list), *popt))
fit_label = "y = %3g + %3g * x" % (popt[0], popt[1])
plt.plot(msq_ratio_list, fit_curve, "-", label=fit_label)
plt.plot(
msq_ratio_list, abs(diff_LO_list), "k+", fillstyle="none", label="LO"
)
plt.plot(
msq_ratio_list, abs(diff_NLO_list), "ko", fillstyle="none", label="NLO"
)
plt.xscale("log")
plt.yscale("log")
plt.title("Derivative expansion, $d = " + str(dim) + "$")
plt.ylabel(r"$\Delta S_1 / S_1$")
plt.xlabel(r"$m_\phi^2/m_\chi^2$")
plt.legend(loc="best")
plt.tight_layout()
plt.show() | PypiClean |
/MuPhyN-0.1.1.post4-py3-none-any.whl/muphyn/packages/core/application/plci_core_scheduler_exception.py |
import traceback
from typing import List
from muphyn.packages.core.base import LogManager
from muphyn.packages.core.application.plci_core_diagram import Diagram
from muphyn.packages.core.application.plci_core_box import Box
from muphyn.packages.core.application.plci_core_signal_event import SignalEvent
#-----------------------------------
# Class
#-----------------------------------
class SchedulerException(Exception) :
    """Exception carrying the full scheduler context captured when a simulation step fails."""

    # -------------
    # Constructors
    # -------------

    def __init__ (self, box_ : Box, box_bis_ : Box, events_ : List[SignalEvent], event_ : SignalEvent, diagram_ : Diagram, timing_ : float, exception_ : Exception):
        """Snapshot the scheduler state at the moment of the failure.

        Any of the box/event/diagram arguments may be None when the
        corresponding context was not available.
        """
        self._box = box_
        self._box_bis = box_bis_
        self._events = events_
        self._event = event_
        self._diagram = diagram_
        self._exception = exception_
        self._timing = timing_

    # -------------
    # Methods
    # -------------

    def print (self) :
        """Format the captured context as a multi-line report and log it as an error."""
        to_print_rows = []
        to_print_rows.append("SCHEDULER EXCEPTION : ")
        to_print_rows.append(f"\tException at : {self._timing: .3f}s")

        if self._box is None :
            to_print_rows.append("\tBox : No current box")
        else :
            to_print_rows.append(f"\tBox : {self._box.library} {self._box.name} | index : {self._box.index}")

        if self._box_bis is None :
            to_print_rows.append("\tBis box : No current bis box")
        else :
            # BUG FIX: the original read library/name from self._box in this
            # branch, showing the wrong box and crashing when _box is None
            # while _box_bis is set.
            to_print_rows.append(f"\tBix Box : {self._box_bis.library} {self._box_bis.name} | index : {self._box_bis.index}")

        if self._event is None :
            to_print_rows.append("\tCurrent event : No current event")
        else :
            to_print_rows.append(f"\tCurrent event : box index : {self._event.box.index} | signal index : {self._event.signal.index}")

            if self._event.signal not in self._diagram.box_inputs :
                to_print_rows.append("\tThe signal does not have any box to tickle !!!")

        if self._events is None :
            to_print_rows.append("\tEvents list : No events list")
        else :
            to_print_rows.append(f"\tEvents list : {len(self._events)} events in the queue")

        # NOTE(review): the one-argument form of traceback.format_exception
        # requires Python >= 3.10 — confirm the project's minimum version.
        to_print_rows.append(f"\t{''.join(traceback.format_exception(self._exception))}")

        LogManager().error('\n'.join(to_print_rows))
class TerminateSchedulerException(SchedulerException):
def __init__(self, timing_: float):
super().__init__(None, None, None, None, None, timing_, self)
def print(self):
return f"Scheduler interruption at {self._timing: .3f}s" | PypiClean |
/KL_Audit_supportV1.3-1.3-py3-none-any.whl/AuditModule/core/applications/AuditManagementModules.py | from AuditModule.common import AppConstants
from AuditModule.util import Logging as LOGG
import traceback
import json
from AuditModule.core.applications import AuditUserManagementStrategies
Logger = LOGG.get_logger()
def audit_logs_modules(application_type, content_type, application_data, op_type):
    """Dispatch audit-log field extraction to the strategy matching op_type.

    Returns a 7-tuple (user_name, client_id, user_role_name, module,
    operations, parameter_lable, status); every element is empty when
    op_type matches no known strategy or when extraction fails.
    """
    try:
        user_name = ""
        client_id = ""
        user_role_name = ""
        operations = ""
        module = ""
        parameter_lable = {}
        status = ""
        # Strategy description for this application, from the static mapping.
        strategy_json = AppConstants.AuditLogsConstants.audit_logs_mapping_json.get(application_type)
        if op_type == "UserAccess":
            user_name, client_id, user_role_name, module, operations, parameter_lable, status = \
                audit_logs_user_access_strategies(strategy_json, content_type, application_data)
        elif op_type == "DbUpdate":
            user_name, client_id, user_role_name, module, operations, parameter_lable, status = \
                audit_logs_db_access_strategies(strategy_json, content_type, application_data)
        return user_name, client_id, user_role_name, module, operations, parameter_lable, status
    except Exception as e:
        Logger.error('Error in audit Log modules ', str(e))
        # BUG FIX: the original returned a 5-tuple here while the success
        # path returns a 7-tuple, breaking any caller that unpacks the
        # result. Return the same shape with empty values instead.
        return "", "", "", "", "", {}, ""
def audit_logs_user_management_strategies(strategy_json, content_type, user_data):
    """Build an audit message for a user-management action (add/edit/delete).

    Returns (audit_message, action, user_id, json_string, label); the
    trailing two characters of the generated message are stripped before
    returning. Re-raises any failure as a generic Exception after logging.
    """
    try:
        operation_type = ""
        audit_message = ""
        action = ""
        user_id = ""
        label = ""
        json_string = {}
        application_context = user_data.get("application_context", "")
        # The strategy json describes where, in user_data, the operation
        # type lives for this application context.
        operation_type_reference_field = strategy_json.get("strategies", {}).get(
            application_context, {}).get("fields", {}).get("type", {}).get("reference_field", "")
        operation_type_field_type = strategy_json.get("strategies", {}).get(application_context, {}).get(
            "fields", {}).get("type", {}).get("field_type", "")
        if operation_type_field_type == "direct":
            operation_type = user_data.get(operation_type_reference_field, "")
        if application_context == "UserManagementAC":
            user_management_ac_obj = AuditUserManagementStrategies.UserManagementACStrategies()
            if operation_type == "delete":
                audit_message, user_id, json_string, label = \
                    user_management_ac_obj.generate_delete_message(strategy_json, user_data, application_context,
                                                                   operation_type)
                action = "Deletion"
            elif operation_type == "edit":
                audit_message, user_id, json_string, label = \
                    user_management_ac_obj.generate_edit_message(strategy_json, user_data, application_context,
                                                                 operation_type)
                action = "Change"
            else:
                # Anything that is not delete/edit is treated as an add.
                audit_message, user_id, json_string, label = \
                    user_management_ac_obj.generate_add_message(strategy_json, user_data, application_context,
                                                                operation_type)
                action = "Creation"
        # NOTE(review): [:-2] presumably strips a trailing ", " added by the
        # generators; for other contexts the message is "" and the slice is
        # a no-op — confirm against the generator implementations.
        return audit_message[:-2], action, user_id, json_string, label
    except Exception as e:
        print((traceback.format_exc()))
        Logger.error('Error in fetching user management strategies', str(e))
        raise Exception(str(e))
def audit_logs_user_access_strategies(strategy_json, content_type, user_data):
    """Extract audit fields from a login/logout user-access event.

    Returns (user_name, client_id, user_role_name, module, operations,
    parameter_lable, status); fields not present for the given action stay
    empty. Re-raises any failure as a generic Exception after logging.
    """
    try:
        user_name = ""
        client_id = ""
        user_role_name = ""
        operations = ""
        module = ""
        parameter_lable = {}
        status = ""
        if user_data['action'] == 'login':
            # Login events carry their payload under response_json.data.
            response = user_data['response_json']['data']
            user_name = response['user_name']
            operations = user_data.get("service_context", "")
            client_id = user_data.get("client_id", "")
            user_role_name = response.get("user_role_name", "")
            parameter_lable = response.get("parameter_lable", "")
            module = response.get("module", "")
            status = user_data['response_json'].get("status", "")
        elif user_data['action'] == '"logout"':
            # NOTE(review): the comparison value includes literal double
            # quotes ('"logout"'); presumably the upstream payload embeds
            # them — confirm against the producer before changing it.
            # Logout events carry their fields at the top level; `module`
            # is intentionally left empty here.
            user_name = user_data.get('user_id', "")
            operations = user_data.get("action", "")
            client_id = user_data.get('client_id', "")
            user_role_name = user_data.get("user_role_name", "")
            parameter_lable = user_data.get("parameter_lable", "")
            status = user_data['response_json']["status"]
        return user_name, client_id, user_role_name, module, operations, parameter_lable, status
    except Exception as e:
        print((traceback.format_exc()))
        Logger.error("Error in user Access ", str(e))
        raise Exception(str(e))
def audit_logs_db_access_strategies(strategy_json, content_type, user_data):
try:
user_name = ""
client_id = ""
user_role_name = ""
operations = ""
module = ""
parameter_lable = {}
status = ""
role_name = ""
if 'query_json' in user_data:
response = user_data['query_json']
user_name = response.get("user_name", "")
if not user_name:
user_name = response.get('user_id', "")
if not user_name:
user_name = user_data.get('user_id', "")
operations = user_data.get("action", "")
if not operations:
if user_data['query']:
operations = user_data['query'].get('key', "")
client_id = response.get("client_id", "")
user_role_name = response.get("user_role", "")
if type(user_role_name) is list:
user_role_name = user_role_name[0]
parameter_lable = json.dumps(user_data)
module = response.get("module", "")
status = user_data['query_json'].get("status", "success")
else:
response = user_data['query']
user_name = response.get("user_id", "")
operations = user_data.get("action", "")
module = user_data.get("module", "")
client_id = response.get("client_id", "")
user_role_name = response.get("userrole", "")
if type(user_role_name) is list:
user_role_name = user_role_name[0]
parameter_lable = json.dumps(user_data)
status = user_data.get("status", "success")
return user_name, client_id, user_role_name, module, operations, parameter_lable, status
except Exception as e:
print((traceback.format_exc()))
Logger.error("Error in DB access ", str(e)) | PypiClean |
/AFQ-Browser-0.3.tar.gz/AFQ-Browser-0.3/afqbrowser/gh_pages.py | import os
import os.path as op
import getpass
import tempfile
import pandas as pd
import github as gh
import git
def upload(target, repo_name, uname=None, upass=None, token=None, org=None,
to_vault=True):
"""
Upload an assembled AFQ-Browser site to a github pages website.
Parameters
----------
target : str
Local path to the file-system location where the AFQ-Browser files are
(need to run `assemble` before running this function)
repo_name : str
The website will be at https://<username>.github.io/<repo_name>
uname : str, optional
GitHub user-name
upass : str, optional
GitHub password
org : str, optional
When provided, this means that the website will be at:
https://<org>.github.io/<repo_name>. Defaults to use the user-name.
to_vault : bool, optional
Whether to deposit the data to afqvault. Default: True
"""
# Get all the files that will be committed/pushed
file_list = []
client_folder = op.join(target, 'client')
for path, dirs, files in os.walk(client_folder):
for f in files:
file_list.append(os.path.abspath(op.join(path, f)))
# Get credentials from the user
if uname is None:
uname = getpass.getpass("GitHub user-name? ")
if not any([upass, token]):
upass = getpass.getpass("GitHub password (leave blank if using 2FA "
"and personal access token)? ")
if not upass:
token = getpass.getpass("GitHub personal access token? ")
print('If prompted again for username and password, use your '
'access token as the password.')
login_uname = uname if token is None else token
# Create the remote repo on GitHub (use PyGithub)
g = gh.Github(login_uname, upass)
u = g.get_user()
if org is not None:
gh_org = g.get_organization(org)
remote = gh_org.create_repo(repo_name)
else:
remote = u.create_repo(repo_name)
# Create the local repo using GitPython:
r = git.Repo.init(client_folder)
# Add all of the files to the repo's gh-pages branch
r.index.add(file_list)
r.index.commit("Commit everything")
# Add a .nojekyll file
f = open(op.join(client_folder, '.nojekyll'), 'w')
f.close()
r.index.add([os.path.abspath(f.name)])
r.index.commit("Add nojekyll file")
# Push to GitHub
branch = r.create_head("gh-pages")
branch.checkout()
o = r.create_remote("origin", remote.clone_url)
assert o.exists()
o.push("gh-pages")
# Strangely, that last slash is crucial so that this works as a link:
if org is not None:
site_name = "https://" + org + ".github.io/" + repo_name + "/"
else:
site_name = "https://" + uname + ".github.io/" + repo_name + "/"
if to_vault:
# Next, we deposit to afqvault
afqvault_repo = g.get_repo('afqvault/afqvault')
# If you already have a fork, the following gives you the fork.
# Otherwise, it creates the fork:
my_fork = u.create_fork(afqvault_repo)
# Create a local copy of your fork:
tdir = tempfile.mkdtemp()
av_repo = git.Repo.init(op.join(tdir, 'afqvault'))
origin = av_repo.create_remote('origin', my_fork.clone_url)
origin.fetch()
av_repo.create_head('master', origin.refs.master)
av_repo.heads.master.set_tracking_branch(origin.refs.master)
av_repo.heads.master.checkout()
origin.pull()
# We create a new branch every time we do this, so that we can PR
# More than one time
branch_name = uname + "/" + repo_name + r.commit().hexsha
branch = av_repo.create_head(branch_name)
branch.checkout()
# Edit the manifest file with your information:
manifest_fname = op.join(tdir, 'afqvault', 'manifest.csv')
manifest = pd.read_csv(manifest_fname,
index_col=0)
shape = manifest.shape
manifest = manifest.append(pd.DataFrame(data=dict(
username=[uname if org is None else org],
repository_name=[repo_name])))
# Deduplicate -- if this site was already uploaded, we're done!
manifest = manifest.drop_duplicates()
manifest.to_csv(manifest_fname)
# Otherwise, we need to make a PR against afqvault
if manifest.shape != shape:
# Commit this change:
av_repo.index.add([os.path.abspath(manifest_fname)])
av_repo.index.commit("Adds %s" % site_name)
# Push it to that branch on your fork
origin.push(branch_name)
# Then, we create the PR against the central repo:
afqvault_repo.create_pull("Adds %s" % site_name,
"Auto-created by afqbrowser-publish",
"master",
"%s:%s" % (uname, branch_name))
return site_name | PypiClean |
/MarkDo-0.3.0.tar.gz/MarkDo-0.3.0/markdo/static/bower/codemirror/mode/rust/rust.js | CodeMirror.defineMode("rust", function() {
var indentUnit = 4, altIndentUnit = 2;
var valKeywords = {
"if": "if-style", "while": "if-style", "else": "else-style",
"do": "else-style", "ret": "else-style", "fail": "else-style",
"break": "atom", "cont": "atom", "const": "let", "resource": "fn",
"let": "let", "fn": "fn", "for": "for", "alt": "alt", "iface": "iface",
"impl": "impl", "type": "type", "enum": "enum", "mod": "mod",
"as": "op", "true": "atom", "false": "atom", "assert": "op", "check": "op",
"claim": "op", "native": "ignore", "unsafe": "ignore", "import": "else-style",
"export": "else-style", "copy": "op", "log": "op", "log_err": "op",
"use": "op", "bind": "op", "self": "atom"
};
var typeKeywords = function() {
var keywords = {"fn": "fn", "block": "fn", "obj": "obj"};
var atoms = "bool uint int i8 i16 i32 i64 u8 u16 u32 u64 float f32 f64 str char".split(" ");
for (var i = 0, e = atoms.length; i < e; ++i) keywords[atoms[i]] = "atom";
return keywords;
}();
var operatorChar = /[+\-*&%=<>!?|\.@]/;
// Tokenizer
// Used as scratch variable to communicate multiple values without
// consing up tons of objects.
var tcat, content;
function r(tc, style) {
tcat = tc;
return style;
}
function tokenBase(stream, state) {
var ch = stream.next();
if (ch == '"') {
state.tokenize = tokenString;
return state.tokenize(stream, state);
}
if (ch == "'") {
tcat = "atom";
if (stream.eat("\\")) {
if (stream.skipTo("'")) { stream.next(); return "string"; }
else { return "error"; }
} else {
stream.next();
return stream.eat("'") ? "string" : "error";
}
}
if (ch == "/") {
if (stream.eat("/")) { stream.skipToEnd(); return "comment"; }
if (stream.eat("*")) {
state.tokenize = tokenComment(1);
return state.tokenize(stream, state);
}
}
if (ch == "#") {
if (stream.eat("[")) { tcat = "open-attr"; return null; }
stream.eatWhile(/\w/);
return r("macro", "meta");
}
if (ch == ":" && stream.match(":<")) {
return r("op", null);
}
if (ch.match(/\d/) || (ch == "." && stream.eat(/\d/))) {
var flp = false;
if (!stream.match(/^x[\da-f]+/i) && !stream.match(/^b[01]+/)) {
stream.eatWhile(/\d/);
if (stream.eat(".")) { flp = true; stream.eatWhile(/\d/); }
if (stream.match(/^e[+\-]?\d+/i)) { flp = true; }
}
if (flp) stream.match(/^f(?:32|64)/);
else stream.match(/^[ui](?:8|16|32|64)/);
return r("atom", "number");
}
if (ch.match(/[()\[\]{}:;,]/)) return r(ch, null);
if (ch == "-" && stream.eat(">")) return r("->", null);
if (ch.match(operatorChar)) {
stream.eatWhile(operatorChar);
return r("op", null);
}
stream.eatWhile(/\w/);
content = stream.current();
if (stream.match(/^::\w/)) {
stream.backUp(1);
return r("prefix", "variable-2");
}
if (state.keywords.propertyIsEnumerable(content))
return r(state.keywords[content], content.match(/true|false/) ? "atom" : "keyword");
return r("name", "variable");
}
function tokenString(stream, state) {
var ch, escaped = false;
while (ch = stream.next()) {
if (ch == '"' && !escaped) {
state.tokenize = tokenBase;
return r("atom", "string");
}
escaped = !escaped && ch == "\\";
}
// Hack to not confuse the parser when a string is split in
// pieces.
return r("op", "string");
}
function tokenComment(depth) {
return function(stream, state) {
var lastCh = null, ch;
while (ch = stream.next()) {
if (ch == "/" && lastCh == "*") {
if (depth == 1) {
state.tokenize = tokenBase;
break;
} else {
state.tokenize = tokenComment(depth - 1);
return state.tokenize(stream, state);
}
}
if (ch == "*" && lastCh == "/") {
state.tokenize = tokenComment(depth + 1);
return state.tokenize(stream, state);
}
lastCh = ch;
}
return "comment";
};
}
// Parser
var cx = {state: null, stream: null, marked: null, cc: null};
function pass() {
for (var i = arguments.length - 1; i >= 0; i--) cx.cc.push(arguments[i]);
}
function cont() {
pass.apply(null, arguments);
return true;
}
function pushlex(type, info) {
var result = function() {
var state = cx.state;
state.lexical = {indented: state.indented, column: cx.stream.column(),
type: type, prev: state.lexical, info: info};
};
result.lex = true;
return result;
}
function poplex() {
var state = cx.state;
if (state.lexical.prev) {
if (state.lexical.type == ")")
state.indented = state.lexical.indented;
state.lexical = state.lexical.prev;
}
}
function typecx() { cx.state.keywords = typeKeywords; }
function valcx() { cx.state.keywords = valKeywords; }
poplex.lex = typecx.lex = valcx.lex = true;
function commasep(comb, end) {
function more(type) {
if (type == ",") return cont(comb, more);
if (type == end) return cont();
return cont(more);
}
return function(type) {
if (type == end) return cont();
return pass(comb, more);
};
}
function stat_of(comb, tag) {
return cont(pushlex("stat", tag), comb, poplex, block);
}
// Statement-level dispatch inside a block: item keywords ("let", "fn",
// "type", ...) get dedicated sub-parsers; anything else is treated as a
// bare expression statement.
function block(type) {
if (type == "}") return cont();
if (type == "let") return stat_of(letdef1, "let");
if (type == "fn") return stat_of(fndef);
if (type == "type") return cont(pushlex("stat"), tydef, endstatement, poplex, block);
if (type == "enum") return stat_of(enumdef);
if (type == "mod") return stat_of(mod);
if (type == "iface") return stat_of(iface);
if (type == "impl") return stat_of(impl);
if (type == "open-attr") return cont(pushlex("]"), commasep(expression, "]"), poplex);
if (type == "ignore" || type.match(/[\]\);,]/)) return cont(block);
return pass(pushlex("stat"), expression, poplex, endstatement, block);
}
// Consume an optional trailing semicolon.
function endstatement(type) {
if (type == ";") return cont();
return pass();
}
// Expression parser: atoms/names, brace and bracket constructs, the
// control-flow forms, function literals and macros.
function expression(type) {
if (type == "atom" || type == "name") return cont(maybeop);
if (type == "{") return cont(pushlex("}"), exprbrace, poplex);
if (type.match(/[\[\(]/)) return matchBrackets(type, expression);
if (type.match(/[\]\)\};,]/)) return pass();
if (type == "if-style") return cont(expression, expression);
if (type == "else-style" || type == "op") return cont(expression);
if (type == "for") return cont(pattern, maybetype, inop, expression, expression);
if (type == "alt") return cont(expression, altbody);
if (type == "fn") return cont(fndef);
if (type == "macro") return cont(macro);
return cont();
}
// After a value: field access ("."), "::<" type application, operators,
// and call/index brackets.  `content` holds the raw token text and is
// set by the tokenizer in the enclosing mode closure.
function maybeop(type) {
if (content == ".") return cont(maybeprop);
if (content == "::<"){return cont(typarams, maybeop);}
if (type == "op" || content == ":") return cont(expression);
if (type == "(" || type == "[") return matchBrackets(type, expression);
return pass();
}
// Property name following a "." access.
function maybeprop(type) {
if (content.match(/^\w+$/)) {cx.marked = "variable"; return cont(maybeop);}
return pass(expression);
}
// Disambiguate "{ ... }": closure header (|args| or ||), record literal
// ("name:" not followed by "::"), or a plain statement block.
function exprbrace(type) {
if (type == "op") {
if (content == "|") return cont(blockvars, poplex, pushlex("}", "block"), block);
if (content == "||") return cont(poplex, pushlex("}", "block"), block);
}
if (content == "mutable" || (content.match(/^\w+$/) && cx.stream.peek() == ":"
&& !cx.stream.match("::", false)))
return pass(record_of(expression));
return pass(block);
}
// Parse record fields "name: <comb>" (with optional "mutable"/"with"
// keywords) until the closing "}".
function record_of(comb) {
function ro(type) {
if (content == "mutable" || content == "with") {cx.marked = "keyword"; return cont(ro);}
if (content.match(/^\w*$/)) {cx.marked = "variable"; return cont(ro);}
if (type == ":") return cont(comb, ro);
if (type == "}") return cont();
return cont(ro);
}
return ro;
}
// Closure argument names between the "|" bars.
function blockvars(type) {
if (type == "name") {cx.marked = "def"; return cont(blockvars);}
if (type == "op" && content == "|") return cont();
return cont(blockvars);
}
// "let" bindings: pattern [: type] ["=" expression], comma-separated.
function letdef1(type) {
if (type.match(/[\]\)\};]/)) return cont();
if (content == "=") return cont(expression, letdef2);
if (type == ",") return cont(letdef1);
return pass(pattern, maybetype, letdef1);
}
// Continue after the initializer expression of a "let" binding.
function letdef2(type) {
if (type.match(/[\]\)\};,]/)) return pass(letdef1);
else return pass(expression, letdef2);
}
// Optional ": type" annotation.
function maybetype(type) {
if (type == ":") return cont(typecx, rtype, valcx);
return pass();
}
// Highlight the "in" of a for-loop as a keyword.
function inop(type) {
if (type == "name" && content == "in") {cx.marked = "keyword"; return cont();}
return pass();
}
// Function definition: pointer sigils, name, type params, argument list,
// "->" return type, then falls through into the body expression.
function fndef(type) {
if (content == "@" || content == "~") {cx.marked = "keyword"; return cont(fndef);}
if (type == "name") {cx.marked = "def"; return cont(fndef);}
if (content == "<") return cont(typarams, fndef);
if (type == "{") return pass(expression);
if (type == "(") return cont(pushlex(")"), commasep(argdef, ")"), poplex, fndef);
if (type == "->") return cont(typecx, rtype, valcx, fndef);
if (type == ";") return cont();
return cont(fndef);
}
// "type" alias definition: name, optional type params, "=" right side.
function tydef(type) {
if (type == "name") {cx.marked = "def"; return cont(tydef);}
if (content == "<") return cont(typarams, tydef);
if (content == "=") return cont(typecx, rtype, valcx);
return cont(tydef);
}
// "enum" definition header; "{" opens the variant body.
function enumdef(type) {
if (type == "name") {cx.marked = "def"; return cont(enumdef);}
if (content == "<") return cont(typarams, enumdef);
if (content == "=") return cont(typecx, rtype, valcx, endstatement);
if (type == "{") return cont(pushlex("}"), typecx, enumblock, valcx, poplex);
return cont(enumdef);
}
// Enum variants, possibly with a parenthesized payload type list.
function enumblock(type) {
if (type == "}") return cont();
if (type == "(") return cont(pushlex(")"), commasep(rtype, ")"), poplex, enumblock);
if (content.match(/^\w+$/)) cx.marked = "def";
return cont(enumblock);
}
// "mod" definition: name then a braced block of items.
function mod(type) {
if (type == "name") {cx.marked = "def"; return cont(mod);}
if (type == "{") return cont(pushlex("}"), block, poplex);
return pass();
}
// "iface" (interface) definition.
function iface(type) {
if (type == "name") {cx.marked = "def"; return cont(iface);}
if (content == "<") return cont(typarams, iface);
if (type == "{") return cont(pushlex("}"), block, poplex);
return pass();
}
// "impl" definition; "of"/"for" introduce the interface / target type.
function impl(type) {
if (content == "<") return cont(typarams, impl);
if (content == "of" || content == "for") {cx.marked = "keyword"; return cont(rtype, impl);}
if (type == "name") {cx.marked = "def"; return cont(impl);}
if (type == "{") return cont(pushlex("}"), block, poplex);
return pass();
}
// Type-parameter list between "<" and ">", with ":" bound constraints.
function typarams(type) {
if (content == ">") return cont();
if (content == ",") return cont(typarams);
if (content == ":") return cont(rtype, typarams);
return pass(rtype, typarams);
}
// One function argument: name [: type].
function argdef(type) {
if (type == "name") {cx.marked = "def"; return cont(argdef);}
if (type == ":") return cont(typecx, rtype, valcx);
return pass();
}
// Type expression parser (used in type position).
function rtype(type) {
if (type == "name") {cx.marked = "variable-3"; return cont(rtypemaybeparam); }
if (content == "mutable") {cx.marked = "keyword"; return cont(rtype);}
if (type == "atom") return cont(rtypemaybeparam);
if (type == "op" || type == "obj") return cont(rtype);
if (type == "fn") return cont(fntype);
if (type == "{") return cont(pushlex("{"), record_of(rtype), poplex);
return matchBrackets(type, rtype);
}
// Optional "<...>" type parameters after a named type.
function rtypemaybeparam(type) {
if (content == "<") return cont(typarams);
return pass();
}
// "fn(arg types) -> ret" in type position.
function fntype(type) {
if (type == "(") return cont(pushlex("("), commasep(rtype, ")"), poplex, fntype);
if (type == "->") return cont(rtype);
return pass();
}
// Destructuring-pattern parser (let bindings, for loops, alt arms).
function pattern(type) {
if (type == "name") {cx.marked = "def"; return cont(patternmaybeop);}
if (type == "atom") return cont(patternmaybeop);
if (type == "op") return cont(pattern);
if (type.match(/[\]\)\};,]/)) return pass();
return matchBrackets(type, pattern);
}
// After a pattern element: "." and "to" range patterns.
function patternmaybeop(type) {
if (type == "op" && content == ".") return cont();
if (content == "to") {cx.marked = "keyword"; return cont(pattern);}
else return pass();
}
// Body of an "alt" (match) expression.
function altbody(type) {
if (type == "{") return cont(pushlex("}", "alt"), altblock1, poplex);
return pass();
}
// One "alt" arm: pattern(s) separated by "|", optional "when" guard.
function altblock1(type) {
if (type == "}") return cont();
if (type == "|") return cont(altblock1);
if (content == "when") {cx.marked = "keyword"; return cont(expression, altblock2);}
if (type.match(/[\]\);,]/)) return cont(altblock1);
return pass(pattern, altblock2);
}
// The braced body following an "alt" arm's pattern/guard.
function altblock2(type) {
if (type == "{") return cont(pushlex("}", "alt"), block, poplex, altblock1);
else return pass(altblock1);
}
// Macro invocation: a single bracketed argument group.
function macro(type) {
if (type.match(/[\[\(\{]/)) return matchBrackets(type, expression);
return pass();
}
// Dispatch an opening bracket to the matching comma-separated parser.
function matchBrackets(type, comb) {
if (type == "[") return cont(pushlex("]"), commasep(comb, "]"), poplex);
if (type == "(") return cont(pushlex(")"), commasep(comb, ")"), poplex);
if (type == "{") return cont(pushlex("}"), commasep(comb, "}"), poplex);
return cont();
}
// Drive the combinator stack for one token: pop combinators until one
// consumes the token (returns true), then run any pending .lex
// combinators and return the style (possibly overridden by cx.marked).
function parse(state, stream, style) {
var cc = state.cc;
// Communicate our context to the combinators.
// (Less wasteful than consing up a hundred closures on every call.)
cx.state = state; cx.stream = stream; cx.marked = null, cx.cc = cc;
while (true) {
// An empty stack means we are at the top level: parse a block item.
var combinator = cc.length ? cc.pop() : block;
if (combinator(tcat)) {
while(cc.length && cc[cc.length - 1].lex)
cc.pop()();
return cx.marked || style;
}
}
}
// The CodeMirror mode object returned by defineMode.
return {
startState: function() {
return {
tokenize: tokenBase,
cc: [],
lexical: {indented: -indentUnit, column: 0, type: "top", align: false},
keywords: valKeywords,
indented: 0
};
},
token: function(stream, state) {
if (stream.sol()) {
// A new line starts unaligned until the first real token is seen.
if (!state.lexical.hasOwnProperty("align"))
state.lexical.align = false;
state.indented = stream.indentation();
}
if (stream.eatSpace()) return null;
// tcat/content are module-closure variables filled by the tokenizer.
tcat = content = null;
var style = state.tokenize(stream, state);
if (style == "comment") return style;
if (!state.lexical.hasOwnProperty("align"))
state.lexical.align = true;
if (tcat == "prefix") return style;
if (!content) content = stream.current();
return parse(state, stream, style);
},
indent: function(state, textAfter) {
// Inside a multi-line token (string/comment) don't re-indent.
if (state.tokenize != tokenBase) return 0;
var firstChar = textAfter && textAfter.charAt(0), lexical = state.lexical,
type = lexical.type, closing = firstChar == type;
if (type == "stat") return lexical.indented + indentUnit;
if (lexical.align) return lexical.column + (closing ? 0 : 1);
return lexical.indented + (closing ? 0 : (lexical.info == "alt" ? altIndentUnit : indentUnit));
},
electricChars: "{}"
};
});
CodeMirror.defineMIME("text/x-rustsrc", "rust"); | PypiClean |
/Buycoins%20Python%20SDK-0.0.1.tar.gz/Buycoins Python SDK-0.0.1/README.md | # Buycoins Python SDK
Python SDK for Buycoins

# Introduction
Buycoins SDK is a Python SDK for the Buycoins API
With this SDK, you gain access to all the functionality of [the official Buycoins API](https://developers.buycoins.africa).
This means that you can:
* Buy and sell cryptocurrencies like Bitcoin and Ethereum instantly
* Perform P2P trading on the Buycoins platform
* Get the latest prices of various cryptocurrencies in Naira and so on.
## Features
Buycoins Python SDK comes with amazing features and benefits such as:
* **Strong emphasis on types**
Everyone loves types!
All Buycoins GraphQL types and enums have a corresponding native Python class or Enum provided by this package.
There are also convenience methods for converting fields of a GraphQL response to a native Python object instantly.
* **Capability**
With this SDK, you have access to all the functionality of the official Buycoins API
* **Flexibility**
While you can do almost anything you need with the classes provided, you can also write your own custom queries if you choose to — and much more.
You can learn more about Buycoins SDK through the [official documentation](https://buycoins-python-sdk.readthedocs.io/en/latest/index.html) | PypiClean |
/observations-0.1.4.tar.gz/observations-0.1.4/observations/r/mroz.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import numpy as np
import os
import sys
from observations.util import maybe_download_and_extract
def mroz(path):
  """Load the Mroz (1987) U.S. women's labor-force participation data.

  The 753 observations come from the Panel Study of Income Dynamics
  (PSID) and describe married women (labor-force participation, number
  of children, age, college attendance, log expected wage, family
  income).  The CSV is downloaded on first use and cached under `path`.

  Args:
    path: str. Directory that stores (or will receive) `mroz.csv`.

  Returns:
    Tuple of np.ndarray `x_train` and dictionary `metadata` mapping
    'columns' to the feature names.
  """
  import pandas as pd
  path = os.path.expanduser(path)
  csv_path = os.path.join(path, 'mroz.csv')
  # Fetch and cache the raw CSV if it is not already present.
  if not os.path.exists(csv_path):
    maybe_download_and_extract(path,
                               'http://dustintran.com/data/r/car/Mroz.csv',
                               save_file_name='mroz.csv',
                               resume=False)
  data = pd.read_csv(csv_path, index_col=0, parse_dates=True)
  x_train = data.values
  metadata = {'columns': data.columns}
return x_train, metadata | PypiClean |
/CheckMates-0.2.0-py3-none-any.whl/checkmates/utils/woodwork_utils.py | import numpy as np
import pandas as pd
import woodwork as ww
# Woodwork logical-type strings whose physical representation is numeric
# or boolean (including the nullable variants); used by callers to test
# whether a column can take part in numeric operations.
numeric_and_boolean_ww = [
    ww.logical_types.Integer.type_string,
    ww.logical_types.Double.type_string,
    ww.logical_types.Boolean.type_string,
    ww.logical_types.Age.type_string,
    ww.logical_types.AgeFractional.type_string,
    ww.logical_types.IntegerNullable.type_string,
    ww.logical_types.BooleanNullable.type_string,
    ww.logical_types.AgeNullable.type_string,
]
def _numpy_to_pandas(array):
if len(array.shape) == 1:
data = pd.Series(array)
else:
data = pd.DataFrame(array)
return data
def _list_to_pandas(values):
    """Convert a plain Python list to a pandas object via numpy.

    Renamed the parameter from ``list`` (which shadowed the builtin) to
    ``values``; this private helper is only called positionally.
    """
    return _numpy_to_pandas(np.array(values))
def infer_feature_types(data, feature_types=None):
    """Create a Woodwork structure from the given list, pandas, or numpy input, with specified types for columns. If a column's type is not specified, it will be inferred by Woodwork.

    Args:
        data (pd.DataFrame, pd.Series): Input data to convert to a Woodwork data structure.
        feature_types (string, ww.logical_type obj, dict, optional): If data is a 2D structure, feature_types must be a dictionary
            mapping column names to the type of data represented in the column. If data is a 1D structure, then feature_types must be
            a Woodwork logical type or a string representing a Woodwork logical type ("Double", "Integer", "Boolean", "Categorical", "Datetime", "NaturalLanguage")

    Returns:
        A Woodwork data structure where the data type of each column was either specified or inferred.

    Raises:
        ValueError: If there is a mismatch between the dataframe and the woodwork schema.
    """
    # Normalize plain Python / numpy containers to pandas first.
    if isinstance(data, list):
        data = _list_to_pandas(data)
    elif isinstance(data, np.ndarray):
        data = _numpy_to_pandas(data)
    # Already Woodwork-initialized: validate the existing schema and return.
    if data.ww.schema is not None:
        if isinstance(data, pd.DataFrame) and not ww.is_schema_valid(
            data,
            data.ww.schema,
        ):
            ww_error = ww.get_invalid_schema_message(data, data.ww.schema)
            if "dtype mismatch" in ww_error:
                # Most common failure: a transformation bypassed the ww accessor.
                ww_error = (
                    "Dataframe types are not consistent with logical types. This usually happens "
                    "when a data transformation does not go through the ww accessor. Call df.ww.init() to "
                    f"get rid of this message. This is a more detailed message about the mismatch: {ww_error}"
                )
            else:
                ww_error = f"{ww_error}. Please initialize ww with df.ww.init() to get rid of this message."
            raise ValueError(ww_error)
        return data
    if isinstance(data, pd.Series):
        # An all-NaN series cannot be type-inferred; force Double.
        if all(data.isna()):
            data = data.replace(pd.NA, np.nan)
            feature_types = "Double"
        return ww.init_series(data, logical_type=feature_types)
    else:
        # Copy so the caller's dataframe is not mutated by ww.init.
        ww_data = data.copy()
        ww_data.ww.init(logical_types=feature_types)
return ww_data | PypiClean |
/EpyNN-1.2.11.tar.gz/EpyNN-1.2.11/epynn/gru/parameters.py | import numpy as np
def gru_compute_shapes(layer, A):
    """Record forward shapes and dimensions for a GRU layer.

    Reads the (m, s, e) shape of the input, stores the sample / step /
    element dimensions in ``layer.d`` and the parameter shapes for the
    update gate (z), reset gate (r) and hidden-hat (hh) in ``layer.fs``.
    """
    layer.fs['X'] = A.shape
    m, s, e = A.shape
    layer.d['m'] = m      # Number of samples
    layer.d['s'] = s      # Steps in sequence
    layer.d['e'] = e      # Elements per step
    u = layer.d['u']      # Unit cells
    # Each gate shares the same three shapes: input weights (e, u),
    # recurrent weights (u, u) and bias (1, u).
    for name in ('Uz', 'Ur', 'Uhh'):
        layer.fs[name] = (e, u)
    for name in ('Vz', 'Vr', 'Vhh'):
        layer.fs[name] = (u, u)
    for name in ('bz', 'br', 'bhh'):
        layer.fs[name] = (1, u)
    # Hidden state shape across all steps.
    layer.fs['h'] = (m, s, u)
    return None
def gru_initialize_parameters(layer):
    """Initialize trainable GRU parameters from the recorded shapes.

    For each of the update gate (z), reset gate (r) and hidden-hat (hh):
    input weights U and recurrent weights V are drawn from the layer's
    initialization scheme, biases b start at zero.
    """
    init = layer.initialization
    for gate in ('z', 'r', 'hh'):
        layer.p['U' + gate] = init(layer.fs['U' + gate], rng=layer.np_rng)
        layer.p['V' + gate] = init(layer.fs['V' + gate], rng=layer.np_rng)
        layer.p['b' + gate] = np.zeros(layer.fs['b' + gate])
    return None
def gru_compute_gradients(layer):
    """Compute gradients with respect to weight and bias for layer.

    Accumulates dL/dU, dL/dV and dL/db for the hidden-hat (hh), update
    gate (z) and reset gate (r) by iterating the sequence backwards.
    Assumes the backward pass has already filled layer.bc with the
    per-step gate gradients.
    """
    # Gradients initialization with respect to parameters
    for parameter in layer.p.keys():
        gradient = 'd' + parameter
        layer.g[gradient] = np.zeros_like(layer.p[parameter])
    # Reverse iteration over sequence steps
    for s in reversed(range(layer.d['s'])):
        X = layer.fc['X'][:, s]    # Input for current step
        hp = layer.fc['hp'][:, s]  # Previous hidden state
        # (1) Gradients of the loss with respect to U, V, b
        dhh_ = layer.bc['dhh_'][:, s]  # Gradient w.r.t hidden hat hh_
        layer.g['dUhh'] += np.dot(X.T, dhh_)  # (1.1) dL/dUhh
        # (1.2) dL/dVhh -- note V sees the reset-gated previous state.
        layer.g['dVhh'] += np.dot((layer.fc['r'][:, s] * hp).T, dhh_)
        layer.g['dbhh'] += np.sum(dhh_, axis=0)  # (1.3) dL/dbhh
        # (2) Gradients of the loss with respect to U, V, b
        dz_ = layer.bc['dz_'][:, s]  # Gradient w.r.t update gate z_
        layer.g['dUz'] += np.dot(X.T, dz_)  # (2.1) dL/dUz
        layer.g['dVz'] += np.dot(hp.T, dz_)  # (2.2) dL/dVz
        layer.g['dbz'] += np.sum(dz_, axis=0)  # (2.3) dL/dbz
        # (3) Gradients of the loss with respect to U, V, b
        dr_ = layer.bc['dr_'][:, s]  # Gradient w.r.t reset gate r_
        layer.g['dUr'] += np.dot(X.T, dr_)  # (3.1) dL/dUr
        layer.g['dVr'] += np.dot(hp.T, dr_)  # (3.2) dL/dVr
        layer.g['dbr'] += np.sum(dr_, axis=0)  # (3.3) dL/dbr
    return None
def gru_update_parameters(layer):
    """Apply one gradient-descent step to every parameter of the layer.

    Each gradient key is the parameter name prefixed with 'd'; the step
    size is the learning rate scheduled for the current epoch.
    """
    lr = layer.lrate[layer.e]
    for gradient_name, gradient in layer.g.items():
        # Strip the leading 'd' to recover the parameter key.
        layer.p[gradient_name[1:]] -= lr * gradient
return None | PypiClean |
/HPPPM-0.1.tar.gz/HPPPM-0.1/hpppm/demand_management.py | import re
from datetime import datetime
from httplib import *
from jinja2 import *
from hpppm.error_handler import *
__version__ = '0.1'
class DemandManagement(ErrorHandler):
"""
A framework that helps automate the Web service interaction offered by
HP Project and Portfolio Management(aka - HPPPM).HPPPM is an industry
wide tool that is used to standardize, manage and capture the execution
of a project and operational activities.For more on HPPPM refer the
online documentation at HP.HPPPM offers Web Service operations to
various interfacing applications involved in a project to talk to each
other.HPPPM offers solutions for various activities of an organization
viz - application portfolio, demand, financial and so on.This framework
currently supports Demand Management only.
The framework is built up on 3 modules that have a designated task to do:
field_parser - A Higher Order Python parser meant to parse the input fields
that will be used in creating the Web service request.
This module is generic and can be used by others after
tweaking as per need.
error_handler - Performs command line parsing, validation and error/info
extraction.
demand_management - Creates the Web Service request and does an HTTP post
to the Web service.
All the above modules offer utilities/methods/functions to the outside
world.The framework is typically meant to run via a wrapper script that
uses the utilities offered.A sample wrapper script is bundled along with
this distribution under the bin dir.
SYNOPSIS:
Command Call:
python bin/hpppm_demand_management.py -o createRequest -u user -p password -f data/createRequest.data -c cfg/logging.conf
-o is the webservice operation being performed
-u user authorized to perform web service operation
-p user's password
-f location of file containing input fields that will be used to create
the web service request.Instead of a path this can also be a string
containing the input fields.A sample data file for each web service
operation has been bundled along with distribution under data dir.
-c location to the configuration file that drives logging behavior.
Utilites and typical usage:
import hpppm.field_parser
from hpppm.demand_management import *
hpdm = DemandManagement();
fields = hpdm.validate_read_cmdargs(sys.argv)
tags = hpdm.get_inputs(hpdm.get_current_oper())
inputs = hpppm.field_parser.parser(fields, tags)
ret = hpdm.validate_inputs(inputs)
if 'fields' in tags: ret = hpdm.validate_tokens(inputs['fields'])
req = hpdm.create_request(inputs)
res = hpdm.post_request(inputs['serviceUrl'][0], req)
ret = hpdm.extract(res, to_extract=['faultcode', 'faultstring', 'exception:detail', 'id', 'return'])
DETAILS:
A little knowledge in how HPPPM works is absolutely necessary if you
intend to use this framework to automate webservice calling for you.
In HPPPM each work item is designated as a request and is similar in
concept to a ticket in many ticketing systems.
A request in HPPPM is made up of request type, request header type
and workflow.The request type and header are made up of request fields,
validations, rules, security and statuses.The workflow is the request
component that gets activated once the request is submitted.The workflow
is made up various sub components that are classified as Executional,
Decisional, Conditional and SubWorkflows.The Decisional subcompnents
are the trigger points for user action and they in turn trigger the
Executional and/or Conditional sub components as governed by the
business logic.Please note that all fields have a unique token name
through which it is referenced internally and also in the Webservice
call.
Following are the Web Service Operations that the framework helps you
play with:
addRequestNotes - Add notes to an existing PPM request.
createRequest - Create a new request in PPM.
deleteRequest - Delete PPM requests.
executeWFTransitions - Move workflow and the request as a whole from
one Decision step to another.
getRequests - Get PPM request fields and their values.
setRequestFields - Update fields of an existing PPM request.
setRequestRemoteReferenceStatus - Updates the status of a remote
reference in a request in PPM.
example:
Let us assume that application XYZ wants to create a HP PPM request
using this framework.XYZ application will need the following(apart
from this framework installed and working)
username of the user authorized in PPM to do the webservice operation
password of the above user in PPM
input fields in the format the framework expects
A sample input field format:
"<serviceUrl>" "http://abc.com:8080/ppmservices/DemandService?wsdl" "</serviceUrl>" "<requestType>" "ABC" "</requestType>" "<fields>" "REQ.VP.APPLICATION" "COMMON" "REQ.VP.ID" "1102" "REQD.VP.RELATED" "No" "REQ.VP.PRIORITY" "2" "</fields>" "<URLReferences>" "abc" "abc" "abc" "</URLReferences>" "<notes>" "varun" "test by varun" "</notes>"
All token names and their values go inside the <fields> tags.If you are
setting URLReferences they must atleast have a single field which is the
name("abc" above) of the URLReference that will appear in the PPM request.
For notes write the authorname first followed by the note.Enclose all tags
,fields and their values in double quotes and separated by spaces.
The XYZ application needs to change the input fields as per their requirement
and use the command call listed in SYNOPSIS to create a request in the PPM
environment enclosed between serviceUrl tag.
Following is a listing of supported Web services operations and their
mandatory input types:
createRequest : serviceUrl, requestType, fields
addRequestNotes : serviceUrl, requestId, notes
executeWFTransitions : serviceUrl, receiver, transition
deleteRequests : serviceUrl, requestIds
getRequests : serviceUrl, requestIds
setRequestFields : serviceUrl, requestId, fields
setRequestRemoteReferenceStatus : serviceUrl, receiver, source, status, fields
Following is the sample input for various operations supported by this
framework:
addRequestNotes:
"<serviceUrl>" "http://abc.com:8080/ppmservices/DemandService?wsdl" "</serviceUrl>" "<requestId>" "30990" "</requestId>" "<notes>" "varun" "test by varun" "</notes>"
deleteRequests:
"<serviceUrl>" "http://abc.com:8080/ppmservices/DemandService?wsdl" "</serviceUrl>" "<requestIds>" "31520" "31521" "</requestIds>"
executeWFTransitions:
"<serviceUrl>" "http://abc.com:8080/ppmservices/DemandService?wsdl" "</serviceUrl>" "<receiver>" "31490" "</receiver>" "<transition>" "Review Complete" "</transition>"
getRequests:
"<serviceUrl>" "http://abc.com:8080/ppmservices/DemandService?wsdl" "</serviceUrl>" "<requestIds>" "30935" "30936" "</requestIds>"
setRequestFields:
"<serviceUrl>" "http://abc.com:8080/ppmservices/DemandService?wsdl" "</serviceUrl>" "<requestId>" "31490" "</requestId>" "<fields>" "REQD.VP.ORG" "ABC" "REQD.VP.DETAILED_DESC" "Test by Varun" "</fields>"
setRequestRemoteReferenceStatus:
"<serviceUrl>" "http://abc.com:8080/ppmservices/DemandService?wsdl" "</serviceUrl>" "<receiver>" "31490" "http://t.com:8090" "</receiver>" "<source>" "31490" "http://t.com:8090" "</source>" "<status>" "Assigned" "</status>" "<fields>" "REQD.VP.ORG" "Another test" "REQD.VP.DETAILED_DESC" "Another test Varun" "</fields>"
For reference sake the above sample inputs for various operations is also
saved under data dir.
LOGGING & DEBUGGING:
To enable troubleshooting the framework logs activites in a log file(
sample stored under logs dir).The logging is controlled via a config
file stored under cfg dir.
A VERY IMPORTANT NOTE:
The framework supports test driven development and has a test suite
to help in unit testing.The test suite can be located under the test
dir.Also, before using this framework take a look at the various
templates under the templates directory and modify them as per your
specifications.This framework works for HPPPM 9.14 and is backward
compatiable as well.However, if you come across any deviations please
feel free to mail me your observations.
"""
    def create_request(self, inputs):
        """Render the SOAP request body for the current operation.

        Loads the Jinja2 template named after the current Web-service
        operation (e.g. ``createRequest.xml``) from ``hpppm/templates``
        and renders it with ``inputs`` merged with ``self.data``.
        Exits the process on any template error.
        """
        logger = logging.getLogger(__name__)
        operation = self.data['CURRENT_OPERATION']
        # NOTE(review): %Z renders empty for naive datetimes -- confirm the
        # receiving service accepts a timestamp without a timezone suffix.
        self.data['DATETIME'] = datetime.now().strftime("%Y-%m-%dT%H:%M:%S%Z")
        logger.info("Creating Request for "+operation+" operation")
        try:
            env = Environment(loader=PackageLoader('hpppm', 'templates'))
            template = env.get_template(operation+'.xml')
            # Merge instance data so templates can reference e.g. DATETIME.
            inputs.update(self.data)
            request = template.render(inputs)
        except TemplateNotFound, err:
            logger.error("Req creation failed Error: "+str(err)+" not found")
            sys.exit(1)
        except UndefinedError, err:
            logger.error("Req creation failed Error: "+str(err)+" not defined")
            sys.exit(1)
        except TemplateSyntaxError, err:
            logger.error("Req creation failed Error: "+str(err)+" syntax error")
            sys.exit(1)
        logger.info("Request created successfully!")
        logger.debug("Request created:\n"+request)
        return request
    def post_request(self, url, request, host=None, port=None):
        """POSTs the request to the url passed in.

        Tries to extract the host and port from the url if host and port
        are not passed in.  Checks if the web service url is available
        (via the inherited check_url_availability) before posting.
        Returns the raw response body, or False if the url is unreachable;
        exits the process on an HTTP-level error.
        """
        logger = logging.getLogger(__name__)
        operation = self.data['CURRENT_OPERATION']
        if not self.check_url_availability(url): return False
        if not (host and port):
            # Pull "host" and "port" out of e.g. http://h:8080/path
            match = re.search(r'://(?P<host>.+?):(?P<port>\d+)/', url)
            host, port = match.group('host'), match.group('port')
        logger.info("About to POST above request to "+url)
        try:
            http = HTTPConnection(host, port)
            http.request("POST", url, body=request, headers = {
                "SOAPAction": operation,
                "Content-Type": "text/xml; charset=UTF-8",
                "Content-Length": len(request)
            })
            response = http.getresponse().read()
        except HTTPException, err:
            logger.error("Posting failed Error: "+str(err))
            sys.exit(1)
        logger.info("POSTing successful!")
        logger.debug("Response received:\n"+response)
        return response
if __name__ == '__main__':
pass | PypiClean |
/NICpolpy-0.1.5-py3-none-any.whl/nicpolpy/ysfitsutilpy4nicpolpy/astroim.py |
# from astropy import units as u
# from astropy.io import fits
# from astropy.nddata import CCDData
# from .hduutil import _parse_extension, load_ccd, set_ccd_gain_rdnoise
# try:
# import fitsio
# HAS_FITSIO = True
# except ImportError:
# HAS_FITSIO = False
# # class AstroImageMixin:
# # @classmethod
# # def load_header():
# class AstroImage:
# def __init__(self, data=None, header=None, path=None, extension=None,
# keys_attr={"gain": ("GAIN", 1), "rdnoise": ("RDNOISE", 0), "exptime": ("EXPTIME", 1)},
# verbose=True, update_header=True):
# self.path = path
# self.extension = extension
# self.data = data
# self.header = header
# if (self.header is not None) and keys_attr:
# for attr, (key, default) in keys_attr.items():
# if key in self.header:
# setattr(self, attr, self.header[key])
# else:
# setattr(self, attr, default)
# @classmethod
# def frompath(cls, path, load_header=True, *args, ext=None, extname=None, extver=None, **kwargs):
# extension = _parse_extension(*args, ext=ext, extname=extname, extver=extver)
# if load_header:
# hdu = fits.open(path, **kwargs)[extension]
# return cls(data=hdu.data, header=hdu.header, path=path, extension=extension)
# else:
# if HAS_FITSIO:
# data = fitsio.read(path)
# else:
# data = fits.getdata(path)
# # def __init__(self, fpath, load_header=True,
# # keys_attr={"gain": ("GAIN", 1), "rdnoise": ("RDNOISE", 0), "exptime": ("EXPTIME", 1)},
# # verbose=True, update_header=True):
# # self.fpath = Path(fpath)
# # if load_header:
# # self.hdu =
# # self.bias_cor = False
# # self.dark_cor = False
# # self.ovsc_cor = False
# # self.flat_cor = False
# # self.crrej_cor = False
# # def info(self):
# # ''' Prints information (fits.fitsinfo())
# # '''
# # pass | PypiClean |
/KratosStructuralMechanicsApplication-9.4-cp310-cp310-win_amd64.whl/KratosMultiphysics/StructuralMechanicsApplication/trilinos_structural_mechanics_implicit_dynamic_solver.py | import KratosMultiphysics
# Import applications
import KratosMultiphysics.StructuralMechanicsApplication as StructuralMechanicsApplication
import KratosMultiphysics.TrilinosApplication as TrilinosApplication
# Import base class file
from KratosMultiphysics.StructuralMechanicsApplication.trilinos_structural_mechanics_solver import TrilinosMechanicalSolver
from KratosMultiphysics.StructuralMechanicsApplication import auxiliary_methods_solvers
def CreateSolver(model, custom_settings):
    """Kratos factory hook: build the Trilinos implicit dynamic solver."""
    return TrilinosImplicitMechanicalSolver(model, custom_settings)
class TrilinosImplicitMechanicalSolver(TrilinosMechanicalSolver):
"""The trilinos structural mechanics implicit dynamic solver.
For more information see:
structural_mechanics_solver.py
trilinos_structural_mechanics_solver.py
"""
    def __init__(self, model, custom_settings):
        """Construct the solver; all setup is delegated to the base class."""
        # Construct the base solver.
        super().__init__(model, custom_settings)
        KratosMultiphysics.Logger.PrintInfo("::[TrilinosImplicitMechanicalSolver]:: ", "Construction finished")
    @classmethod
    def GetDefaultParameters(cls):
        """Return the default settings, extending the base-class defaults
        with the dynamic-scheme options (scheme type, Bossak alpha_m and
        Rayleigh damping coefficients)."""
        this_defaults = KratosMultiphysics.Parameters("""{
            "time_integration_method" : "implicit",
            "scheme_type"             : "bossak",
            "damp_factor_m"           :-0.3,
            "rayleigh_alpha"          : 0.0,
            "rayleigh_beta"           : 0.0
        }""")
        this_defaults.AddMissingParameters(super().GetDefaultParameters())
        return this_defaults
    def AddVariables(self):
        """Add the base solver variables plus the dynamic ones (velocities/accelerations)."""
        super().AddVariables()
        self._add_dynamic_variables()
        KratosMultiphysics.Logger.PrintInfo("::[TrilinosImplicitMechanicalSolver]:: Variables ADDED")
    def AddDofs(self):
        """Add the base solver DOFs plus the dynamic ones."""
        super().AddDofs()
        self._add_dynamic_dofs()
        KratosMultiphysics.Logger.PrintInfo("::[TrilinosImplicitMechanicalSolver]:: DOF's ADDED")
    def GetMinimumBufferSize(self):
        """Return the minimum solution-step buffer size.

        BDF-type and backward-Euler schemes need (integration order + 1)
        history steps; otherwise the base-class minimum applies.
        """
        base_min_buffer_size = super().GetMinimumBufferSize()
        scheme_type = self.settings["scheme_type"].GetString()
        if "bdf" in scheme_type or scheme_type == "backward_euler":
            return max(base_min_buffer_size, auxiliary_methods_solvers.GetBDFIntegrationOrder(scheme_type)+1)
        else:
            return base_min_buffer_size
#### Private functions ####
def _CreateScheme(self):
scheme_type = self.settings["scheme_type"].GetString()
process_info = self.main_model_part.ProcessInfo
process_info[StructuralMechanicsApplication.RAYLEIGH_ALPHA] = self.settings["rayleigh_alpha"].GetDouble()
process_info[StructuralMechanicsApplication.RAYLEIGH_BETA] = self.settings["rayleigh_beta"].GetDouble()
if scheme_type == "newmark":
damp_factor_m = 0.0
mechanical_scheme = TrilinosApplication.TrilinosResidualBasedBossakDisplacementScheme(damp_factor_m)
elif scheme_type == "bossak":
damp_factor_m = self.settings["damp_factor_m"].GetDouble()
mechanical_scheme = TrilinosApplication.TrilinosResidualBasedBossakDisplacementScheme(damp_factor_m)
elif scheme_type.startswith("bdf") or scheme_type == "backward_euler" :
order = auxiliary_methods_solvers.GetBDFIntegrationOrder(scheme_type)
# In case of rotation dof we declare the dynamic variables
if self.settings["rotation_dofs"].GetBool():
bdf_parameters = KratosMultiphysics.Parameters(""" {
"domain_size" : 3,
"integration_order" : 2,
"solution_variables" : ["DISPLACEMENT","ROTATION"]
} """)
bdf_parameters["domain_size"].SetInt(process_info[KratosMultiphysics.DOMAIN_SIZE])
mechanical_scheme = TrilinosApplication.TrilinosResidualBasedBDFCustomScheme(order, bdf_parameters)
else:
mechanical_scheme = TrilinosApplication.TrilinosResidualBasedBDFDisplacementScheme(order)
else:
err_msg = "The requested scheme type \"" + scheme_type + "\" is not available!\n"
err_msg += "Available options are: \"newmark\", \"bossak\""
raise Exception(err_msg)
return mechanical_scheme | PypiClean |
/Mathics_Django-6.0.0-py3-none-any.whl/mathics_django/web/media/js/mathjax/extensions/MathML/mml3.js | MathJax.Extension["MathML/mml3"]={version:"2.7.9"};MathJax.Hub.Register.StartupHook("MathML Jax Ready",function(){var d=MathJax.InputJax.MathML,g=d.Parse.prototype;d.prefilterHooks.Add(function(j){if(!d.mml3XSLT){return}if(!d.ParseXML){d.ParseXML=d.createParser()}var k=d.ParseXML(g.preProcessMath(j.math));var i=d.mml3XSLT.transformToDocument(k);if((typeof i)==="string"){j.math=i}else{if(window.XMLSerializer){var h=new XMLSerializer();j.math=h.serializeToString(i.documentElement,k)}}});var f=MathJax.Hub.Browser;var c="";if(f.isEdge||f.isMSIE){c="urn:schemas-microsoft-com:xslt"}else{c="http://exslt.org/common"}var e='<xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform" xmlns:m="http://www.w3.org/1998/Math/MathML" xmlns:c="'+c+'" exclude-result-prefixes="m c"><xsl:output indent="yes" omit-xml-declaration="yes"/><xsl:output indent="yes" omit-xml-declaration="yes"/><xsl:template match="*"> <xsl:copy> <xsl:copy-of select="@*"/> <xsl:apply-templates/> </xsl:copy></xsl:template><xsl:template match="m:*[@dir=\'rtl\']" priority="10"> <xsl:apply-templates mode="rtl" select="."/></xsl:template><xsl:template match="@*" mode="rtl"> <xsl:copy-of select="."/> <xsl:attribute name="dir">ltr</xsl:attribute></xsl:template><xsl:template match="*" mode="rtl"> <xsl:copy> <xsl:apply-templates select="@*" mode="rtl"/> <xsl:for-each select="node()"> <xsl:sort data-type="number" order="descending" select="position()"/> <xsl:text> </xsl:text> <xsl:apply-templates mode="rtl" select="."/> </xsl:for-each> </xsl:copy></xsl:template><xsl:template match="@open" mode="rtl"> <xsl:attribute name="close"><xsl:value-of select="."/></xsl:attribute></xsl:template><xsl:template match="@open[.=\'(\']" mode="rtl"> <xsl:attribute name="close">)</xsl:attribute></xsl:template><xsl:template match="@open[.=\')\']" mode="rtl"> <xsl:attribute 
name="close">(</xsl:attribute></xsl:template><xsl:template match="@open[.=\'[\']" mode="rtl"> <xsl:attribute name="close">]</xsl:attribute></xsl:template><xsl:template match="@open[.=\']\']" mode="rtl"> <xsl:attribute name="close">[</xsl:attribute></xsl:template><xsl:template match="@open[.=\'{\']" mode="rtl"> <xsl:attribute name="close">}</xsl:attribute></xsl:template><xsl:template match="@open[.=\'}\']" mode="rtl"> <xsl:attribute name="close">{</xsl:attribute></xsl:template><xsl:template match="@close" mode="rtl"> <xsl:attribute name="open"><xsl:value-of select="."/></xsl:attribute></xsl:template><xsl:template match="@close[.=\'(\']" mode="rtl"> <xsl:attribute name="open">)</xsl:attribute></xsl:template><xsl:template match="@close[.=\')\']" mode="rtl"> <xsl:attribute name="open">(</xsl:attribute></xsl:template><xsl:template match="@close[.=\'[\']" mode="rtl"> <xsl:attribute name="open">]</xsl:attribute></xsl:template><xsl:template match="@close[.=\']\']" mode="rtl"> <xsl:attribute name="open">[</xsl:attribute></xsl:template><xsl:template match="@close[.=\'{\']" mode="rtl"> <xsl:attribute name="open">}</xsl:attribute></xsl:template><xsl:template match="@close[.=\'}\']" mode="rtl"> <xsl:attribute name="open">{</xsl:attribute></xsl:template><xsl:template match="m:mfrac[@bevelled=\'true\']" mode="rtl"> <m:mrow> <m:msub><m:mi></m:mi><xsl:apply-templates select="*[2]" mode="rtl"/></m:msub> <m:mo>\</m:mo> <m:msup><m:mi></m:mi><xsl:apply-templates select="*[1]" mode="rtl"/></m:msup> </m:mrow></xsl:template><xsl:template match="m:mfrac" mode="rtl"> <xsl:copy> <xsl:apply-templates mode="rtl" select="@*|*"/> </xsl:copy></xsl:template><xsl:template match="m:mroot" mode="rtl"> <m:msup> <m:menclose notation="top right"> <xsl:apply-templates mode="rtl" select="@*|*[1]"/> </m:menclose> <xsl:apply-templates mode="rtl" select="*[2]"/> </m:msup></xsl:template><xsl:template match="m:msqrt" mode="rtl"> <m:menclose notation="top right"> <xsl:apply-templates mode="rtl" 
select="@*|*[1]"/> </m:menclose></xsl:template><xsl:template match="m:mtable|m:munder|m:mover|m:munderover" mode="rtl" priority="2"> <xsl:copy> <xsl:apply-templates select="@*" mode="rtl"/> <xsl:apply-templates mode="rtl"> </xsl:apply-templates> </xsl:copy></xsl:template><xsl:template match="m:msup" mode="rtl" priority="2"> <m:mmultiscripts> <xsl:apply-templates select="*[1]" mode="rtl"/> <m:mprescripts/> <m:none/> <xsl:apply-templates select="*[2]" mode="rtl"/> </m:mmultiscripts></xsl:template><xsl:template match="m:msub" mode="rtl" priority="2"> <m:mmultiscripts> <xsl:apply-templates select="*[1]" mode="rtl"/> <m:mprescripts/> <xsl:apply-templates select="*[2]" mode="rtl"/> <m:none/> </m:mmultiscripts></xsl:template><xsl:template match="m:msubsup" mode="rtl" priority="2"> <m:mmultiscripts> <xsl:apply-templates select="*[1]" mode="rtl"/> <m:mprescripts/> <xsl:apply-templates select="*[2]" mode="rtl"/> <xsl:apply-templates select="*[3]" mode="rtl"/> </m:mmultiscripts></xsl:template><xsl:template match="m:mmultiscripts" mode="rtl" priority="2"> <m:mmultiscripts> <xsl:apply-templates select="*[1]" mode="rtl"/> <xsl:for-each select="m:mprescripts/following-sibling::*[position() mod 2 = 1]"> <xsl:sort data-type="number" order="descending" select="position()"/> <xsl:apply-templates select="." mode="rtl"/> <xsl:apply-templates select="following-sibling::*[1]" mode="rtl"/> </xsl:for-each> <m:mprescripts/> <xsl:for-each select="m:mprescripts/preceding-sibling::*[position()!=last()][position() mod 2 = 0]"> <xsl:sort data-type="number" order="descending" select="position()"/> <xsl:apply-templates select="." 
mode="rtl"/> <xsl:apply-templates select="following-sibling::*[1]" mode="rtl"/> </xsl:for-each> </m:mmultiscripts></xsl:template><xsl:template match="m:mmultiscripts[not(m:mprescripts)]" mode="rtl" priority="3"> <m:mmultiscripts> <xsl:apply-templates select="*[1]" mode="rtl"/> <m:mprescripts/> <xsl:for-each select="*[position() mod 2 = 0]"> <xsl:sort data-type="number" order="descending" select="position()"/> <xsl:apply-templates select="." mode="rtl"/> <xsl:apply-templates select="following-sibling::*[1]" mode="rtl"/> </xsl:for-each> </m:mmultiscripts></xsl:template><xsl:template match="text()[.=\'(\']" mode="rtl">)</xsl:template><xsl:template match="text()[.=\')\']" mode="rtl">(</xsl:template><xsl:template match="text()[.=\'{\']" mode="rtl">}</xsl:template><xsl:template match="text()[.=\'}\']" mode="rtl">{</xsl:template><xsl:template match="text()[.=\'<\']" mode="rtl">></xsl:template><xsl:template match="text()[.=\'>\']" mode="rtl"><</xsl:template><xsl:template match="text()[.=\'∈\']" mode="rtl">∋</xsl:template><xsl:template match="text()[.=\'∋\']" mode="rtl">∈</xsl:template><xsl:template match="@notation[.=\'radical\']" mode="rtl"> <xsl:attribute name="notation">top right</xsl:attribute></xsl:template><xsl:template match="m:mlongdiv|m:mstack" mode="rtl"> <m:mrow dir="ltr"> <xsl:apply-templates select="."/> </m:mrow></xsl:template><xsl:template match="m:mstack" priority="11"> <xsl:variable name="m"> <m:mtable columnspacing="0em"> <xsl:copy-of select="@align"/> <xsl:variable name="t"> <xsl:apply-templates select="*" mode="mstack1"> <xsl:with-param name="p" select="0"/> </xsl:apply-templates> </xsl:variable> <xsl:variable name="maxl"> <xsl:for-each select="c:node-set($t)/*/@l"> <xsl:sort data-type="number" order="descending"/> <xsl:if test="position()=1"> <xsl:value-of select="."/> </xsl:if> </xsl:for-each> </xsl:variable> <xsl:for-each select="c:node-set($t)/*[not(@class=\'mscarries\') or following-sibling::*[1]/@class=\'mscarries\']"><xsl:variable name="c" 
select="preceding-sibling::*[1][@class=\'mscarries\']"/> <xsl:text> </xsl:text> <m:mtr> <xsl:copy-of select="@class[.=\'msline\']"/> <xsl:variable name="offset" select="$maxl - @l"/> <xsl:choose> <xsl:when test="@class=\'msline\' and @l=\'*\'"> <xsl:variable name="msl" select="*[1]"/> <xsl:for-each select="(//node())[position()<=$maxl]"> <xsl:copy-of select="$msl"/> </xsl:for-each> </xsl:when> <xsl:when test="$c"> <xsl:variable name="ldiff" select="$c/@l - @l"/> <xsl:variable name="loffset" select="$maxl - $c/@l"/> <xsl:for-each select="(//*)[position()<= $offset]"> <xsl:variable name="pn" select="position()"/> <xsl:variable name="cy" select="$c/*[position()=$pn - $loffset]"/> <m:mtd> <xsl:if test="$cy/*"> <m:mover><m:mphantom><m:mn>0</m:mn></m:mphantom><m:mpadded width="0em" lspace="-0.5width"> <xsl:copy-of select="$cy/*"/></m:mpadded></m:mover> </xsl:if> </m:mtd> </xsl:for-each> <xsl:for-each select="*"> <xsl:variable name="pn" select="position()"/> <xsl:variable name="cy" select="$c/*[position()=$pn + $ldiff]"/> <xsl:copy> <xsl:copy-of select="@*"/> <xsl:variable name="b"> <xsl:choose> <xsl:when test="not(string($cy/@crossout) or $cy/@crossout=\'none\')"><xsl:copy-of select="*"/></xsl:when> <xsl:otherwise> <m:menclose notation="{$cy/@crossout}"><xsl:copy-of select="*"/></m:menclose> </xsl:otherwise> </xsl:choose> </xsl:variable> <xsl:choose> <xsl:when test="$cy/m:none or not($cy/*)"><xsl:copy-of select="$b"/></xsl:when> <xsl:when test="not(string($cy/@location)) or $cy/@location=\'n\'"> <m:mover> <xsl:copy-of select="$b"/><m:mpadded width="0em" lspace="-0.5width"> <xsl:copy-of select="$cy/*"/> </m:mpadded> </m:mover> </xsl:when> <xsl:when test="$cy/@location=\'nw\'"> <m:mmultiscripts><xsl:copy-of select="$b"/><m:mprescripts/><m:none/><m:mpadded lspace="-1width" width="0em"><xsl:copy-of select="$cy/*"/></m:mpadded></m:mmultiscripts> </xsl:when> <xsl:when test="$cy/@location=\'s\'"> <m:munder><xsl:copy-of select="$b"/><m:mpadded width="0em" 
lspace="-0.5width"><xsl:copy-of select="$cy/*"/></m:mpadded></m:munder> </xsl:when> <xsl:when test="$cy/@location=\'sw\'"> <m:mmultiscripts><xsl:copy-of select="$b"/><m:mprescripts/><m:mpadded lspace="-1width" width="0em"><xsl:copy-of select="$cy/*"/></m:mpadded><m:none/></m:mmultiscripts> </xsl:when> <xsl:when test="$cy/@location=\'ne\'"> <m:msup><xsl:copy-of select="$b"/><m:mpadded width="0em"><xsl:copy-of select="$cy/*"/></m:mpadded></m:msup> </xsl:when> <xsl:when test="$cy/@location=\'se\'"> <m:msub><xsl:copy-of select="$b"/><m:mpadded width="0em"><xsl:copy-of select="$cy/*"/></m:mpadded></m:msub> </xsl:when> <xsl:when test="$cy/@location=\'w\'"> <m:msup><m:mrow/><m:mpadded lspace="-1width" width="0em"><xsl:copy-of select="$cy/*"/></m:mpadded></m:msup> <xsl:copy-of select="$b"/> </xsl:when> <xsl:when test="$cy/@location=\'e\'"> <xsl:copy-of select="$b"/> <m:msup><m:mrow/><m:mpadded width="0em"><xsl:copy-of select="$cy/*"/></m:mpadded></m:msup> </xsl:when> <xsl:otherwise> <xsl:copy-of select="$b"/> </xsl:otherwise> </xsl:choose> </xsl:copy> </xsl:for-each> </xsl:when> <xsl:otherwise> <xsl:for-each select="(//*)[position()<= $offset]"><m:mtd/></xsl:for-each> <xsl:copy-of select="*"/> </xsl:otherwise> </xsl:choose> </m:mtr> </xsl:for-each> </m:mtable></xsl:variable><xsl:apply-templates mode="ml" select="c:node-set($m)"/></xsl:template><xsl:template match="*" mode="ml"> <xsl:copy> <xsl:copy-of select="@*"/> <xsl:apply-templates mode="ml"/> </xsl:copy></xsl:template><xsl:template mode="ml" match="m:mtr[following-sibling::*[1][@class=\'msline\']]"> <m:mtr> <xsl:copy-of select="@*"/> <xsl:variable name="m" select="following-sibling::*[1]/m:mtd"/> <xsl:for-each select="m:mtd"> <xsl:variable name="p" select="position()"/> <m:mtd> <xsl:copy-of select="@*"/> <xsl:choose> <xsl:when test="$m[$p]/m:mpadded"> <m:menclose notation="bottom"> <m:mpadded depth=".1em" height="1em" width=".4em"> <xsl:copy-of select="*"/> </m:mpadded> </m:menclose> </xsl:when> <xsl:otherwise> 
<xsl:copy-of select="*"/> </xsl:otherwise> </xsl:choose> </m:mtd> </xsl:for-each> </m:mtr></xsl:template><xsl:template mode="ml" match="m:mtr[not(preceding-sibling::*)][@class=\'msline\']" priority="3"> <m:mtr> <xsl:copy-of select="@*"/> <xsl:for-each select="m:mtd"> <m:mtd> <xsl:copy-of select="@*"/> <xsl:if test="m:mpadded"> <m:menclose notation="bottom"> <m:mpadded depth=".1em" height="1em" width=".4em"> <m:mspace width=".2em"/> </m:mpadded> </m:menclose> </xsl:if> </m:mtd> </xsl:for-each> </m:mtr></xsl:template><xsl:template mode="ml" match="m:mtr[@class=\'msline\']" priority="2"/><xsl:template mode="mstack1" match="*"> <xsl:param name="p"/> <xsl:param name="maxl" select="0"/> <m:mtr l="{1 + $p}"> <xsl:if test="ancestor::mstack[1]/@stackalign=\'left\'"> <xsl:attribute name="l"><xsl:value-of select="$p"/></xsl:attribute> </xsl:if> <m:mtd><xsl:apply-templates select="."/></m:mtd> </m:mtr></xsl:template><xsl:template mode="mstack1" match="m:msrow"> <xsl:param name="p"/> <xsl:param name="maxl" select="0"/> <xsl:variable name="align1" select="ancestor::m:mstack[1]/@stackalign"/> <xsl:variable name="align"> <xsl:choose> <xsl:when test="string($align1)=\'\'">decimalpoint</xsl:when> <xsl:otherwise><xsl:value-of select="$align1"/></xsl:otherwise> </xsl:choose> </xsl:variable> <xsl:variable name="row"> <xsl:apply-templates mode="mstack1" select="*"> <xsl:with-param name="p" select="0"/> </xsl:apply-templates> </xsl:variable> <xsl:text> </xsl:text> <xsl:variable name="l1"> <xsl:choose> <xsl:when test="$align=\'decimalpoint\' and m:mn"> <xsl:for-each select="c:node-set($row)/m:mtr[m:mtd/m:mn][1]"> <xsl:value-of select="number(sum(@l))+count(preceding-sibling::*/@l)"/> </xsl:for-each> </xsl:when> <xsl:when test="$align=\'right\' or $align=\'decimalpoint\'"> <xsl:value-of select="count(c:node-set($row)/m:mtr/m:mtd)"/> </xsl:when> <xsl:otherwise> <xsl:value-of select="0"/> </xsl:otherwise> </xsl:choose> </xsl:variable> <m:mtr class="msrow" l="{number($l1) + 
number(sum(@position)) +$p}"> <xsl:copy-of select="c:node-set($row)/m:mtr/*"/> </m:mtr></xsl:template><xsl:template mode="mstack1" match="m:mn"> <xsl:param name="p"/> <xsl:variable name="align1" select="ancestor::m:mstack[1]/@stackalign"/> <xsl:variable name="dp1" select="ancestor::*[@decimalpoint][1]/@decimalpoint"/> <xsl:variable name="align"> <xsl:choose> <xsl:when test="string($align1)=\'\'">decimalpoint</xsl:when> <xsl:otherwise><xsl:value-of select="$align1"/></xsl:otherwise> </xsl:choose> </xsl:variable> <xsl:variable name="dp"> <xsl:choose> <xsl:when test="string($dp1)=\'\'">.</xsl:when> <xsl:otherwise><xsl:value-of select="$dp1"/></xsl:otherwise> </xsl:choose> </xsl:variable> <m:mtr l="$p"> <xsl:variable name="mn" select="normalize-space(.)"/> <xsl:variable name="len" select="string-length($mn)"/> <xsl:choose> <xsl:when test="$align=\'right\' or ($align=\'decimalpoint\' and not(contains($mn,$dp)))"> <xsl:attribute name="l"><xsl:value-of select="$p + $len"/></xsl:attribute> </xsl:when> <xsl:when test="$align=\'center\'"> <xsl:attribute name="l"><xsl:value-of select="round(($p + $len) div 2)"/></xsl:attribute> </xsl:when> <xsl:when test="$align=\'decimalpoint\'"> <xsl:attribute name="l"><xsl:value-of select="$p + string-length(substring-before($mn,$dp))"/></xsl:attribute> </xsl:when> </xsl:choose> <xsl:for-each select="(//node())[position() <=$len]"> <xsl:variable name="pos" select="position()"/> <m:mtd><m:mn><xsl:value-of select="substring($mn,$pos,1)"/></m:mn></m:mtd> </xsl:for-each> </m:mtr></xsl:template><xsl:template match="m:msgroup" mode="mstack1"> <xsl:param name="p"/> <xsl:variable name="s" select="number(sum(@shift))"/> <xsl:variable name="thisp" select="number(sum(@position))"/> <xsl:for-each select="*"> <xsl:apply-templates mode="mstack1" select="."> <xsl:with-param name="p" select="number($p)+$thisp+(position()-1)*$s"/> </xsl:apply-templates> </xsl:for-each></xsl:template><xsl:template match="m:msline" mode="mstack1"> <xsl:param name="p"/> 
<xsl:variable name="align1" select="ancestor::m:mstack[1]/@stackalign"/> <xsl:variable name="align"> <xsl:choose> <xsl:when test="string($align1)=\'\'">decimalpoint</xsl:when> <xsl:otherwise><xsl:value-of select="$align1"/></xsl:otherwise> </xsl:choose> </xsl:variable> <m:mtr class="msline"> <xsl:attribute name="l"> <xsl:choose> <xsl:when test="not(string(@length)) or @length=0">*</xsl:when> <xsl:when test="string($align)=\'right\' or string($align)=\'decimalpoint\' "><xsl:value-of select="$p+ @length"/></xsl:when> <xsl:otherwise><xsl:value-of select="$p"/></xsl:otherwise> </xsl:choose> </xsl:attribute> <xsl:variable name="w"> <xsl:choose> <xsl:when test="@mslinethickness=\'thin\'">0.1em</xsl:when> <xsl:when test="@mslinethickness=\'medium\'">0.15em</xsl:when> <xsl:when test="@mslinethickness=\'thick\'">0.2em</xsl:when> <xsl:when test="@mslinethickness"><xsl:value-of select="@mslinethickness"/></xsl:when> <xsl:otherwise>0.15em</xsl:otherwise> </xsl:choose> </xsl:variable> <xsl:choose> <xsl:when test="not(string(@length)) or @length=0"> <m:mtd class="mslinemax"> <m:mpadded lspace="-0.2em" width="0em" height="0em"> <m:mfrac linethickness="{$w}"> <m:mspace width=".4em"/> <m:mrow/> </m:mfrac> </m:mpadded> </m:mtd> </xsl:when> <xsl:otherwise> <xsl:variable name="l" select="@length"/> <xsl:for-each select="(//node())[position()<=$l]"> <m:mtd class="msline"> <m:mpadded lspace="-0.2em" width="0em" height="0em"> <m:mfrac linethickness="{$w}"> <m:mspace width=".4em"/> <m:mrow/> </m:mfrac> </m:mpadded> </m:mtd> </xsl:for-each> </xsl:otherwise> </xsl:choose> </m:mtr></xsl:template><xsl:template match="m:mscarries" mode="mstack1"> <xsl:param name="p"/> <xsl:variable name="align1" select="ancestor::m:mstack[1]/@stackalign"/> <xsl:variable name="l1"> <xsl:choose> <xsl:when test="string($align1)=\'left\'">0</xsl:when> <xsl:otherwise><xsl:value-of select="count(*)"/></xsl:otherwise> </xsl:choose> </xsl:variable> <m:mtr class="mscarries" l="{$p + $l1 + sum(@position)}"> 
<xsl:apply-templates select="*" mode="msc"/> </m:mtr></xsl:template><xsl:template match="*" mode="msc"> <m:mtd> <xsl:copy-of select="../@location|../@crossout"/> <xsl:choose> <xsl:when test="../@scriptsizemultiplier"> <m:mstyle mathsize="{round(../@scriptsizemultiplier div .007)}%"> <xsl:apply-templates select="."/> </m:mstyle> </xsl:when> <xsl:otherwise> <xsl:apply-templates select="."/> </xsl:otherwise> </xsl:choose> </m:mtd></xsl:template><xsl:template match="m:mscarry" mode="msc"> <m:mtd> <xsl:copy-of select="@location|@crossout"/> <xsl:choose> <xsl:when test="../@scriptsizemultiplier"> <m:mstyle mathsize="{round(../@scriptsizemultiplier div .007)}%"> <xsl:apply-templates/> </m:mstyle> </xsl:when> <xsl:otherwise> <xsl:apply-templates/> </xsl:otherwise> </xsl:choose> </m:mtd></xsl:template><xsl:template match="m:mlongdiv" priority="11"> <xsl:variable name="ms"> <m:mstack> <xsl:copy-of select="(ancestor-or-self::*/@decimalpoint)[last()]"/> <xsl:choose> <xsl:when test="@longdivstyle=\'left)(right\'"> <m:msrow> <m:mrow><xsl:copy-of select="*[1]"/></m:mrow> <m:mo>)</m:mo> <xsl:copy-of select="*[3]"/> <m:mo>(</m:mo> <xsl:copy-of select="*[2]"/> </m:msrow> </xsl:when> <xsl:when test="@longdivstyle=\'left/\right\'"> <m:msrow> <m:mrow><xsl:copy-of select="*[1]"/></m:mrow> <m:mo>/</m:mo> <xsl:copy-of select="*[3]"/> <m:mo></m:mo> <xsl:copy-of select="*[2]"/> </m:msrow> </xsl:when> <xsl:when test="@longdivstyle=\':right=right\'"> <m:msrow> <xsl:copy-of select="*[3]"/> <m:mo>:</m:mo> <xsl:copy-of select="*[1]"/> <m:mo>=</m:mo> <xsl:copy-of select="*[2]"/> </m:msrow> </xsl:when> <xsl:when test="@longdivstyle=\'stackedrightright\' or @longdivstyle=\'mediumstackedrightright\' or @longdivstyle=\'shortstackedrightright\' or @longdivstyle=\'stackedleftleft\' "> <xsl:attribute name="align">top</xsl:attribute> <xsl:copy-of select="*[3]"/> </xsl:when> <xsl:when test="@longdivstyle=\'stackedleftlinetop\'"> <xsl:copy-of select="*[2]"/> <m:msline length="{string-length(*[3])-1}"/> 
<m:msrow> <m:mrow> <m:menclose notation="bottom right"> <xsl:copy-of select="*[1]"/> </m:menclose> </m:mrow> <xsl:copy-of select="*[3]"/> </m:msrow> </xsl:when> <xsl:when test="@longdivstyle=\'righttop\'"> <xsl:copy-of select="*[2]"/> <m:msline length="{string-length(*[3])}"/> <m:msrow> <xsl:copy-of select="*[3]"/> <m:menclose notation="top left bottom"> <xsl:copy-of select="*[1]"/></m:menclose> </m:msrow> </xsl:when> <xsl:otherwise> <xsl:copy-of select="*[2]"/> <m:msline length="{string-length(*[3])}"/> <m:msrow> <m:mrow><xsl:copy-of select="*[1]"/></m:mrow> <m:mo>)</m:mo> <xsl:copy-of select="*[3]"/> </m:msrow> </xsl:otherwise> </xsl:choose> <xsl:copy-of select="*[position()>3]"/> </m:mstack> </xsl:variable> <xsl:choose> <xsl:when test="@longdivstyle=\'stackedrightright\'"> <m:menclose notation="right"> <xsl:apply-templates select="c:node-set($ms)"/> </m:menclose> <m:mtable align="top"> <m:mtr> <m:menclose notation="bottom"> <xsl:copy-of select="*[1]"/> </m:menclose> </m:mtr> <m:mtr> <mtd><xsl:copy-of select="*[2]"/></mtd> </m:mtr> </m:mtable> </xsl:when> <xsl:when test="@longdivstyle=\'mediumstackedrightright\'"> <xsl:apply-templates select="c:node-set($ms)"/> <m:menclose notation="left"> <m:mtable align="top"> <m:mtr> <m:menclose notation="bottom"> <xsl:copy-of select="*[1]"/> </m:menclose> </m:mtr> <m:mtr> <mtd><xsl:copy-of select="*[2]"/></mtd> </m:mtr> </m:mtable> </m:menclose> </xsl:when> <xsl:when test="@longdivstyle=\'shortstackedrightright\'"> <xsl:apply-templates select="c:node-set($ms)"/> <m:mtable align="top"> <m:mtr> <m:menclose notation="left bottom"> <xsl:copy-of select="*[1]"/> </m:menclose> </m:mtr> <m:mtr> <mtd><xsl:copy-of select="*[2]"/></mtd> </m:mtr> </m:mtable> </xsl:when> <xsl:when test="@longdivstyle=\'stackedleftleft\'"> <m:mtable align="top"> <m:mtr> <m:menclose notation="bottom"> <xsl:copy-of select="*[1]"/> </m:menclose> </m:mtr> <m:mtr> <mtd><xsl:copy-of select="*[2]"/></mtd> </m:mtr> </m:mtable> <m:menclose notation="left"> 
<xsl:apply-templates select="c:node-set($ms)"/> </m:menclose> </xsl:when> <xsl:otherwise> <xsl:apply-templates select="c:node-set($ms)"/> </xsl:otherwise> </xsl:choose></xsl:template><xsl:template match="m:menclose[@notation=\'madruwb\']" mode="rtl"> <m:menclose notation="bottom right"> <xsl:apply-templates mode="rtl"/> </m:menclose></xsl:template></xsl:stylesheet>';var b;if(window.XSLTProcessor){if(!d.ParseXML){d.ParseXML=d.createParser()}d.mml3XSLT=new XSLTProcessor();d.mml3XSLT.importStylesheet(d.ParseXML(e))}else{if(MathJax.Hub.Browser.isMSIE){if(MathJax.Hub.Browser.versionAtLeast("9.0")||(document.documentMode||0)>=9){b=new ActiveXObject("Msxml2.FreeThreadedDOMDocument");b.loadXML(e);var a=new ActiveXObject("Msxml2.XSLTemplate");a.stylesheet=b;d.mml3XSLT={mml3:a.createProcessor(),transformToDocument:function(h){this.mml3.input=h;this.mml3.transform();return this.mml3.output}}}else{b=d.createMSParser();b.async=false;b.loadXML(e);d.mml3XSLT={mml3:b,transformToDocument:function(h){return h.documentElement.transformNode(this.mml3)}}}}else{d.mml3XSLT=null}}MathJax.Ajax.Styles({".MathJax .mi, .MathJax .mo, .MathJax .mn, .MathJax .mtext":{direction:"ltr",display:"inline-block"},".MathJax .ms, .MathJax .mspace, .MathJax .mglyph":{direction:"ltr",display:"inline-block"}});MathJax.Hub.Startup.signal.Post("MathML mml3.js Ready")});MathJax.Ajax.loadComplete("[MathJax]/extensions/MathML/mml3.js"); | PypiClean |
/Django-4.2.4.tar.gz/Django-4.2.4/django/contrib/gis/gdal/raster/band.py | from ctypes import byref, c_double, c_int, c_void_p
from django.contrib.gis.gdal.error import GDALException
from django.contrib.gis.gdal.prototypes import raster as capi
from django.contrib.gis.gdal.raster.base import GDALRasterBase
from django.contrib.gis.shortcuts import numpy
from django.utils.encoding import force_str
from .const import (
GDAL_COLOR_TYPES,
GDAL_INTEGER_TYPES,
GDAL_PIXEL_TYPES,
GDAL_TO_CTYPES,
)
class GDALBand(GDALRasterBase):
    """
    Wrap a GDAL raster band, needs to be obtained from a GDALRaster object.
    """

    def __init__(self, source, index):
        # Keep a reference to the owning raster: the band handle is only
        # valid while the parent dataset is alive.
        self.source = source
        # GDAL band indices are 1-based.
        self._ptr = capi.get_ds_raster_band(source._ptr, index)

    def _flush(self):
        """
        Call the flush method on the Band's parent raster and force a refresh
        of the statistics attribute when requested the next time.
        """
        self.source._flush()
        self._stats_refresh = True

    @property
    def description(self):
        """
        Return the description string of the band.
        """
        return force_str(capi.get_band_description(self._ptr))

    @property
    def width(self):
        """
        Width (X axis) in pixels of the band.
        """
        return capi.get_band_xsize(self._ptr)

    @property
    def height(self):
        """
        Height (Y axis) in pixels of the band.
        """
        return capi.get_band_ysize(self._ptr)

    @property
    def pixel_count(self):
        """
        Return the total number of pixels in this band.
        """
        return self.width * self.height

    # When True, the next statistics() call recomputes from the data
    # instead of using cached values (set by _flush()).
    _stats_refresh = False

    def statistics(self, refresh=False, approximate=False):
        """
        Compute statistics on the pixel values of this band.
        The return value is a tuple with the following structure:
        (minimum, maximum, mean, standard deviation).
        If approximate=True, the statistics may be computed based on overviews
        or a subset of image tiles.
        If refresh=True, the statistics will be computed from the data directly,
        and the cache will be updated where applicable.
        For empty bands (where all pixel values are nodata), all statistics
        values are returned as None.
        For raster formats using Persistent Auxiliary Metadata (PAM) services,
        the statistics might be cached in an auxiliary file.
        """
        # Prepare array with arguments for capi function
        smin, smax, smean, sstd = c_double(), c_double(), c_double(), c_double()
        stats_args = [
            self._ptr,
            c_int(approximate),
            byref(smin),
            byref(smax),
            byref(smean),
            byref(sstd),
            c_void_p(),
            c_void_p(),
        ]
        if refresh or self._stats_refresh:
            func = capi.compute_band_statistics
        else:
            # Add additional argument to force computation if there is no
            # existing PAM file to take the values from.
            force = True
            stats_args.insert(2, c_int(force))
            func = capi.get_band_statistics
        # Computation of statistics fails for empty bands.
        try:
            func(*stats_args)
            result = smin.value, smax.value, smean.value, sstd.value
        except GDALException:
            result = (None, None, None, None)
        self._stats_refresh = False
        return result

    @property
    def min(self):
        """
        Return the minimum pixel value for this band.
        """
        return self.statistics()[0]

    @property
    def max(self):
        """
        Return the maximum pixel value for this band.
        """
        return self.statistics()[1]

    @property
    def mean(self):
        """
        Return the mean of all pixel values of this band.
        """
        return self.statistics()[2]

    @property
    def std(self):
        """
        Return the standard deviation of all pixel values of this band.
        """
        return self.statistics()[3]

    @property
    def nodata_value(self):
        """
        Return the nodata value for this band, or None if it isn't set.
        """
        # Get value and nodata exists flag
        nodata_exists = c_int()
        value = capi.get_band_nodata_value(self._ptr, nodata_exists)
        if not nodata_exists:
            value = None
        # If the pixeltype is an integer, convert to int
        elif self.datatype() in GDAL_INTEGER_TYPES:
            value = int(value)
        return value

    @nodata_value.setter
    def nodata_value(self, value):
        """
        Set the nodata value for this band.
        """
        if value is None:
            capi.delete_band_nodata_value(self._ptr)
        elif not isinstance(value, (int, float)):
            raise ValueError("Nodata value must be numeric or None.")
        else:
            capi.set_band_nodata_value(self._ptr, value)
        # Persist the change on the parent raster and invalidate the
        # cached statistics.
        self._flush()

    def datatype(self, as_string=False):
        """
        Return the GDAL Pixel Datatype for this band.
        """
        dtype = capi.get_band_datatype(self._ptr)
        if as_string:
            dtype = GDAL_PIXEL_TYPES[dtype]
        return dtype

    def color_interp(self, as_string=False):
        """Return the GDAL color interpretation for this band."""
        color = capi.get_band_color_interp(self._ptr)
        if as_string:
            color = GDAL_COLOR_TYPES[color]
        return color

    def data(self, data=None, offset=None, size=None, shape=None, as_memoryview=False):
        """
        Read or writes pixel values for this band. Blocks of data can
        be accessed by specifying the width, height and offset of the
        desired block. The same specification can be used to update
        parts of a raster by providing an array of values.
        Allowed input data types are bytes, memoryview, list, tuple, and array.
        """
        offset = offset or (0, 0)
        # Default to the full remaining extent from the offset.
        size = size or (self.width - offset[0], self.height - offset[1])
        shape = shape or size
        if any(x <= 0 for x in size):
            raise ValueError("Offset too big for this raster.")
        if size[0] > self.width or size[1] > self.height:
            raise ValueError("Size is larger than raster.")
        # Create ctypes type array generator
        ctypes_array = GDAL_TO_CTYPES[self.datatype()] * (shape[0] * shape[1])
        if data is None:
            # Set read mode
            access_flag = 0
            # Prepare empty ctypes array
            data_array = ctypes_array()
        else:
            # Set write mode
            access_flag = 1
            # Instantiate ctypes array holding the input data
            if isinstance(data, (bytes, memoryview)) or (
                numpy and isinstance(data, numpy.ndarray)
            ):
                data_array = ctypes_array.from_buffer_copy(data)
            else:
                data_array = ctypes_array(*data)
        # Access band
        capi.band_io(
            self._ptr,
            access_flag,
            offset[0],
            offset[1],
            size[0],
            size[1],
            byref(data_array),
            shape[0],
            shape[1],
            self.datatype(),
            0,
            0,
        )
        # Return data as numpy array if possible, otherwise as list
        if data is None:
            if as_memoryview:
                return memoryview(data_array)
            elif numpy:
                # reshape() needs a reshape parameter with the height first.
                return numpy.frombuffer(
                    data_array, dtype=numpy.dtype(data_array)
                ).reshape(tuple(reversed(size)))
            else:
                return list(data_array)
        else:
            # Write access: flush so the changes reach the dataset.
            self._flush()
class BandList(list):
    """Lazy sequence of ``GDALBand`` objects belonging to a raster.

    The band count is always queried from GDAL, and band wrappers are
    created on demand; nothing is stored in the underlying list.
    """

    def __init__(self, source):
        super().__init__()
        # The GDALRaster that owns the bands.
        self.source = source

    def __len__(self):
        # Ask GDAL for the current number of bands in the dataset.
        return capi.get_ds_raster_count(self.source._ptr)

    def __iter__(self):
        # GDAL band indices start at 1.
        band_count = len(self)
        band_index = 1
        while band_index <= band_count:
            yield GDALBand(self.source, band_index)
            band_index += 1

    def __getitem__(self, index):
        # Translate the 0-based Python index to GDAL's 1-based index.
        try:
            band = GDALBand(self.source, index + 1)
        except GDALException:
            raise GDALException("Unable to get band index %d" % index)
        return band
/FreePyBX-1.0-RC1.tar.gz/FreePyBX-1.0-RC1/freepybx/public/js/dojox/mobile/app/compat.js.uncompressed.js | This is an optimized version of Dojo, built for deployment and not for
development. To get sources and documentation, please visit:
http://dojotoolkit.org
*/
//>>built
// Build-time module cache: the Dojo build system inlines these module
// sources into the layer so the loader resolves them without extra
// HTTP requests.
require({cache:{
'dojox/main':function(){
define(["dojo/_base/kernel"], function(dojo) {
	// module:
	//		dojox/main
	// summary:
	//		The dojox package main module; dojox package is somewhat unusual in that the main module currently just provides an empty object.
	return dojo.dojox;
});
},
'dojox/mobile/compat':function(){
define([
	"dojo/_base/lang",
	"dojo/_base/sniff"
], function(lang, has){
	// Only non-WebKit browsers need the CSS3 compatibility layer.
	var dm = lang.getObject("dojox.mobile", true);
	if(!has("webkit")){
		var s = "dojox/mobile/_compat"; // assign to a variable so as not to be picked up by the build tool
		require([s]);
	}
	return dm;
});
},
'dijit/main':function(){
define("dijit/main", [
	"dojo/_base/kernel"
], function(dojo){
	// module:
	//		dijit
	// summary:
	//		The dijit package main module
	return dojo.dijit;
});
}}});
// Preload the (empty) localization bundle for this layer so later i18n
// requests for it resolve without additional loading.
require(["dojo/i18n"], function(i18n){
	i18n._preloadLocalizations("dojox/mobile/app/nls/compat", []);
});
// wrapped by build app
define("dojox/mobile/app/compat", ["dijit","dojo","dojox","dojo/require!dojox/mobile/compat"], function(dijit,dojo,dojox){
dojo.provide("dojox.mobile.app.compat");
dojo.require("dojox.mobile.compat");

// summary:
//		CSS3 compatibility module for apps
// description:
//		This module provides support for some of the CSS3 features to djMobile
//		for non-CSS3 browsers, such as IE or Firefox.
//		If you load this module, it directly replaces some of the methods of
//		djMobile instead of subclassing. This way, html pages remains the same
//		regardless of whether this compatibility module is used or not.
//		Recommended usage is as follows. the code below loads dojox.mobile.compat
//		only when isWebKit is true.
//
//		dojo.require("dojox.mobile");
//		dojo.requireIf(!dojo.isWebKit, "dojox.mobile.appCompat");

// Replace AlertDialog's CSS3 transition with a dojo.fx slide + fade.
dojo.extend(dojox.mobile.app.AlertDialog, {
	_doTransition: function(dir){
		console.log("in _doTransition and this = ", this);
		var h = dojo.marginBox(this.domNode.firstChild).h;
		var bodyHeight = this.controller.getWindowSize().h;
		var high = bodyHeight - h;
		var low = bodyHeight;
		// Slide the dialog in (dir >= 0) or out (dir < 0) while fading the mask.
		var anim1 = dojo.fx.slideTo({
			node: this.domNode,
			duration: 400,
			top: {start: dir < 0 ? high : low, end: dir < 0 ? low: high}
		});
		var anim2 = dojo[dir < 0 ? "fadeOut" : "fadeIn"]({
			node: this.mask,
			duration: 400
		});
		var anim = dojo.fx.combine([anim1, anim2]);
		var _this = this;
		dojo.connect(anim, "onEnd", this, function(){
			// When sliding out, tear the dialog and its mask down after
			// the animation completes.
			if(dir < 0){
				_this.domNode.style.display = "none";
				dojo.destroy(_this.domNode);
				dojo.destroy(_this.mask);
			}
		});
		anim.play();
	}
});

// Replace List's CSS3 row-delete animation with dojo.animateProperty.
dojo.extend(dojox.mobile.app.List, {
	deleteRow: function(){
		var row = this._selectedRow;
		// BUGFIX: this log previously ran before `row` was assigned (the
		// `var row` declaration was hoisted above it), so it always
		// printed `undefined`. Log after the assignment instead.
		console.log("deleteRow in compat mode", row);

		// First make the row invisible
		// Put it back where it came from
		dojo.style(row, {
			visibility: "hidden",
			minHeight: "0px"
		});
		dojo.removeClass(row, "hold");

		// Animate reducing it's height to zero, then delete the data from the
		// array
		var height = dojo.contentBox(row).h;
		dojo.animateProperty({
			node: row,
			duration: 800,
			properties: {
				height: {start: height, end: 1},
				paddingTop: {end: 0},
				paddingBottom: {end: 0}
			},
			onEnd: this._postDeleteAnim
		}).play();
	}
});

// ImageView requires a 2D canvas context; degrade to a plain message
// where canvas is unavailable.
if(dojox.mobile.app.ImageView && !dojo.create("canvas").getContext){
	dojo.extend(dojox.mobile.app.ImageView, {
		buildRendering: function(){
			this.domNode.innerHTML =
				"ImageView widget is not supported on this browser."
				+ "Please try again with a modern browser, e.g. "
				+ "Safari, Chrome or Firefox";
			this.canvas = {};
		},
		postCreate: function(){}
	});
}

// Position ImageThumbView thumbnails with absolute top/left rather than
// CSS3 transforms.
if(dojox.mobile.app.ImageThumbView){
	dojo.extend(dojox.mobile.app.ImageThumbView, {
		place: function(node, x, y){
			dojo.style(node, {
				top: y + "px",
				left: x + "px",
				visibility: "visible"
			});
		}
	});
}
});
# Todo:
# - ConvFFNet and others(?) only works with 1 channel input
# to the first conv layer (so only b&w images??).
# - ConvFFNet likely breaks(?) with strides and paddings other
# than the default
# - add options to change for example the nonlinearities.
# - check stuff in init of classes with asserts.
import functools
import torch
import torch.nn as nn
import torch.nn.functional as F
import du.utils
from du.models import FFNet_, denseFFhidden
__author__ = 'Scott Simmons'
__version__ = '0.9.3'
__status__ = 'Development'
__date__ = '12/03/20'
__copyright__ = """
Copyright 2019-2020 Scott Simmons
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
__license__= 'Apache 2.0'
def metalayer(channels, kernels, nonlin, **kwargs):
    """Build a ~convolutional metalayer~.

    A metalayer is a single convolutional layer, optionally followed
    by batch normalization and dropout, followed by a nonlinearity
    and, when `kernels[1] > 1`, a single max-pooling layer.

    With the default `strides` and `paddings`, an input of spatial
    size `(H_in, W_in)` is mapped to

      `H_out = floor((H_in + (kernels[0]+1) mod 2) / kernels[1])`, and
      `W_out = floor((W_in + (kernels[0]+1) mod 2) / kernels[1])`;

    in particular, an odd convolutional kernel preserves spatial size
    (padding is (`kernels[0]`-1)/2 with stride 1) and the pooling
    layer then shrinks each dimension by a factor of `kernels[1]`.

    Args:
      $channels$ (`Tuple[int]`): Interpreted as `(in_channels,`
          `out_channels)` of the convolutional layer.
      $kernels$ (`Tuple[int]`): The first integer determines the
          width and height of the square convolutional kernel; the
          second, the same for the max-pooling kernel.
      $nonlin$ (`nn.Module`): The nonlinearity.

    Kwargs:
      $strides$ (`Tuple[int]`): The first int is the stride of the
          convolutional layer; the second is that of the pooling
          layer. Default: `(1, kernels[1])`.
      $paddings$ (`Tuple[int]`): The first int is the padding for the
          convolutional layer; the second is that for the pooling
          layer. Default: `(int(kernels[0]/2), 0)`.
      $batchnorm$ (`(str, kwargs)`): A tuple which, if not empty,
          results in a batch normalization layer being inserted in
          the metalayer. Keyword arguments for `torch.nn.BatchNorm2d`
          can be supplied in the form of a `dict` as the second
          element. Default: `('before',)`.
      $dropout$ (`float`): If greater than zero, add a dropout layer
          with this probability before the nonlinearity. Default: `0`.

    Returns:
      `(nn.Sequential, function)`. The metalayer tupled with a func-
      tion that maps `H_in, W_in` to `H_out, W_out`.
    """
    # this is metalayer
    du.utils._check_kwargs(kwargs,['strides','paddings','batchnorm','dropout'])
    strides = kwargs.get('strides',(1,kernels[1]))
    paddings = kwargs.get('paddings',(int(kernels[0]/2),0))
    batchnorm = kwargs.get('batchnorm', ('before',))
    dropout = kwargs.get('dropout', 0)

    # Assemble the layer list once rather than enumerating every
    # combination of batchnorm/dropout/pooling in duplicated branches
    # (the previous 4-way copy of this code drifted out of sync).
    modules = [nn.Conv2d(in_channels=channels[0], out_channels=channels[1],
        kernel_size=kernels[0], stride=strides[0], padding=paddings[0])]
    if batchnorm:
        if len(batchnorm) == 1:
            bn_kwargs = {}  # batchnorm kwargs
        else:
            bn_kwargs = batchnorm[1]
            assert isinstance(bn_kwargs, dict),\
                'second element of batchnorm must be a dict'
        # NOTE(review): the 'before'/'after' string in `batchnorm` is not
        # consulted here (nor was it in the original branching): batch
        # normalization is always applied before the nonlinearity.
        modules.append(nn.BatchNorm2d(num_features=channels[1], **bn_kwargs))
    if dropout > 0:
        # BUGFIX: previously the Dropout layer was silently omitted
        # whenever kernels[1] <= 1 (the non-pooling branches), which
        # contradicted the documented behavior of the dropout kwarg.
        modules.append(nn.Dropout(dropout))
    modules.append(nonlin)
    if kernels[1] > 1:
        modules.append(nn.MaxPool2d(kernel_size=kernels[1], stride=strides[1],
            padding=paddings[1]))
    ml = nn.Sequential(*modules)

    def out_size(height, width):
        # Determine the output spatial size empirically by pushing a
        # dummy single-example batch through the metalayer.
        return tuple(ml(torch.randn(1,channels[0],height,width)).size()[2:])
        #return int((height + (kernels[0] + 1) % 2) / kernels[1]),\
        #    int((width + (kernels[0] + 1) % 2) / kernels[1])

    return ml, out_size
def convFFhidden(channels, conv_kernels, pool_kernels, **kwargs):
    """Compose convolutional metalayers.

    This composes the specified convolutional metalayers into a
    block for use in the hidden part of a feed-forward neural net.
    Let `n` denote the number of specified metalayers; that is,
    `n = len(conv_kernels) = len(pool_kernels) = len(channels)-1`.

    Args:
      $channels$ (`Tuple[int]`): A tuple of length `n+1` the first
          entry of which is `in_channels` for the first metalayer's
          convolutional part; the rest of the entries are the suc-
          cessive `out_channels` for the convolutional part of the
          first meta-layer, the second meta-layer, etc.
      $conv_kernels$ (`Tuple[int]`): A tuple of length `n` holding
          the kernel size for the convolution part of the successive
          metalayers.
      $pool_kernels$ (`Tuple[int]`): A tuple of length `n` holding
          the kernel size for the pooling layer of the successive
          metalayers.

    Kwargs:
      $nonlins$ (`nn.Module`): The nonlinearity to compose between
          meta-layers. Default: `nn.ReLU()`.
      $batchnorm$ (`(str, kwargs)`): A tuple which, if not empty, re-
          sults in a batch normalization layer being inserted in each
          convolutional metalayer. Keywords for `torch.nn.BatchNorm2d`
          can be supplied in the form of a `dict` as the second ele-
          ment of this tuple. Default: `('before',)`.
      $dropout$ (`float`): If greater than zero, add a dropout layer
          with this probability before each nonlinearity. Default: `0`.

    Returns:
      `(nn.Sequential, function)`. The block consisting of the com-
      posed metalayers tupled with a function mapping `W_in, H_in`
      to `W_out, H_out` where `(W_in, H_in)` is the shape of an in-
      put to the block and `(W_out, H_out)` is the corresponding
      output.
    """
    du.utils._check_kwargs(kwargs,['nonlins','batchnorm','dropout'])
    # BUGFIX: this previously read kwargs.get('nonlin', ...) -- note the
    # missing 's' -- so a caller-supplied `nonlins` keyword (which passes
    # _check_kwargs above) was silently ignored and ReLU was always used.
    nonlins = kwargs.get('nonlins', nn.ReLU())
    dropout = kwargs.get('dropout', 0)
    batchnorm = kwargs.get('batchnorm', ('before',))
    assert len(channels)-1 == len(conv_kernels) == len(pool_kernels)
    # Build each metalayer; zip(*) splits the (module, size-fn) pairs.
    layers, funcs = list(
        zip(*[metalayer(chans,kerns,nonlins,batchnorm=batchnorm,dropout=dropout)
            for chans, kerns in zip(
                zip(channels[:-1],channels[1:]),
                zip(conv_kernels, pool_kernels))]))
    # Compose the per-metalayer size functions left-to-right so the
    # returned function maps an input size through the whole block.
    return nn.Sequential(*layers), functools.reduce(
        lambda f,g: lambda x,y:g(*f(x,y)), funcs, lambda x,y:(x,y))
class ConvFFNet(FFNet_):
    """Meta-layered convolutional net.

    Builds a convolutional net consisting of the composition of
    convolutional metalayers followed by dense layers.
    """
    def __init__(self, in_size, n_out, channels, widths, **kwargs):
        """Constructor.

        Args:
          $in_size$ (`Tuple[int]`): A tuple (height, width) holding
              the height and width of each input (in pixels, for
              images).
          $n_out$ (`int`): Number of outputs from the model in its
              entirety. This would be 10 to say classify digits,
              or 1 for a regression problem.
          $channels$ (`Tuple[int]`): The first entry sets `in_channels`
              for the first metalayer's convolutional part; the
              rest of the entries are the successive `out_chann`
              `els` for the convolutional part of the first meta-
              layer, the second metalayer, etc.
          $widths$ (`Tuple[int]`): The widths (no. of nodes) in the
              successive layers of the dense part.

        Kwargs:
          $conv_kernels$ (`Tuple[int]`): Default: `(len(channels)-1)*[5]`
          $pool_kernels$ (`Tuple[int]`): Default: `(len(channels)-1)*[2]`
          $nonlins$ (`Tuple[nn.Module]`): A length 2 tuple determin-
              ing the nonlinearities for, resp., the convolution-
              al and the dense parts of the network. Default: `(nn`
              `.ReLU(), nn.ReLU())`.
          $batchnorm$ (`(str, kwargs)`): A tuple which, if not empty,
              results in a batch normalization layer being inser-
              ted in each convolutional metalayer. If the string
              in the first position is 'before', resp. 'after',
              then batch normalization takes place before, resp.
              after, the nonlinearity in each convolutional meta-
              layer. Keywords for `torch.nn.BatchNorm2d` can be
              supplied in the form a `dict` and included as the
              second element of this tuple; those will be applied
              in each convolutional metalayer's batch normalizat-
              ion layer. Default: `('before',)`.
          $dropout$ (`float`): If greater than zero, add a dropout
              layer with this probability before each nonlinearity.
              Default: `0`.
          $outfn$ (`nn.Module`): A function to pipe out though lastly
              in the `forward` method; The default is `log_softmax`.
              For regression, you likely want to put `None`.
          $means$ (`torch.Tensor`): A tensor typically holding the
              means of the training data.
          $stdevs$ (`torch.Tensor`): A tensor typically holding the
              standard deviations of the training data.

        >>> `model = ConvFFNet((28,28), 10, (1,16,8), (100,50))`
        >>> `xss = torch.rand(100,28,28)` # e.g., b&w images
        >>> `yhatss = model(xss)`
        >>> `yhatss.size()`
        torch.Size([100, 10])

        >>> `bn = ('before',{'momentum':0.9})`
        >>> `model=ConvFFNet((28,28),8,(1,16),(100,),batchnorm=bn)`
        >>> `xss = torch.rand(100,28,28)` # e.g., b&w images
        >>> `yhatss = model(xss)`
        >>> `yhatss.size()`
        torch.Size([100, 8])
        >>> `print(model.short_repr(color=False))`
        Conv.: channels 1 16 ReLU batchnorm:before dropout:0
        Dense: widths 3136 100 8 ReLU dropout:0

        >>> `model=ConvFFNet((28,28),8,(1,16),(),batchnorm=bn)`
        >>> `print(model.short_repr(color=False))`
        Conv.: channels 1 16 ReLU batchnorm:before dropout:0
        Dense: widths 3136 8 ReLU dropout:0
        """
        du.utils._check_kwargs(kwargs, ['conv_kernels','pool_kernels','means',
            'stdevs','outfn','nonlins','batchnorm','dropout'])
        means = kwargs.get('means', None)
        stdevs = kwargs.get('stdevs', None)
        assert len(in_size) == 2,\
            'in_size must have length 2 not {}'.format(len(in_size))
        super().__init__(means = means, stdevs = stdevs)
        self.outfn = kwargs.get('outfn',
            lambda xss: torch.log_softmax(xss,dim=1))
        conv_kernels = kwargs.get('conv_kernels',(len(channels)-1)*[5])
        pool_kernels = kwargs.get('pool_kernels',(len(channels)-1)*[2])
        nonlins = kwargs.get('nonlins', (nn.ReLU(), nn.ReLU()))
        dropout = kwargs.get('dropout', 0)
        batchnorm = kwargs.get('batchnorm', ('before',))
        # build the convolutional part:
        self.conv, out_size = convFFhidden(
            channels, conv_kernels, pool_kernels, nonlins = nonlins[0],
            batchnorm = batchnorm, dropout = dropout)
        # build the dense part; its input width is the flattened size of
        # the conv part's output: out_channels * H_out * W_out
        n_inputs_dense = channels[-1]*(lambda x,y: x*y)(*out_size(*in_size))
        self.dense = denseFFhidden(
            n_inputs = n_inputs_dense, n_outputs = n_out, widths = widths,
            nonlins = (nonlins[1],), dropout = dropout)
        # build a short representation string (markup is stripped or
        # colorized later by du.utils._markup in short_repr)
        nonlins = list(map(lambda mo: repr(mo)[repr(mo).rfind('.')+1:-2], nonlins))
        batchnorm = 'none' if len(batchnorm)==0 else batchnorm[0]
        convpart = functools.reduce(lambda x, y: x + ' ' + y,
            ['Conv.: ~channels~'] + list(map(lambda x: '`'+str(x)+'`', channels)) \
            + ['`'+nonlins[0]+'`'] + ['~batchnorm~:'+ '`'+str(batchnorm)+'`']\
            + ['~dropout~:'+'`'+str(dropout)+'`'])
        densepart = functools.reduce(lambda x, y: x + ' ' + y,
            ['\nDense: ~widths~'] \
            + list(map(lambda x: '`'+str(x)+'`', (n_inputs_dense,) \
            + tuple(widths) + (n_out,))) + ['`'+nonlins[1]+'`']\
            + ['~dropout~:'+'`'+str(dropout)+'`'])
        self.repr_ = convpart + densepart

    def forward(self, xss):
        """Forward inputs.

        Forwards features (of a mini-batch of examples) through
        the convolutional part of the model followed by the ful-
        ly-connected part.

        Args:
          $xss$ (`Tensor`): The tensor to be forwarded.

        Returns:
          (`Tensor`). The forwarded tensor.
        """
        # unsqueeze(1) inserts a channel dimension before the conv part.
        xss = self.conv(xss.unsqueeze(1))
        # Flatten the conv output per example before the dense part.
        xss = self.dense(xss.reshape(len(xss),-1))
        if self.outfn: xss = self.outfn(xss)
        return xss

    def short_repr(self, color=True):
        """Return concise representation string."""
        return du.utils._markup(self.repr_, strip = not color)
class OneMetaCNN(FFNet_):
    """One meta-layer CNN with a two fully-connected layers.

    Note: Consider using `ConvFFNet` which generalizes this.
    """
    def __init__(self, in_size, n_out, channels, **kwargs):
        """Constructor.

        Args:
          $in_size$ (`Tuple[int]`): A tuple of length 2 holding the
              width and height of each input.
          $n_out$ (`int`): Number of outputs from the model. This is
              10 to classify digits, or 1 for a regression problem.
          $channels$ (`Tuple(int)`). This is `(in_channels, out_chann`
              `els)` where 'channels' is that of the convolutional
              part of the metalayer.

        Kwargs:
          $outfn$ (`nn.Module`): a function to pipe out though lastly
              in the `forward` method; The default is `log_softmax`.
              For regression, you likely want to put `None`.
          $means$ (`torch.Tensor`): A tensor typically holding the
              means of the training data.
          $stdevs$ (`torch.Tensor`): A tensor typically holding the
              standard deviations of the training data.
        """
        du.utils._check_kwargs(kwargs, ['means', 'stdevs', 'outfn'])
        means = kwargs.get('means', None)
        stdevs = kwargs.get('stdevs', None)
        self.outfn = kwargs.get('outfn',
            lambda xss: torch.log_softmax(xss,dim=1))
        assert len(in_size) == 2,\
            'in_size must have length 2 not {}'.format(len(in_size))
        super().__init__(means = means, stdevs = stdevs)
        # Single metalayer: 5x5 same-padding convolution, ReLU, then a
        # 2x2 max-pool which halves each spatial dimension.
        self.meta_layer = nn.Sequential(
            nn.Conv2d(
                in_channels=channels[0],
                out_channels=channels[1],
                kernel_size = 5,
                stride = 1,
                padding = 2
            ),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size = 2, stride = 2, padding = 0)
        )
        # After pooling, the feature map is in_size[0]/2 x in_size[1]/2,
        # hence the /4 in the fully-connected layer's input size.
        self.fc_layer = nn.Linear(int(channels[1]*in_size[0]*in_size[1]/4),n_out)
        # NOTE(review): means/stdevs were already passed to
        # FFNet_.__init__ above; registering buffers with the same names
        # again here may be redundant (or conflict) depending on what
        # FFNet_ does with them -- confirm against du.models.FFNet_.
        self.register_buffer('means', means)
        self.register_buffer('stdevs', stdevs)

    def forward(self, xss):
        """Forward inputs.

        Forwards features (of a mini-batch of examples) through,
        in turn, a meta-layer and a fully-connected layer.

        Args:
          $xss$ (`torch.Tensor`): The tensor to be forwarded.

        Returns:
          (`torch.Tensor`). The forwarded tensor.
        """
        # Insert a channel dimension at index 1 before the conv layer.
        xss = torch.unsqueeze(xss, dim=1)
        xss = self.meta_layer(xss)
        # Flatten each example before the fully-connected layer.
        xss = self.fc_layer(xss.reshape(len(xss),-1))
        if self.outfn: xss = self.outfn(xss)
        return xss
class TwoMetaCNN(FFNet_):
    """Two meta-layer CNN with three fully-connected layers.

    Note: Consider using `DenseFFNet` which generalizes this.
    """
    def __init__(self, in_size, n_out, channels, width, **kwargs):
        """Constructor.

        Args:
          $in_size$ (`Tuple[int]`): A tuple of length 2 holding the
              width and height of each input.
          $n_out$ (`int`): Number of outputs from the model. This is
              10 to classify digits, or 1 for a regression problem.
          $channels$ (`Tuple(int)`). This is a triple the first ent-
              ry of which is `in_channels` for the convolutional
              part of the first metalayer; the second and third
              entries are `out_channels` for the convolutional
              parts of the first and second metalayers, resp.
          $width$ (`int`): the widths (no. of nodes) in the second
              layers of the dense part.

        Kwargs:
          $outfn$ (`nn.Module`): a function to pipe out though lastly
              in the `forward` method; The default is `log_softmax`.
              For regression, you likely want to put `None`.
          $means$ (`torch.Tensor`): A tensor typically holding the
              means of the training data.
          $stdevs$ (`torch.Tensor`): A tensor typically holding the
              standard deviations of the training data.
        """
        du.utils._check_kwargs(kwargs, ['means', 'stdevs', 'outfn'])
        means = kwargs.get('means', None)
        stdevs = kwargs.get('stdevs', None)
        assert len(in_size) == 2,\
            'in_size must have length 2 not {}'.format(len(in_size))
        self.outfn = kwargs.get('outfn',
            lambda xss: torch.log_softmax(xss,dim=1))
        super().__init__(means = means, stdevs = stdevs)
        self.metalayer1 = nn.Sequential(  # A mini-batch of size N to this should
                                          # have size:
            nn.Conv2d(                    # N x channels[0] x in_size[0] x in_size[1]
                in_channels=channels[0],
                out_channels=channels[1], # And the output of Conv2d is still size:
                kernel_size=5,            # N x channels[1] x in_size[0] x in_size[1]
                stride=1,
                padding = 2
            ),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size = 2, stride = 2, padding = 0)
        )   # Downsampling with MaxPool we have that
        self.metalayer2 = nn.Sequential(  # the input here is:
            nn.Conv2d(                    # N x channels[1] x in_size[0]/2 x in_size[1]/2.
                in_channels=channels[1],
                out_channels=channels[2],
                kernel_size=3,            # And the ouput of this Conv2d is:
                stride=1,                 # N x channels[2] x in_size[0]/2 x in_size[1]/2.
                padding = 1
            ),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size = 2, stride = 2, padding = 0)
        )   # Downsampling, again, we have
            # N x channels[2] x in_size[0]/4 x in_size[1]/4.
        self.fc_layer1 = nn.Linear(int(channels[2]*in_size[0]*in_size[1]/16), width)
        self.fc_layer2 = nn.Linear(width, n_out)

    def forward(self, xss):
        """Forward inputs.

        Forwards features (of a mini-batch of examples) through,
        in turn, two meta-layers and two fully-connected layers,
        followed by `outfn` (by default, log_softmax).

        Args:
          $xss$ (`torch.Tensor`): The tensor to be forwarded.

        Returns:
          (`torch.Tensor`). The forwarded tensor.
        """
        xss = self.metalayer2(self.metalayer1(xss.unsqueeze(1)))
        xss = self.fc_layer1(xss.reshape(len(xss),-1))
        xss = self.fc_layer2(torch.relu(xss))
        # BUGFIX: honor the outfn kwarg (as OneMetaCNN and ConvFFNet do)
        # rather than unconditionally applying log_softmax. With the
        # default outfn the output is unchanged, but outfn=None (which
        # the constructor docstring recommends for regression) now works
        # as documented.
        if self.outfn: xss = self.outfn(xss)
        return xss
if __name__ == '__main__':
    # Self-test driver: strip the documentation markup from this module's
    # docstrings, run the doctests, and, on success, print signatures.
    import inspect
    import doctest

    # find the user defined functions
    _local_functions = [(name,ob) for (name, ob) in sorted(locals().items())\
        if callable(ob) and ob.__module__ == __name__]

    #remove markdown
    # from the docstring for this module
    globals()['__doc__'] = du.utils._markup(globals()['__doc__'],strip = True)
    # from the functions (methods are fns in Python3) defined in this module
    for _, _ob in _local_functions:
        if inspect.isfunction(_ob):
            _ob.__doc__ = du.utils._markup(_ob.__doc__,strip = True)
        # below we find all the methods that are not inherited
        if inspect.isclass(_ob):
            _parents = inspect.getmro(_ob)[1:]
            _parents_methods = set()
            for _parent in _parents:
                _members = inspect.getmembers(_parent, inspect.isfunction)
                _parents_methods.update(_members)
            _child_methods = set(inspect.getmembers(_ob, inspect.isfunction))
            _child_only_methods = _child_methods - _parents_methods
            # only strip markup from methods defined on the class itself
            for name,_meth in _child_only_methods:
                _ob.__dict__[name].__doc__ = du.utils._markup(_meth.__doc__,strip =True)

    # run doctests
    failures, _ = doctest.testmod(optionflags=doctest.ELLIPSIS)

    # print signatures
    if failures == 0:
        from inspect import signature
        for name, ob in _local_functions:
            print(name,'\n ', inspect.signature(ob))
from http.client import HTTPConnection, HTTPSConnection
from http.cookiejar import CookieJar, DefaultCookiePolicy
import os
from urllib.parse import urlparse, urlunparse
from ._config import (
allowed_cookies,
httpopt
)
from ._exceptions import InvalidScheme
from ._utils import requote_uri
def create_connection(scheme, netloc):
    """Create an HTTP or HTTPS connection to *netloc*.

    Parameters:
        scheme (`str`):
            Scheme (must be 'http' or 'https').
        netloc (`str`):
            Netloc or hostname.

    Raises:
        InvalidScheme: In case the provided *scheme* is not valid.
    """
    if scheme == "https":
        # HTTPS additionally needs the module-wide SSL context.
        return HTTPSConnection(
            netloc, context=httpopt.ssl_context, timeout=httpopt.timeout
        )
    if scheme == "http":
        return HTTPConnection(netloc, timeout=httpopt.timeout)
    raise InvalidScheme(f"Expecting 'http' or 'https', but got: {scheme}")
def handle_redirects(url, response):
    """Resolve the redirect target of *response* against *url*.

    Returns the absolute target URL, or None when the response carries
    neither a Location nor a Content-Location header.
    """
    target = response.headers.get("Location")
    if target is None:
        target = response.headers.get("Content-Location")
        if target is None:
            return None

    # Re-decode the header value as UTF-8 (http.client decodes raw
    # header bytes as latin1): https://stackoverflow.com/a/27357138
    target = requote_uri(
        target.encode(encoding="latin1").decode(encoding='utf-8')
    )

    if target.startswith(("http://", "https://")):
        # Already an absolute URL.
        return target

    scheme, netloc, path, _params, _query, _fragment = urlparse(url)

    if target.startswith("/"):
        # Host-relative redirect: keep scheme and host, replace the path.
        return urlunparse(
            (scheme, netloc, target, "", "", "")
        )

    # Path-relative redirect: resolve against the directory of *url*.
    # NOTE(review): os.path.join is platform-dependent for URL paths --
    # presumably fine on POSIX; urllib.parse.urljoin would be the
    # canonical tool here. Confirm before changing behavior.
    new_path = os.path.join(os.path.dirname(path), target)

    return urlunparse(
        (scheme, netloc, new_path, "", "", "")
    )
def add_missing_attributes(url, connection):
    """Patch *connection* with the request-like attributes that the
    http.cookiejar machinery expects (get_full_url, headers, etc.).
    """
    # Preserve any cookie store the connection already carries.
    if not hasattr(connection, "cookies"):
        connection.cookies = {}

    def _set_header(key, value):
        connection.headers.update(
            {
                key: value
            }
        )

    connection.has_header = lambda header_name: False
    connection.add_unredirected_header = _set_header
    connection.get_full_url = lambda: url
    connection.unverifiable = True
    connection.headers = {}
    connection.origin_req_host = urlparse(url).netloc
def create_cookie_jar(policy_type=None):
    """Return a `CookieJar` whose policy is chosen by *policy_type*.

    "reject_all" drops every cookie, "allow_all" accepts every cookie,
    and "allow_if_needed" accepts only cookies whose domain appears in
    `allowed_cookies`; any other value keeps the stock
    `DefaultCookiePolicy` behavior.
    """
    jar = CookieJar()
    policy = DefaultCookiePolicy()

    if policy_type == "reject_all":
        policy.set_ok = lambda cookie, request: False
    elif policy_type == "allow_all":
        policy.set_ok = lambda cookie, request: True
    elif policy_type == "allow_if_needed":
        policy.set_ok = lambda cookie, request: (
            cookie.domain in allowed_cookies
        )

    jar.set_policy(policy)

    return jar
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
    """Initial schema for the django_associations app.

    Creates the Organisation, Member and Association models and then
    wires them together with foreign-key / many-to-many fields.
    """

    initial = True

    dependencies = [
        # Member.user points at the project's (possibly swapped) user model.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Association',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('dts_insert', models.DateTimeField(auto_now_add=True)),
                ('dts_update', models.DateTimeField(blank=True, null=True)),
                ('dts_delete', models.DateTimeField(blank=True, null=True)),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='Member',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('dts_insert', models.DateTimeField(auto_now_add=True)),
                ('dts_update', models.DateTimeField(blank=True, null=True)),
                ('dts_delete', models.DateTimeField(blank=True, null=True)),
                ('uid_verifier', models.UUIDField(default=uuid.uuid4, null=True, unique=True)),
                ('has_accepted', models.BooleanField(default=False)),
                ('org_managers', models.BooleanField(default=False)),
                ('hrm_managers', models.BooleanField(default=False)),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='Organisation',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('dts_insert', models.DateTimeField(auto_now_add=True)),
                ('dts_update', models.DateTimeField(blank=True, null=True)),
                ('dts_delete', models.DateTimeField(blank=True, null=True)),
                ('label', models.CharField(max_length=128, unique=True)),
            ],
            options={
                'abstract': False,
            },
        ),
        # Relational fields are added after all models exist (as
        # generated by makemigrations).
        migrations.AddField(
            model_name='member',
            name='organisation',
            field=models.ForeignKey(blank=True, on_delete=django.db.models.deletion.CASCADE, related_name='member', to='django_associations.Organisation'),
        ),
        migrations.AddField(
            model_name='member',
            name='user',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='association_member', to=settings.AUTH_USER_MODEL),
        ),
        migrations.AddField(
            model_name='association',
            name='associations',
            field=models.ManyToManyField(blank=True, related_name='association_associations', to='django_associations.Organisation'),
        ),
        migrations.AddField(
            model_name='association',
            name='organisation',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='association_organisation', to='django_associations.Organisation'),
        ),
    ]
import fnmatch
import glob
import os
import shutil
from collections import defaultdict
class GlobDirectoryWalker:
    """Forward iterator over a directory tree, yielding paths whose
    basename matches *pattern*.

    Implements the legacy sequence-iteration protocol: ``__getitem__``
    is called with increasing indices until it raises ``IndexError``
    (here, from popping an empty directory stack).
    """

    def __init__(self, directory, pattern="*"):
        self.stack = [directory]      # directories still to be visited
        self.pattern = pattern
        self.files = []               # entries of the directory in progress
        self.index = 0                # position within self.files

    def __getitem__(self, index):
        # The index argument is ignored; each call simply advances.
        while True:
            try:
                name = self.files[self.index]
            except IndexError:
                # Current directory exhausted: pop the next one off the
                # stack (an IndexError from an empty stack ends iteration).
                self.directory = self.stack.pop()
                if os.path.isdir(self.directory):
                    self.files = os.listdir(self.directory)
                else:
                    self.files = []
                self.index = 0
            else:
                self.index += 1
                fullname = os.path.join(self.directory, name)
                # Queue subdirectories for later, but never follow links.
                if os.path.isdir(fullname) and not os.path.islink(fullname):
                    self.stack.append(fullname)
                if fnmatch.fnmatch(name, self.pattern):
                    return fullname
def main(rootpath, destdir):
    """Generate one reStructuredText page per example script plus an index.

    Walks ``rootpath`` for ``*.py`` example scripts (and non-Python support
    files), extracts each script's leading docstring, and writes Sphinx-ready
    ``.rst`` files into ``destdir``.

    Args:
        rootpath: Directory containing the example scripts.
        destdir: Output directory for the generated ``.rst`` files.
    """
    if not os.path.exists(destdir):
        shutil.os.makedirs(destdir)
    examplesfnames = [fname for fname in GlobDirectoryWalker(rootpath, "*.py")]
    # NOTE(review): "*.[!py]*" only excludes extensions whose *first* character
    # is 'p' or 'y' — it is a heuristic for "non-Python files", not exact.
    additional_files = [
        fname
        for fname in GlobDirectoryWalker(rootpath, "*.[!py]*")
        if not os.path.basename(fname) == ".gitignore"
    ]
    print(f"Documenting {len(examplesfnames)} examples")
    examplespaths = []
    examplesbasenames = []
    relativepaths = []
    outnames = []
    for f in examplesfnames:
        path, file = os.path.split(f)
        relpath = os.path.relpath(path, rootpath)
        if relpath == ".":
            relpath = ""
        path = os.path.normpath(path)
        filebase, ext = os.path.splitext(file)
        exname = filebase
        if relpath:
            # Qualify the example name with its subdirectory, dot-separated.
            exname = relpath.replace("/", ".").replace("\\", ".") + "." + exname
        examplespaths.append(path)
        examplesbasenames.append(filebase)
        relativepaths.append(relpath)
        outnames.append(exname)
    # We assume all files are encoded as UTF-8
    examplescode = []
    for fname in examplesfnames:
        with open(fname, encoding="utf-8") as f:
            examplescode.append(f.read())
    # Split each example into its module docstring and the code that follows.
    examplesdocs = []
    examplesafterdoccode = []
    for code in examplescode:
        codesplit = code.split("\n")
        # Skip leading comment/blank lines (e.g. shebang, encoding cookie).
        comment_lines = 0
        for line in codesplit:
            if line.startswith("#") or len(line) == 0:
                comment_lines += 1
            else:
                break
        codesplit = codesplit[comment_lines:]
        readingdoc = False
        doc = []
        afterdoccode = ""
        for i in range(len(codesplit)):
            stripped = codesplit[i].strip()
            if stripped[:3] == '"""' or stripped[:3] == "'''":
                if not readingdoc:
                    readingdoc = True
                else:
                    afterdoccode = "\n".join(codesplit[i + 1 :])
                    break
            elif readingdoc:
                doc.append(codesplit[i])
            else:  # No doc
                afterdoccode = "\n".join(codesplit[i:])
                break
        examplesdocs.append("\n".join(doc))
        examplesafterdoccode.append(afterdoccode)
    categories = defaultdict(list)
    examples = zip(
        examplesfnames,
        examplespaths,
        examplesbasenames,
        examplescode,
        examplesdocs,
        examplesafterdoccode,
        relativepaths,
        outnames,
    )
    # Get the path relative to the examples directory (not relative to the
    # directory where this file is installed).
    if "BRIAN2_DOCS_EXAMPLE_DIR" in os.environ:
        rootdir = os.environ["BRIAN2_DOCS_EXAMPLE_DIR"]
    else:
        rootdir, _ = os.path.split(__file__)
        rootdir = os.path.normpath(os.path.join(rootdir, "../../examples"))
    eximgpath = os.path.abspath(
        os.path.join(rootdir, "../docs_sphinx/resources/examples_images")
    )
    print("Searching for example images in directory", eximgpath)
    for _fname, _path, basename, _code, docs, afterdoccode, relpath, exname in examples:
        categories[relpath].append((exname, basename))
        title = "Example: " + basename
        output = ".. currentmodule:: brian2\n\n"
        output += ".. " + basename + ":\n\n"
        output += title + "\n" + "=" * len(title) + "\n\n"
        note = f"""
.. only:: html

    .. |launchbinder| image:: http://mybinder.org/badge.svg
    .. _launchbinder: https://mybinder.org/v2/gh/brian-team/brian2-binder/master?filepath=examples/{exname.replace('.', '/')}.ipynb

    .. note::
       You can launch an interactive, editable version of this
       example without installing any local files
       using the Binder service (although note that at some times this
       may be slow or fail to open): |launchbinder|_
"""
        output += note + "\n\n"
        output += docs + "\n\n::\n\n"
        output += "\n".join(["    " + line for line in afterdoccode.split("\n")])
        output += "\n\n"
        eximgpattern = os.path.join(eximgpath, f"{exname}.*")
        images = glob.glob(eximgpattern + ".png") + glob.glob(eximgpattern + ".gif")
        for image in sorted(images):
            _, image = os.path.split(image)
            print("Found example image file", image)
            output += f".. image:: ../resources/examples_images/{image}\n\n"
        with open(os.path.join(destdir, exname + ".rst"), "w", encoding="utf-8") as f:
            f.write(output)
    # Render non-Python support files as literal code pages.
    category_additional_files = defaultdict(list)
    for fname in additional_files:
        path, file = os.path.split(fname)
        relpath = os.path.relpath(path, rootpath)
        if relpath == ".":
            relpath = ""
        full_name = relpath.replace("/", ".").replace("\\", ".") + "." + file + ".rst"
        category_additional_files[relpath].append((file, full_name))
        with open(fname, encoding="utf-8") as f:
            print(fname)
            content = f.read()
        output = file + "\n" + "=" * len(file) + "\n\n"
        output += ".. code:: none\n\n"
        content_lines = ["\t" + line for line in content.split("\n")]
        output += "\n".join(content_lines)
        output += "\n\n"
        with open(os.path.join(destdir, full_name), "w", encoding="utf-8") as f:
            f.write(output)
    mainpage_text = "Examples\n"
    mainpage_text += "========\n\n"
    def insert_category(category, mainpage_text):
        # Append a section (with anchor label) listing all pages of a category.
        if category:
            label = category.lower().replace(" ", "-").replace("/", ".")
            mainpage_text += f"\n.. _{label}:\n\n"
            mainpage_text += "\n" + category + "\n" + "-" * len(category) + "\n\n"
        mainpage_text += ".. toctree::\n"
        mainpage_text += "    :maxdepth: 1\n\n"
        for exname, basename in sorted(categories[category]):
            mainpage_text += f"    {basename} <{exname}>\n"
        for fname, full_name in sorted(category_additional_files[category]):
            mainpage_text += f"    {fname} <{full_name}>\n"
        return mainpage_text
    mainpage_text = insert_category("", mainpage_text)
    for category in sorted(categories.keys()):
        if category:
            mainpage_text = insert_category(category, mainpage_text)
    # Fix: write the index with an explicit encoding, consistent with every
    # other file written above (the default encoding is platform-dependent).
    with open(os.path.join(destdir, "index.rst"), "w", encoding="utf-8") as f:
        f.write(mainpage_text)
# Allow running this module directly from the source checkout with the
# default in-repo example and output locations.
if __name__ == "__main__":
    main("../../examples", "../../docs_sphinx/examples")
/FLAML-2.0.2-py3-none-any.whl/flaml/autogen/code_utils.py | import signal
import subprocess
import sys
import os
import pathlib
from typing import List, Dict, Tuple, Optional, Union, Callable
import re
import time
from hashlib import md5
import logging
from flaml.autogen import oai
try:
    import docker
except ImportError:
    # The docker SDK is optional; ``docker is None`` makes execute_code()
    # default to running code locally instead of in a container.
    docker = None
# Model names used by the default completion configurations below.
DEFAULT_MODEL = "gpt-4"
FAST_MODEL = "gpt-3.5-turbo"
# Regular expression for finding a code block
CODE_BLOCK_PATTERN = r"```(\w*)\n(.*?)\n```"
# Default directory where generated code files are written before execution.
WORKING_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), "extensions")
# Language tag used when a code block's language cannot be determined.
UNKNOWN = "unknown"
# Log message and default limit (seconds) for code-execution timeouts.
TIMEOUT_MSG = "Timeout"
DEFAULT_TIMEOUT = 600
def infer_lang(code):
    """Infer the execution language for a code snippet.

    Snippets that look like shell invocations (``python ...``, ``python3 ...``
    or ``pip...``) are tagged ``"sh"``; everything else is assumed to be
    Python source.
    TODO: make it robust.
    """
    shell_prefixes = ("python ", "pip", "python3 ")
    if code.startswith(shell_prefixes):
        return "sh"
    return "python"
def extract_code(text: str, pattern: str = CODE_BLOCK_PATTERN) -> List[Tuple[str, str]]:
    """Extract fenced code blocks from a text.

    Args:
        text (str): The text to extract code from.
        pattern (Optional, str): The regular expression pattern for finding the code block.

    Returns:
        list: A list of ``(language, code)`` tuples, one per matched block.
            When no block matches, a single tuple pairing the UNKNOWN tag
            with the whole text is returned.
    """
    matches = re.findall(pattern, text, flags=re.DOTALL)
    if matches:
        return matches
    # No fenced block found: treat the entire text as code of unknown language.
    return [(UNKNOWN, text)]
# _FIND_CODE_SYS_MSG = [
# {
# "role": "system",
# "content": """In the following conversation, an assistant suggests code and a user is expected to run it.
# Read the conversation, and then find all the right code blocks for the user to run next in the right order.
# Only return the code blocks that are expected to run.
# Don't include code blocks which have been executed unless the user is requested to run the same block again.
# When the user needs to run multiple blocks in sequence, make sure to output all the blocks to run in a right order.
# If the line beginning with "# filename" is put before a code block, move it into the code block as the first line.
# Make sure to add the right "python" or "sh" identifier if the language identifier is missing for a code block.
# Don't make other changes to the code blocks.
# Don't reply anything else if at least one code block is expected to run.
# If no code block is expeted to run, check whether the task has been successfully finished at full satisfaction.
# If not, reply with the reason why the task is not finished.""",
# },
# ]
# _FIND_CODE_CONFIG = {
# "model": FAST_MODEL,
# }
# def find_code(messages: List[Dict], sys_msg=None, **config) -> Tuple[List[Tuple[str, str]], str]:
# """Find code from a list of messages.
# Args:
# messages (str): The list of messages to find code from.
# sys_msg (Optional, str): The system message to prepend to the messages.
# config (Optional, dict): The configuration for the API call.
# Returns:
# list: A list of tuples, each containing the language and the code.
# str: The generated text by llm.
# """
# params = {**_FIND_CODE_CONFIG, **config}
# if sys_msg is None or not sys_msg[0]["content"]:
# sys_msg = _FIND_CODE_SYS_MSG
# response = oai.ChatCompletion.create(messages=sys_msg + messages, **params)
# content = oai.Completion.extract_text(response)[0]
# return extract_code(content), content
def generate_code(pattern: str = CODE_BLOCK_PATTERN, **config) -> Tuple[str, float]:
    """Generate code.

    Args:
        pattern (Optional, str): The regular expression pattern for finding the code block.
            The default pattern is for finding a code block in a markdown file.
        config (Optional, dict): The configuration for the API call.

    Returns:
        str: The generated code.
        float: The cost of the generation.

    NOTE(review): despite the annotation/docstring, the first returned value
    is actually the list of (language, code) tuples from ``extract_code``.
    """
    response = oai.Completion.create(**config)
    # Extract the first completion's code blocks and pair them with the cost.
    return extract_code(oai.Completion.extract_text(response)[0], pattern), response["cost"]
# Default API configuration for improve_function(); the prompt placeholders
# are filled from the context dict passed to oai.Completion.create.
_IMPROVE_FUNCTION_CONFIG = {
    "prompt": """Improve the function '{func_name}' to achieve the objective '{objective}'.
The current implementation of the function is as follows:
{file_string}""",
    "model": DEFAULT_MODEL,
    "request_timeout": 600,
}
def improve_function(file_name, func_name, objective, **config):
    """(work in progress) Improve the function to achieve the objective.

    Args:
        file_name (str): Path of the source file containing the function.
        func_name (str): Name of the function to improve.
        objective (str): The objective the improved function should achieve.
        config (Optional, dict): Overrides for the default API-call configuration.

    Returns:
        str: The suggested improved implementation.
        float: The cost of the generation.
    """
    params = {**_IMPROVE_FUNCTION_CONFIG, **config}
    # Read the entire file into a str. Fix: use an explicit UTF-8 encoding so
    # reading source files does not depend on the platform default codec.
    with open(file_name, "r", encoding="utf-8") as f:
        file_string = f.read()
    response = oai.Completion.create(
        {"func_name": func_name, "objective": objective, "file_string": file_string}, **params
    )
    return oai.Completion.extract_text(response)[0], response["cost"]
# Default API configuration for improve_code(); '{followup}' switches the
# prompt between suggestions-only and suggestions-plus-code modes.
_IMPROVE_CODE_CONFIG = {
    "prompt": """Analyze the code in the following files and return a list of suggestions for improvement{followup}, to achieve the objective of '{objective}'.
{code}
""",
    "model": DEFAULT_MODEL,
    "request_timeout": 900,
}
def improve_code(files, objective, suggest_only=True, **config):
    """Improve the code to achieve a given objective.

    Args:
        files (list): A list of file names containing the source code.
        objective (str): The objective to achieve.
        suggest_only (bool): Whether to return only the suggestions or the improved code.
        config (Optional, dict): The configuration for the API call.

    Returns:
        str: The improved code if suggest_only=False; a list of suggestions if suggest_only=True (default).
        float: The cost of the generation.
    """
    code = ""
    for file_name in files:
        # Read the entire file into a string. Fix: explicit UTF-8 encoding so
        # the result does not depend on the platform default codec.
        with open(file_name, "r", encoding="utf-8") as f:
            file_string = f.read()
        code += f"""{file_name}:
{file_string}
"""
    params = {**_IMPROVE_CODE_CONFIG, **config}
    followup = "" if suggest_only else " followed by the improved code"
    response = oai.Completion.create({"objective": objective, "code": code, "followup": followup}, **params)
    return oai.Completion.extract_text(response)[0], response["cost"]
def timeout_handler(signum, frame):
    """SIGALRM handler used by execute_code to abort long-running subprocesses."""
    raise TimeoutError("Timed out!")
def _cmd(lang):
if lang.startswith("python") or lang in ["bash", "sh"]:
return lang
if lang == "shell":
return "sh"
raise NotImplementedError(f"{lang} not recognized in code execution")
def execute_code(
    code: Optional[str] = None,
    timeout: Optional[int] = None,
    filename: Optional[str] = None,
    work_dir: Optional[str] = None,
    use_docker: Optional[Union[List[str], str, bool]] = docker is not None,
    lang: Optional[str] = "python",
) -> Tuple[int, str, str]:
    """Execute code in a docker container.
    This function is not tested on MacOS.

    Args:
        code (Optional, str): The code to execute.
            If None, the code from the file specified by filename will be executed.
            Either code or filename must be provided.
        timeout (Optional, int): The maximum execution time in seconds.
            If None, a default timeout will be used. The default timeout is 600 seconds. On Windows, the timeout is not enforced when use_docker=False.
        filename (Optional, str): The file name to save the code or where the code is stored when `code` is None.
            If None, a file with a randomly generated name will be created.
            The randomly generated file will be deleted after execution.
            The file name must be a relative path. Relative paths are relative to the working directory.
        work_dir (Optional, str): The working directory for the code execution.
            If None, a default working directory will be used.
            The default working directory is the "extensions" directory under
            "path_to_flaml/autogen".
        use_docker (Optional, list, str or bool): The docker image to use for code execution.
            If a list or a str of image name(s) is provided, the code will be executed in a docker container
            with the first image successfully pulled.
            If None, False or empty, the code will be executed in the current environment.
            Default is True, which will be converted into a list.
            If the code is executed in the current environment,
            the code must be trusted.
        lang (Optional, str): The language of the code. Default is "python".

    Returns:
        int: 0 if the code executes successfully.
        str: The error message if the code fails to execute; the stdout otherwise.
        image: The docker image name after container run when docker is used.
    """
    assert code is not None or filename is not None, "Either code or filename must be provided."
    timeout = timeout or DEFAULT_TIMEOUT
    original_filename = filename
    if filename is None:
        code_hash = md5(code.encode()).hexdigest()
        # create a file with an automatically generated name
        filename = f"tmp_code_{code_hash}.{'py' if lang.startswith('python') else lang}"
    if work_dir is None:
        work_dir = WORKING_DIR
    filepath = os.path.join(work_dir, filename)
    file_dir = os.path.dirname(filepath)
    os.makedirs(file_dir, exist_ok=True)
    if code is not None:
        with open(filepath, "w") as fout:
            fout.write(code)
    # check if already running in a docker container
    in_docker_container = os.path.exists("/.dockerenv")
    if not use_docker or in_docker_container:
        # already running in a docker container (or docker disabled):
        # run the code directly in the current environment
        cmd = [sys.executable if lang.startswith("python") else _cmd(lang), filename]
        if sys.platform == "win32":
            logging.warning("SIGALRM is not supported on Windows. No timeout will be enforced.")
            result = subprocess.run(
                cmd,
                cwd=work_dir,
                capture_output=True,
            )
        else:
            signal.signal(signal.SIGALRM, timeout_handler)
            try:
                signal.alarm(timeout)
                # run the code in a subprocess in the current docker container in the working directory
                result = subprocess.run(
                    cmd,
                    cwd=work_dir,
                    capture_output=True,
                )
                signal.alarm(0)
            except TimeoutError:
                if original_filename is None:
                    os.remove(filepath)
                return 1, TIMEOUT_MSG, None
        if original_filename is None:
            os.remove(filepath)
            abs_path = str(pathlib.Path(filepath).absolute())
        else:
            abs_path = str(pathlib.Path(work_dir).absolute()) + "/"
        # Strip the absolute path from error output so messages stay relative.
        if result.returncode:
            logs = result.stderr.decode("utf-8")
            logs = logs.replace(str(abs_path), "")
        else:
            logs = result.stdout.decode("utf-8")
        return result.returncode, logs, None
    # create a docker client
    client = docker.from_env()
    image_list = (
        ["python:3-alpine", "python:3", "python:3-windowsservercore"]
        if use_docker is True
        else [use_docker]
        if isinstance(use_docker, str)
        else use_docker
    )
    for image in image_list:
        # check if the image exists
        try:
            client.images.get(image)
            break
        except docker.errors.ImageNotFound:
            # pull the image
            print("Pulling image", image)
            try:
                client.images.pull(image)
                break
            except docker.errors.DockerException:
                print("Failed to pull image", image)
    # get a randomized str based on current time to wrap the exit code
    exit_code_str = f"exitcode{time.time()}"
    abs_path = pathlib.Path(work_dir).absolute()
    # Fix: run the saved script file inside the container (the command
    # previously interpolated a literal placeholder instead of the filename),
    # and wrap the script's exit code in the sentinel so it can be recovered
    # from the container logs.
    cmd = [
        "sh",
        "-c",
        f"{_cmd(lang)} {filename}; exit_code=$?; echo -n {exit_code_str}; echo -n $exit_code; echo {exit_code_str}",
    ]
    # create a docker container
    container = client.containers.run(
        image,
        command=cmd,
        working_dir="/workspace",
        detach=True,
        # get absolute path to the working directory
        volumes={abs_path: {"bind": "/workspace", "mode": "rw"}},
    )
    start_time = time.time()
    # Poll the container until it exits or the timeout elapses.
    while container.status != "exited" and time.time() - start_time < timeout:
        # Reload the container object
        container.reload()
    if container.status != "exited":
        container.stop()
        container.remove()
        if original_filename is None:
            os.remove(filepath)
        return 1, TIMEOUT_MSG, image
    # get the container logs
    logs = container.logs().decode("utf-8").rstrip()
    # commit the image so later calls can reuse the prepared environment
    tag = filename.replace("/", "")
    container.commit(repository="python", tag=tag)
    # remove the container
    container.remove()
    # check if the code executed successfully
    exit_code = container.attrs["State"]["ExitCode"]
    if exit_code == 0:
        # extract the script's exit code from the sentinel-wrapped logs
        pattern = re.compile(f"{exit_code_str}(\\d+){exit_code_str}")
        match = pattern.search(logs)
        exit_code = 1 if match is None else int(match.group(1))
        # remove the exit code from the logs
        logs = logs if match is None else pattern.sub("", logs)
    if original_filename is None:
        os.remove(filepath)
    if exit_code:
        logs = logs.replace(f"/workspace/{filename if original_filename is None else ''}", "")
    # return the exit code, logs and image
    return exit_code, logs, f"python:{tag}"
_GENERATE_ASSERTIONS_CONFIG = {
"prompt": """Given the signature and docstring, write the exactly same number of assertion(s) for the provided example(s) in the docstring, without assertion messages.
func signature:
{definition}
assertions:""",
"model": FAST_MODEL,
"max_tokens": 256,
"stop": "\n\n",
}
def generate_assertions(definition: str, **config) -> Tuple[str, float]:
    """Generate assertions for a function.

    Args:
        definition (str): The function definition, including the signature and docstr.
        config (Optional, dict): The configuration for the API call.

    Returns:
        str: The generated assertions.
        float: The cost of the generation.
    """
    # Merge caller overrides over the default assertion-generation config.
    params = {**_GENERATE_ASSERTIONS_CONFIG, **config}
    response = oai.Completion.create(
        {"definition": definition},
        **params,
    )
    assertions = oai.Completion.extract_text(response)[0]
    return assertions, response["cost"]
def _remove_check(response):
"""Remove the check function from the response."""
# find the position of the check function
pos = response.find("def check(")
if pos == -1:
return response
return response[:pos]
def eval_function_completions(
    responses: List[str],
    definition: str,
    test: Optional[str] = None,
    entry_point: Optional[str] = None,
    assertions: Optional[Union[str, Callable[[str], Tuple[str, float]]]] = None,
    timeout: Optional[float] = 3,
    use_docker: Optional[bool] = True,
) -> Dict:
    """Select a response from a list of responses for the function completion task (using generated assertions), and/or evaluate if the task is successful using a gold test.

    Args:
        responses (list): The list of responses.
        definition (str): The input definition.
        test (Optional, str): The test code.
        entry_point (Optional, str): The name of the function.
        assertions (Optional, str or Callable): The assertion code which serves as a filter of the responses, or an assertion generator.
            When provided, only the responses that pass the assertions will be considered for the actual test (if provided).
        timeout (Optional, float): The timeout for executing the code.
        use_docker (Optional, bool): Whether candidate code is executed in a docker container.

    Returns:
        dict: The success metrics.
    """
    n = len(responses)
    if assertions is None:
        # no assertion filter: run the gold test against every response
        success_list = []
        for i in range(n):
            response = _remove_check(responses[i])
            # Completions that start with "def" are full functions; otherwise
            # they are bodies to be appended to the definition.
            code = (
                f"{response}\n{test}\ncheck({entry_point})"
                if response.startswith("def")
                else f"{definition}{response}\n{test}\ncheck({entry_point})"
            )
            success = execute_code(code, timeout=timeout, use_docker=use_docker)[0] == 0
            success_list.append(success)
        return {
            # Probability that at least one of n sampled responses succeeds.
            "expected_success": 1 - pow(1 - sum(success_list) / n, n),
            "success": any(s for s in success_list),
        }
    if callable(assertions) and n > 1:
        # assertion generator
        assertions, gen_cost = assertions(definition)
    else:
        gen_cost = 0
    if n > 1 or test is None:
        # Select the first response that passes the assertion filter.
        for i in range(n):
            response = responses[i] = _remove_check(responses[i])
            code = (
                f"{response}\n{assertions}" if response.startswith("def") else f"{definition}{response}\n{assertions}"
            )
            succeed_assertions = execute_code(code, timeout=timeout, use_docker=use_docker)[0] == 0
            if succeed_assertions:
                break
    else:
        # just test, no need to check assertions
        succeed_assertions = False
        i, response = 0, responses[0]
    if test is None:
        # no test code
        return {
            "index_selected": i,
            "succeed_assertions": succeed_assertions,
            "gen_cost": gen_cost,
            "assertions": assertions,
        }
    # Run the gold test against the selected response.
    code_test = (
        f"{response}\n{test}\ncheck({entry_point})"
        if response.startswith("def")
        else f"{definition}{response}\n{test}\ncheck({entry_point})"
    )
    success = execute_code(code_test, timeout=timeout, use_docker=use_docker)[0] == 0
    return {
        "index_selected": i,
        "succeed_assertions": succeed_assertions,
        "success": success,
        "gen_cost": gen_cost,
        "assertions": assertions,
    }
# Prompt template and stop sequences for function-completion requests.
_FUNC_COMPLETION_PROMPT = "# Python 3{definition}"
_FUNC_COMPLETION_STOP = ["\nclass", "\ndef", "\nif", "\nprint"]
# Escalating config list tried by ``implement``: cheap/fast model first,
# stronger (and more expensive) models with more samples later.
_IMPLEMENT_CONFIGS = [
    {"model": FAST_MODEL, "prompt": _FUNC_COMPLETION_PROMPT, "temperature": 0, "seed": 0},
    {"model": FAST_MODEL, "prompt": _FUNC_COMPLETION_PROMPT, "stop": _FUNC_COMPLETION_STOP, "n": 7, "seed": 0},
    {"model": DEFAULT_MODEL, "prompt": _FUNC_COMPLETION_PROMPT, "temperature": 0, "seed": 1},
    {"model": DEFAULT_MODEL, "prompt": _FUNC_COMPLETION_PROMPT, "stop": _FUNC_COMPLETION_STOP, "n": 2, "seed": 2},
    {"model": DEFAULT_MODEL, "prompt": _FUNC_COMPLETION_PROMPT, "stop": _FUNC_COMPLETION_STOP, "n": 1, "seed": 2},
]
class PassAssertionFilter:
    """Response filter for ``oai.Completion.create`` that accepts completions
    passing a set of assertions (see ``implement``)."""

    def __init__(self, assertions):
        # ``assertions`` may be an assertion string or an assertion-generator
        # callable; eval_function_completions handles both.
        self._assertions = assertions
        self.cost = 0
        self.metrics = self.responses = None
    def pass_assertions(self, context, response, **_):
        """Check if the response passes the assertions."""
        responses = oai.Completion.extract_text(response)
        metrics = eval_function_completions(responses, context["definition"], assertions=self._assertions)
        # eval_function_completions may have generated concrete assertions
        # from a generator callable; reuse them on subsequent calls.
        self._assertions = metrics["assertions"]
        self.cost += metrics["gen_cost"]
        self.metrics = metrics
        self.responses = responses
        return metrics["succeed_assertions"]
def implement(
    definition: str,
    configs: Optional[List[Dict]] = None,
    assertions: Optional[Union[str, Callable[[str], Tuple[str, float]]]] = generate_assertions,
) -> Tuple[str, float]:
    """Implement a function from a definition.

    Args:
        definition (str): The function definition, including the signature and docstr.
        configs (list): The list of configurations for completion.
        assertions (Optional, str or Callable): The assertion code which serves as a filter of the responses, or an assertion generator.

    Returns:
        str: The implementation.
        float: The cost of the implementation.
        int: The index of the configuration which generates the implementation.

    NOTE(review): three values are returned although the annotation says
    ``Tuple[str, float]``.
    """
    cost = 0
    configs = configs or _IMPLEMENT_CONFIGS
    if len(configs) > 1 and callable(assertions):
        # Generate concrete assertions once up front when multiple configs
        # may be tried, so every config is filtered by the same assertions.
        assertions, cost = assertions(definition)
    assertion_filter = PassAssertionFilter(assertions)
    response = oai.Completion.create(
        {"definition": definition}, config_list=configs, filter_func=assertion_filter.pass_assertions
    )
    cost += assertion_filter.cost + response["cost"]
    return assertion_filter.responses[assertion_filter.metrics["index_selected"]], cost, response["config_id"]
    # for i, config in enumerate(configs):
    #     response = oai.Completion.create({"definition": definition}, **config)
    #     cost += oai.Completion.cost(response)
    #     responses = oai.Completion.extract_text(response)
    #     metrics = eval_function_completions(responses, definition, assertions=assertions)
    #     assertions = metrics["assertions"]
    #     cost += metrics["gen_cost"]
    #     if metrics["succeed_assertions"] or i == len(configs) - 1:
    #         return responses[metrics["index_selected"]], cost, i
/Adyan_test-0.2.9-py3-none-any.whl/Adyan/Utils/Mongo_conn.py |
from datetime import datetime
from pymongo import MongoClient
class MongoConn(object):
    """Holds a MongoDB connection and exposes one database handle."""

    def __init__(self, db_name, config):
        """
        :param db_name: name of the database to select on the client.
        :param config: keyword arguments forwarded to ``MongoClient``, e.g. {
                "host": "192.168.20.211",
                # "host": "47.107.86.234",
                "port": 27017
            }
        """
        # ``connect=True`` opens the connection eagerly instead of lazily.
        self.db = MongoClient(**config, connect=True)[db_name]
class DBBase(object):
    """Thin convenience wrapper around a single MongoDB collection.

    NOTE(review): ``cursor.count()``, ``Collection.update`` and
    ``Collection.insert`` used below were removed in PyMongo 4 — confirm the
    project pins ``pymongo<4``.
    """

    def __init__(self, collection, db_name, config):
        # Open the connection and bind the target collection.
        self.mg = MongoConn(db_name, config)
        self.collection = self.mg.db[collection]
    def exist_list(self, data, key, get_id: callable):
        """Yield the objects from *data* whose id (via *get_id*) is neither
        already stored under *key* in the collection nor listed in the local
        ignore file."""
        lst = [get_id(obj) for obj in data]
        print('lst', len(lst))
        # Ids already present in the collection.
        set_list = set([
            i.get(key)
            for i in list(
                self.collection.find({key: {"$in": lst}})
            )
        ])
        set_li = set(lst) - set_list
        # NOTE(review): hard-coded relative path; depends on the process CWD.
        with open("./ignore/null_field.txt", "rt", encoding="utf-8") as f:
            _ignore = [int(line.split(",")[0]) for line in f.readlines()]
        exist = list(set_li - set(_ignore))
        print(len(exist))
        # ``in exist`` on a list is O(n) per lookup — fine for small batches.
        for obj in data:
            if get_id(obj) in exist:
                yield obj
    def exist(self, dic):
        """
        Single-document existence check.
        :param dic: query filter.
        :return: number of matching documents (used as a 1/0 existence flag).
        """
        return self.collection.find(dic).count()
    def update_one(self, dic, item=None):
        # Update the document matching *dic*; upsert when it does not exist.
        # A timestamp is stamped only on the update (non-upsert) path.
        result = self.exist(dic)
        if item and result == 1:
            item['updateTime'] = datetime.strftime(datetime.now(), "%Y-%m-%d %H:%M:%S")
            self.collection.update(dic, {"$set": item})
        elif item:
            self.collection.update(dic, {"$set": item}, upsert=True)
    def insert_one(self, param):
        """
        :param param: a list of documents, or a single document dict.
        """
        self.collection.insert(param)
    def find_len(self, dic):
        # Number of documents matching the filter.
        return self.collection.find(dic).count()
    def find_one(self):
        # First document of the collection (natural order).
        return self.collection.find_one()
    def find_list(self, count, dic=None, page=None, ):
        """
        Query documents.
        :param count: maximum number of documents to return.
        :param dic: optional filter, e.g. {'city': ''}.
        :param page: optional 1-based page number for pagination.
        :return: list of documents (None when neither dic nor page is given).
        """
        if dic:
            return list(self.collection.find(dic).limit(count))
        if page:
            return list(self.collection.find().skip(page * count - count).limit(count))
    def daochu(self):
        # "daochu" = export: shops with medal A/AA, <= 2 service years, not
        # expired, located in Guangdong. The Chinese literals are stored
        # field values and must not be translated.
        return list(self.collection.find({'$and': [
            {'$or': [{"transaction_medal": "A"}, {"transaction_medal": "AA"}]},
            {"tpServiceYear": {'$lte': 2}},
            {"overdue": {'$ne': "店铺已过期"}},
            {"province": "广东"}
        ]}))
        # return self.collection.find().skip(count).next()
    def test(self):
        # Expose the raw collection handle (debug helper).
        return self.collection
class MongoPerson(DBBase):
    """Convenience subclass binding a collection name to a ``DBBase`` helper."""

    def __init__(self, table, db_name, config):
        super().__init__(table, db_name, config)
/FIXation-0.0.4.tar.gz/FIXation-0.0.4/README.md | Generate nice looking documents from your FIX repository.
### Command-line interface
`fix-parse [--base fixation/fix_repository_2010_edition_20140507] [--document] [--fiximate]`
`--base` points to where you're storing your fix repository.
`--document` generates a single-page document.html suitable for turning into a pdf.
`--fiximate` generates fiximate-styled pages, suitable for online-publishing.
### Using your own templates
The core of FIXation is centered around Jinja2 templates. Before you begin, you should bookmark [http://jinja.pocoo.org/docs/2.10/templates/], which contains well-written and easy-to-follow documentation on how to write templates.
Now the easiest way to get started is to copy the templates/ folder into your current working directory, there are a few base templates which contain the generic structure and then more specific templates which extends the bases.
### Writing templates
In the case of `--fiximate` you'll have a `repository` object which tells you what `type` you're handling, as well as the `copyright` and `version`.
You also have access to the Jinja2 filter `linkify` (which gives you a relative link to the item) and the tests `messages`, `field`, `component`, and `blacklist`/`whitelist` (with or without context).
The following is how messages.html uses linkify to generate links.
```jinja2
<a href="{{ msgcontent | linkify }}">
```
The following shows how messages.html checks whether something is a field or a component.
```jinja2
{% if msgcontent is component %}
```
The following example is from document.html and handles blacklisting/whitelisting with and without context.
```jinja2
{% if msgcontent is not blacklisted(message) %}
{% if message is not blacklisted %}
```
### document-settings.json
If you want to blacklist or whitelist things, there are two ways to do it. In the following example the StandardTrailer is considered blacklisted in the context of the message ResendRequest (2).
Anything put in extra_data will be inserted into the document, so the following example would let you use `{{ key }}` to access the list.
```json
{
"blacklist": ["0", "StandardHeader"],
"ctx_blacklist": { "2": ["StandardTrailer"] },
"extra_data": { "key": ["value1", "value2"] }
}
``` | PypiClean |
/FamcyDev-0.3.71-py3-none-any.whl/Famcy/bower_components/jquery/src/queue.js | define( [
"./core",
"./data/var/dataPriv",
"./deferred",
"./callbacks"
], function( jQuery, dataPriv ) {
"use strict";
jQuery.extend( {
queue: function( elem, type, data ) {
var queue;
if ( elem ) {
type = ( type || "fx" ) + "queue";
queue = dataPriv.get( elem, type );
// Speed up dequeue by getting out quickly if this is just a lookup
if ( data ) {
if ( !queue || Array.isArray( data ) ) {
queue = dataPriv.access( elem, type, jQuery.makeArray( data ) );
} else {
queue.push( data );
}
}
return queue || [];
}
},
dequeue: function( elem, type ) {
type = type || "fx";
var queue = jQuery.queue( elem, type ),
startLength = queue.length,
fn = queue.shift(),
hooks = jQuery._queueHooks( elem, type ),
next = function() {
jQuery.dequeue( elem, type );
};
// If the fx queue is dequeued, always remove the progress sentinel
if ( fn === "inprogress" ) {
fn = queue.shift();
startLength--;
}
if ( fn ) {
// Add a progress sentinel to prevent the fx queue from being
// automatically dequeued
if ( type === "fx" ) {
queue.unshift( "inprogress" );
}
// Clear up the last queue stop function
delete hooks.stop;
fn.call( elem, next, hooks );
}
if ( !startLength && hooks ) {
hooks.empty.fire();
}
},
// Not public - generate a queueHooks object, or return the current one
_queueHooks: function( elem, type ) {
var key = type + "queueHooks";
return dataPriv.get( elem, key ) || dataPriv.access( elem, key, {
empty: jQuery.Callbacks( "once memory" ).add( function() {
dataPriv.remove( elem, [ type + "queue", key ] );
} )
} );
}
} );
jQuery.fn.extend( {
queue: function( type, data ) {
var setter = 2;
if ( typeof type !== "string" ) {
data = type;
type = "fx";
setter--;
}
if ( arguments.length < setter ) {
return jQuery.queue( this[ 0 ], type );
}
return data === undefined ?
this :
this.each( function() {
var queue = jQuery.queue( this, type, data );
// Ensure a hooks for this queue
jQuery._queueHooks( this, type );
if ( type === "fx" && queue[ 0 ] !== "inprogress" ) {
jQuery.dequeue( this, type );
}
} );
},
dequeue: function( type ) {
return this.each( function() {
jQuery.dequeue( this, type );
} );
},
clearQueue: function( type ) {
return this.queue( type || "fx", [] );
},
// Get a promise resolved when queues of a certain type
// are emptied (fx is the type by default)
promise: function( type, obj ) {
var tmp,
count = 1,
defer = jQuery.Deferred(),
elements = this,
i = this.length,
resolve = function() {
if ( !( --count ) ) {
defer.resolveWith( elements, [ elements ] );
}
};
if ( typeof type !== "string" ) {
obj = type;
type = undefined;
}
type = type || "fx";
while ( i-- ) {
tmp = dataPriv.get( elements[ i ], type + "queueHooks" );
if ( tmp && tmp.empty ) {
count++;
tmp.empty.add( resolve );
}
}
resolve();
return defer.promise( obj );
}
} );
return jQuery;
} ); | PypiClean |
/Hyperstatic-0.2.0-cp38-cp38-win_amd64.whl/hyperstatic/core/fe_model/element/quad/DKGQ.py | import numpy as np
import scipy.sparse as spr
from hyperstatic.core.fe_model.meta.membranes import GQ12
from hyperstatic.core.fe_model.meta.plates import metaDKQ
from hyperstatic.core.fe_model.node import Node
from hyperstatic.core.fe_model.element.quad import Quad
from hyperstatic.core.fe_model.section.shell_section import ShellSection
import quadpy
class DKGQ(Quad):
    """Four-node flat shell element with 24 DOFs (6 per node).

    Combines the GQ12 membrane formulation (in-plane translations plus a
    drilling rotation) with the DKQ plate-bending formulation (transverse
    deflection plus two bending rotations), per the meta modules imported
    above.
    """

    def __init__(self,name:str,section:ShellSection,node_i:Node, node_j:Node, node_k:Node, node_l:Node):
        # Store the shell section before delegating geometry/DOF setup (24
        # DOFs) to the Quad base class.
        self.__section=section
        super(DKGQ,self).__init__(name,node_i, node_j, node_k, node_l,24)
    def integrate_K(self):
        """Integrate and assemble the 24x24 element stiffness matrix."""
        X=np.array([
            self.nodes[0].loc,
            self.nodes[1].loc,
            self.nodes[2].loc,
            self.nodes[3].loc]
        )
        # Project nodal coordinates into the element's local plane; only the
        # in-plane (x, y) components are kept.
        X_=X-self.local_csys.origin #not necessary
        X_=X_.dot(self.local_csys.transform_matrix.T)[:,:2]
        E=self.__section.E
        mu=self.__section.mu
        t=self.__section.t
        Km=np.zeros((12,12))
        Kp=np.zeros((12,12))
        # Membrane stiffness (GQ12), integrated with a 2-point Gauss scheme.
        if self.__section.ele_type=="membrane" or self.__section.ele_type=="shell":
            BDB=GQ12.get_binary_BDB()
            def func_m(x):
                # quadpy passes arrays of (xi, eta) points; evaluate B'DB at
                # each and stack along a third axis.
                res=[]
                for xi,eta in zip(x[0],x[1]):
                    res.append(BDB(E,mu,t,xi,eta,*tuple(X_.reshape(X_.size))))
                res=np.stack(res,axis=2)
                return res
            scheme = quadpy.c2.get_good_scheme(2)
            Km = scheme.integrate(
                func_m,
                quadpy.c2.rectangle_points([-1.0, 1.0], [-1.0, 1.0]),
            ) #12x12
        # Plate-bending stiffness (DKQ), same integration scheme.
        if self.__section.ele_type=="plate" or self.__section.ele_type=="shell":
            bBDB=metaDKQ.get_binary_BDB()
            def func_p(x):
                res=[]
                for xi,eta in zip(x[0],x[1]):
                    res.append(bBDB(E,mu,t,xi,eta,*tuple(X_.reshape(X_.size))))
                return np.stack(res,axis=2)
            scheme = quadpy.c2.get_good_scheme(2)
            Kp = scheme.integrate(
                func_p,
                quadpy.c2.rectangle_points([-1.0, 1.0], [-1.0, 1.0]),
            )
        # Scatter the two 12x12 matrices into the 24x24 element matrix:
        # per node, local DOFs 0-1 take the membrane translations, DOFs 2-4
        # take the plate terms (w, rx, ry) and DOF 5 the drilling rotation.
        K=np.zeros((24,24))
        for i in range(4):
            for j in range(4):
                K[i*6+2:i*6+5,j*6+2:j*6+5]=Kp[i*3:i*3+3,j*3:j*3+3]
                K[  i*6:i*6+2,  j*6:j*6+2]=Km[i*3:i*3+2,j*3:j*3+2]
                K[      i*6+5,  j*6:j*6+2]=Km[    i*3+2,j*3:j*3+2]
                K[  i*6:i*6+2,      j*6+5]=Km[i*3:i*3+2,    j*3+2]
                K[      i*6+5,      j*6+5]=Km[    i*3+2,    j*3+2]
        return spr.csr_matrix(K)
    @property
    def transform_matrix(self):
        # Block-diagonal transform: eight copies of the 3x3 local rotation
        # (translations and rotations for each of the four nodes).
        T=np.zeros((24,24))
        for i in range(8):
            T[3*i:3*i+3,3*i:3*i+3]=self.local_csys.transform_matrix
        return spr.csr_matrix(T)
if __name__ == '__main__':
    # Smoke test: build a 2x2 square shell element in the XY plane and check
    # the shape of its stiffness matrix.
    from hyperstatic.core.fe_model.node import Node
    from hyperstatic.core.fe_model.material.isotropy import IsotropicMaterial
    from hyperstatic.core.fe_model.section.shell_section import ShellSection

    # Counter-clockwise node numbering.
    n1 = Node("1", -1, -1, 0)
    n2 = Node("2", 1, -1, 0)
    n3 = Node("3", 1, 1, 0)
    n4 = Node("4", -1, 1, 0)
    steel = IsotropicMaterial('mat', 7.849e3, 2e11, 0.3, 1.17e-5)  # Q345
    section = ShellSection('sec', steel, 0.25, 'shell')
    ele = DKGQ("ele", section, n1, n2, n3, n4)
    K = ele.integrate_K()
    assert K.shape == (24, 24)
    i = 2
    j = 2
    print(K[i * 6 + 2, j * 6:j * 6 + 6] / 1e8)
/Exegol-4.2.5.tar.gz/Exegol-4.2.5/README.md | <div align="center">
<img alt="latest commit on master" width="600" src="https://raw.githubusercontent.com/ThePorgs/Exegol-docs/main/.assets/rounded_social_preview.png">
<br><br>
<a target="_blank" rel="noopener noreferrer" href="https://pypi.org/project/Exegol" title=""><img src="https://img.shields.io/pypi/v/Exegol?color=informational" alt="pip package version"></a>
<img alt="Python3.7" src="https://img.shields.io/badge/Python-3.7+-informational">
<a target="_blank" rel="noopener noreferrer" href="https://pepy.tech/project/exegol" title=""><img src="https://static.pepy.tech/personalized-badge/exegol?period=total&units=international_system&left_color=grey&right_color=brightgreen&left_text=Downloads" alt="pip stats"></a>
<br><br>
<img alt="latest commit on master" src="https://img.shields.io/github/last-commit/ThePorgs/Exegol/master?label=latest%20release">
<img alt="latest commit on dev" src="https://img.shields.io/github/last-commit/ThePorgs/Exegol/dev?label=latest%20dev">
<br><br>
<img alt="current version" src="https://img.shields.io/badge/linux-supported-success">
<img alt="current version" src="https://img.shields.io/badge/windows-supported-success">
<img alt="current version" src="https://img.shields.io/badge/mac-supported-success">
<br>
<img alt="amd64" src="https://img.shields.io/badge/amd64%20(x86__64)-supported-success">
<img alt="arm64" src="https://img.shields.io/badge/arm64%20(aarch64)-supported-success">
<br><br>
<a target="_blank" rel="noopener noreferrer" href="https://twitter.com/intent/follow?screen_name=_nwodtuhs" title="Follow"><img src="https://img.shields.io/twitter/follow/_nwodtuhs?label=Shutdown&style=social" alt="Twitter Shutdown"></a>
<a target="_blank" rel="noopener noreferrer" href="https://twitter.com/intent/follow?screen_name=Dramelac_" title="Follow"><img src="https://img.shields.io/twitter/follow/Dramelac_?label=Dramelac&style=social" alt="Twitter Dramelac"></a>
<br>
<a target="_blank" rel="noopener noreferrer" href="https://www.blackhat.com/eu-22/arsenal/schedule/index.html#exegol-29180" title="Schedule">
<img alt="Black Hat Europe 2022" src="https://img.shields.io/badge/Black%20Hat%20Arsenal-Europe%202022-blueviolet">
</a>
<a target="_blank" rel="noopener noreferrer" href="https://www.blackhat.com/asia-23/arsenal/schedule/#exegol-professional-hacking-setup-30815" title="Schedule">
<img alt="Black Hat Asia 2023" src="https://img.shields.io/badge/Black%20Hat%20Arsenal-Asia%202023-blueviolet">
</a>
<a target="_blank" rel="noopener noreferrer" href="https://www.blackhat.com/us-23/arsenal/schedule/#exegol-professional-hacking-setup-31711" title="Schedule">
<img alt="Black Hat USA 2023" src="https://img.shields.io/badge/Black%20Hat%20Arsenal-USA%202023-blueviolet">
</a>
<br><br>
<a target="_blank" rel="noopener noreferrer" href="https://discord.gg/cXThyp7D6P" title="Join us on Discord"><img src="https://raw.githubusercontent.com/ThePorgs/Exegol-docs/main/.assets/discord_join_us.png" width="150" alt="Join us on Discord"></a>
<br><br>
</div>
> Exegol is a community-driven hacking environment, powerful and yet simple enough to be used by anyone in day to day engagements. Exegol is the best solution to deploy powerful hacking environments securely, easily, professionally.
> Exegol fits pentesters, CTF players, bug bounty hunters, researchers, beginners and advanced users, defenders, from stylish macOS users and corporate Windows pros to UNIX-like power users.
# Getting started
You can refer to the [Exegol documentations](https://exegol.readthedocs.io/en/latest/getting-started/install.html).
> Full documentation homepage: https://exegol.rtfd.io/.
## Project structure
Below are some bullet points to better understand how Exegol works
- This repository ([Exegol](https://github.com/ThePorgs/Exegol)) contains the code for the Python wrapper. It's the entrypoint of the Exegol project. The wrapper can be installed from sources, but [a PyPI package](https://pypi.org/project/Exegol/) is available.
- The [Exegol-images](https://github.com/ThePorgs/Exegol-images) repo is loaded as a submodule. It includes all necessary assets to build Docker images. Nota bene: the images are already built and offered on [the official Dockerhub registry](https://hub.docker.com/repository/docker/nwodtuhs/exegol).
- The [Exegol-resources](https://github.com/ThePorgs/Exegol-resources) repo is loaded as a submodule. It includes all resources mentioned previously (LinPEAS, WinPEAS, LinEnum, PrivescCheck, SysinternalsSuite, mimikatz, Rubeus, PowerSploit and many more.).
- The [Exegol-docs](https://github.com/ThePorgs/Exegol-docs) repo for the documentation, intended for users as well as developers and contributors. The GitHub repo holds the sources that are compiled on https://exegol.readthedocs.io/.
# Sponsors
<div align="center">
<a href="https://www.capgemini.com/" title="Follow">
<img width="300" src="https://upload.wikimedia.org/wikipedia/fr/thumb/b/b5/Capgemini_Logo.svg/1280px-Capgemini_Logo.svg.png">
</a>
</div>
Dramelac and I work at *Capgemini* and we thank them for allocating some time for us to develop and maintain Exegol! Visit Capgemini website at https://www.capgemini.com/.
___
<div align="center">
<a href="https://www.hackthebox.com/" title="Follow">
<img width="300" src="https://exegol.readthedocs.io/en/latest/_images/hackthebox.png">
</a>
</div>
We also thank *HackTheBox* for continuously supporting the community and for helping us financially to acquire the necessary hardware for supporting multiple architectures (AMD64, ARM64). Show some love at https://www.hackthebox.com/ !
| PypiClean |
/Ace_todolist-2.1-py3-none-any.whl/Ace_todolist/search.py |
# todo table에 존재하는 todo 항목을 검색하여 찾는 함수...
import sqlite3
from Ace_todolist import list_todo as printer
def search(option=None):
    """Interactively search todo items by id, title, due date or category.

    Args:
        option: Optional pre-selected search type ("i", "t", "d" or "c").
            When None, the user is prompted for the type.

    Returns:
        The list of matching rows; the matches are also printed.
    """
    search_answer_list = ["i", "d", "t", "c"]

    # Ask which field to search by, unless the caller already chose one.
    if option is None:
        search_type = input("How do you want to search? (i: id, t: title, d: due, c: category) ")
        while search_type not in search_answer_list:
            print()
            print("Incorrect type")
            search_type = input("How do you want to search? (i: id, t: title, d: due, c: category) ")
    else:
        search_type = option

    search_list = []
    if search_type == "i":
        # Exact id lookup goes straight to SQL; the connection is only
        # opened for this branch (substring searches open their own).
        search_id = input("what id: ")
        conn = sqlite3.connect("ace.db")
        cur = conn.cursor()
        cur.execute("select * from todo where id=?", (search_id,))
        search_list = cur.fetchall()
        cur.close()
        conn.close()
        printer.print_list(search_list)
    elif search_type == "t":
        # Substring matches are filtered in Python by contain_thing().
        search_title = input("what title: ")
        search_list = contain_thing(search_title, 1)
        printer.print_list(search_list)
    elif search_type == "d":
        search_due = input("what due: ")
        search_list = contain_thing(search_due, 3)
        printer.print_list(search_list)
    elif search_type == "c":
        search_category = input("what category: ")
        search_list = contain_thing(search_category, 2)
        printer.print_list(search_list)
    return search_list
# 검색하는 단어를 포함하는 항목 모두 찾기
def contain_thing(what_search, num_index):
    """Return every row of the todo table whose column at *num_index*
    contains the substring *what_search* (case-sensitive).

    Args:
        what_search: Substring to look for.
        num_index: Index of the row column to match against
            (1: title, 2: category, 3: due).

    Returns:
        List of matching rows.
    """
    connection = sqlite3.connect("ace.db")
    cursor = connection.cursor()
    try:
        # Fetch every row; the substring filtering happens in Python below.
        cursor.execute("select * from todo where 1")
        rows = cursor.fetchall()
    finally:
        cursor.close()
        connection.close()
    return [row for row in rows if what_search in row[num_index]]
/BakalariAPI-4.0.0-py3-none-any.whl/bakalariapi/bakalari.py | from __future__ import annotations
import json
import logging
import warnings
from datetime import datetime, timedelta
from enum import Enum
from typing import Any, Callable, Literal, overload
import requests
from .utils import parseHTML
# Module-level logger; the NullHandler keeps the library silent unless the
# application configures logging itself.
LOGGER = logging.getLogger("bakalariapi")
LOGGER.addHandler(logging.NullHandler())

# Public API of this module.
__all__ = ["Endpoint", "BakalariAPI", "LAST_SUPPORTED_VERSION", "GetMode"]

# Newest Bakaláři version this library is known to support.
LAST_SUPPORTED_VERSION = "1.45"
class Endpoint:
    """Known Bakaláři endpoints (URL paths relative to the server root)."""

    LOGIN = "/login"
    LOGOUT = "/logout"
    DASHBOARD = "/dashboard"
    KOMENS = "/next/komens.aspx"
    KOMENS_GET = "/next/komens.aspx/GetMessageData"
    KOMENS_CONFIRM = "/next/komens.aspx/SetMessageConfirmed"
    FILE = "/next/getFile.aspx"
    ROZVRH = "/next/rozvrh.aspx"
    GRADES = "/next/prubzna.aspx"
    SESSION_INFO = "/sessioninfo"
    SESSION_EXTEND = "/sessionextend"
    MEETINGS_OVERVIEW = "/Collaboration/OnlineMeeting/MeetingsOverview"
    MEETINGS_INFO = "/Collaboration/OnlineMeeting/Detail/"
    USER_INFO = "/next/osobni_udaje.aspx"
    HOMEWORKS = "/next/ukoly.aspx"
    HOMEWORKS_DONE = "/HomeWorks/MarkAsFinished"

    # Name -> path mapping; populated right after the class definition,
    # since it needs the finished class namespace.
    _ENDPOINT_DICT: dict[str, str] = {}
# Build the name -> path map from the public Endpoint attributes; this must
# run after the class body is finished.
Endpoint._ENDPOINT_DICT = {
    name: path for name, path in Endpoint.__dict__.items() if not name.startswith("_")
}
# Registered parser functions per endpoint, keyed by the GetterOutput type
# they accept (see _register_parser).
_parsers: dict[
    str, dict[Any, list[Callable[[looting.GetterOutput], looting.ResultSet]]]
] = {x: {} for x in Endpoint._ENDPOINT_DICT.values()}

# Registered resolver functions per BakalariObject subtype
# (see _register_resolver).
_resolvers: dict[
    type[BakalariObject],
    list[Callable[[BakalariAPI, UnresolvedID], BakalariObject | None]],
] = {}
def _register_parser(endpoint: str, type_: type[looting.GetterOutputTypeVar]):
    """Decorator that registers a function as a parser for the given endpoint.

    Not needed for regular BakalariAPI usage.  The decorator itself does not
    modify the function in any way.

    The decorated function should take a GetterOutput (of the type passed as
    the `type_` argument) and return a looting.ResultSet, or None when it
    cannot extract anything from the given GetterOutput.

    Args:
        endpoint:
            Endpoint the function is able to parse.
        type_:
            Type parameter of the generic GetterOutput class the function accepts.
    """
    LOGGER.debug("New parser registered for endpoint '%s' (Type: %s)", endpoint, type_)

    def decorator(
        func: Callable[
            [looting.GetterOutput[looting.GetterOutputTypeVar]], looting.ResultSet
        ]
    ):
        _parsers[endpoint].setdefault(type_, []).append(func)
        return func

    return decorator
def _register_resolver(type_: type[BakalariObj]):
    """Decorator that registers a function as a resolver for the given type.

    Not needed for regular BakalariAPI usage.  The decorator itself does not
    modify the function in any way.

    The decorated function should take an UnresolvedID and return the type
    passed as the `type_` argument, or None when it cannot resolve the given
    UnresolvedID.

    Args:
        type_:
            Type/class the function resolves.
    """
    LOGGER.debug("New resolver registered for type %s", type_)

    def decorator(
        func: Callable[[BakalariAPI, UnresolvedID[BakalariObj]], BakalariObj]
    ):
        _resolvers.setdefault(type_, []).append(func)
        return func

    return decorator
def _parse(
    getter_output: looting.GetterOutput[looting.GetterOutputTypeVar],
) -> looting.ResultSet:
    """Extract data from a GetterOutput instance via the registered parsers.

    Not needed for regular BakalariAPI usage.  If you do not know whether you
    need this function, you do not need it.

    Args:
        getter_output:
            GetterOutput to extract data from.

    Returns:
        looting.ResultSet combining the data of all matching parsers.
    """
    output = looting.ResultSet()
    # `.get(...)` instead of `.setdefault(...)`: a pure read should not
    # insert empty entries into the parser registry as a side effect.
    for parser in _parsers[getter_output.endpoint].get(getter_output.type, []):
        parser_output = parser(getter_output)
        if parser_output is not None:
            output.merge(parser_output)
    return output
def _resolve(
    unresolved: UnresolvedID | list[UnresolvedID] | looting.ResultSet,
    bakalariAPI: BakalariAPI,
    silence_querry_errors: bool = False,
) -> looting.ResultSet:
    """Try to obtain full objects for the given UnresolvedID(s) via the registered resolvers.

    Not needed for regular BakalariAPI usage.  If you do not know whether you
    need this function, you do not need it.

    Args:
        unresolved:
            One or more UnresolvedIDs for which full objects should be obtained.
        bakalariAPI:
            BakalariAPI instance the resolvers may use to query the server.
        silence_querry_errors:
            When True, a BakalariQuerrySuccessError raised by a resolver is
            swallowed and the next resolver is tried instead.

    Returns:
        looting.ResultSet with the resolved objects; IDs that could not be
        resolved are kept as UnresolvedID entries.
    """
    # A ResultSet input is resolved "in place": its UnresolvedIDs are taken
    # out and the resolved objects merged back into the same set.
    if isinstance(unresolved, looting.ResultSet):
        output = unresolved
        unresolved = output.get(UnresolvedID)
        output.remove(UnresolvedID)
    else:
        output = looting.ResultSet()
        if not isinstance(unresolved, list):
            unresolved = [unresolved]
    for o in unresolved:
        if o.type in _resolvers:
            resolved = False
            # Try the resolvers in registration order; the first one that
            # returns a non-None object wins.
            for resolver in _resolvers[o.type]:
                try:
                    tmp = resolver(bakalariAPI, o)
                except exceptions.BakalariQuerrySuccessError as e:
                    if silence_querry_errors:
                        continue
                    raise e
                if tmp is not None:
                    output.add_loot(tmp)
                    resolved = True
                    break
            if not resolved:
                # No resolver succeeded - keep the unresolved ID in the output.
                output.add_loot(o)
        else:
            output.add_loot(o)
    return output
def is_version_supported(version: str):
    """Check whether `BakalariAPI` supports the given Bakaláři version.

    Args:
        version:
            Version string to check.

    Returns:
        `True` when the version is supported, otherwise `False`.
    """
    # Match a full version component rather than a raw prefix, so that e.g.
    # "1.451" is not mistaken for a supported "1.45.x" release.
    return version == LAST_SUPPORTED_VERSION or version.startswith(
        LAST_SUPPORTED_VERSION + "."
    )
class GetMode(Enum):
    """Mode used when obtaining data.

    CACHED - data are taken only from the `Looting` instance
    FRESH - data are fetched only from the server
    CACHED_OR_FRESH - data are first looked up in the `Looting` instance;
        when absent, they are fetched from the server
    """

    CACHED = 0
    FRESH = 1
    CACHED_OR_FRESH = 2
class BakalariAPI:
    """Main BakalariAPI class; for ordinary usage this class alone suffices.

    Attributes:
        username:
            Username used to log into Bakaláři.
        password:
            Password used to log into Bakaláři.
        selenium_handler:
            SeleniumHandler instance holding the Selenium settings.
        session_manager:
            SessionManager instance managing the sessions.
        looting:
            Looting instance managing the collected data.
        user_info:
            UserInfo instance holding data about the user.
        server_info:
            ServerInfo instance holding data about the server and Bakaláři.
        is_partial_init:
            Indicates whether the instance is partially or fully initialized -
            `True` when partial, `False` when full.
            (The original note here was left unfinished; presumably partial
            initialization restricts which operations are possible - TODO confirm.)

    Note:
        With `GetMode.FRESH` and `GetMode.CACHED_OR_FRESH`, the "get" methods
        may raise `PartialInitError` when the instance is not fully initialized.
    """
@property
def is_partial_init(self) -> bool:
    """True when the server URL or either credential is missing."""
    required = (self.server_info.url, self.username, self.password)
    return any(value is None for value in required)
def __init__(
    self,
    url: str | None,
    username: str | None = "",
    password: str | None = "",
    seleniumHandler: seleniumhandler.SeleniumHandler | None = None,
):
    """Set up the API wrapper; no network requests are made here.

    Args:
        url: Base URL of the Bakaláři server (None for a partial init).
        username: Login username (None for a partial init).
        password: Login password (None for a partial init).
        seleniumHandler: Optional Selenium configuration.
    """
    self.username: str | None = username
    self.password: str | None = password
    self.selenium_handler: seleniumhandler.SeleniumHandler | None = seleniumHandler
    # Session manager is created eagerly; it keeps track of (and can kill)
    # all sessions opened on behalf of this instance.
    self.session_manager: sessions.SessionManager = sessions.SessionManager(
        self, True
    )
    self.looting: looting.Looting = looting.Looting()
    self.user_info: UserInfo = UserInfo()
    self.server_info: ServerInfo = ServerInfo(url)
def get_endpoint(self, endpoint: str) -> str:
    """Return the full URL of the given endpoint.

    The URL is produced by prefixing the endpoint path with the URL of the
    Bakaláři server/application.

    Args:
        endpoint:
            Endpoint path; should always come from the Endpoint class,
            i.e. Endpoint.SOME_ENDPOINT.

    Returns:
        The full endpoint URL.

    Raises:
        PartialInitError: When the server URL is unknown.
    """
    base = self.server_info.url
    if base is None:
        raise exceptions.PartialInitError()
    return base + endpoint
def kill(self, nice: bool = True):
    """Terminate all sessions.

    Equivalent to calling 'session_manager.kill_all()'.

    Args:
        nice:
            Whether the sessions should be terminated "gracefully". (Default: True)
            (For the meaning of "gracefully" see BakalariSession.kill().)
    """
    self.session_manager.kill_all(nice)
def is_server_running(self, timeout: float = 5) -> bool:
    """Check whether the Bakaláři server/application is running.

    Args:
        timeout:
            Seconds to wait for the server before giving up; `requests`
            has no default timeout, so without one a dead network route
            would block indefinitely.

    Returns:
        True when the server/application is running, False otherwise.

    Raises:
        PartialInitError: When the server URL is unknown.
    """
    if self.server_info.url is None:
        raise exceptions.PartialInitError()
    try:
        response = requests.get(self.server_info.url, timeout=timeout)
        response.raise_for_status()
    except requests.exceptions.RequestException:
        # Covers connection errors, timeouts and HTTP error statuses alike.
        return False
    return True
def is_login_valid(self) -> bool:
    """Check whether the login credentials are valid.

    Returns:
        True when the credentials are valid, False otherwise.

    Raises:
        PartialInitError: When the instance is not fully initialized.
    """
    with self.session_manager.get_session_or_create(
        sessions.RequestsSession
    ) as session:
        output = session.login()
        # If the login is not valid, we need to get rid of the session
        # => kill it and unregister it from the session manager.
        if not output:
            session.kill()
            self.session_manager.unregister_session(session)
        return output
def init(self):
    """Obtain some information about the Bakaláři system and the user.

    Calling this method is not required, but for now there is no other
    (implemented) way to obtain this information.
    """
    with self.session_manager.get_session_or_create(
        sessions.RequestsSession
    ) as session:
        # Fetch and parse the user-info page; parsing also feeds the
        # looting instance via the registered parsers.
        getter_output = looting.GetterOutput(
            Endpoint.USER_INFO,
            parseHTML(session.get(self.get_endpoint(Endpoint.USER_INFO)).content),
        )
        self._parse(getter_output)
    # A dedicated parser could perhaps be registered for this instead
    data = json.loads(getter_output.data.head["data-pageinfo"])  # type: ignore # "head" may be None, which makes Pylance complain
    self.user_info.type = data["userType"]
    self.user_info.hash = data["userHash"]
    self.server_info.version = data["applicationVersion"]
    self.server_info.version_date = datetime.strptime(data["appVersion"], "%Y%m%d")
    self.server_info.evid_number = int(data["evidNumber"])
    if not self.is_version_supported():
        warnings.warn(exceptions.VersionMismatchWarning())
def is_version_supported(self):
    """Check whether `BakalariAPI` supports the Bakaláři version running on the server.

    Returns:
        `True` when supported, otherwise `False`.
        When the Bakaláři version has not been obtained yet (i.e. is `None`),
        returns `False`.
    """
    version = self.server_info.version
    if version is None:
        return False
    return is_version_supported(version)
# GRADES
@overload
def get_grades(self, mode: Literal[GetMode.CACHED]) -> list[Grade]:
    """Load and return grades from the own looting instance.

    Returns:
        List of grades obtained in the past.
    """

@overload
def get_grades(
    self, mode: Literal[GetMode.FRESH], *, from_date: datetime | None = None
) -> list[Grade]:
    """Freshly load and return grades.

    Args:
        from_date:
            When not None, only grades from the given date (inclusive) are loaded.
            When None, only grades from the current semester are loaded.

    Returns:
        Freshly loaded list of grades.
    """

@overload
def get_grades(
    self,
    mode: Literal[GetMode.CACHED_OR_FRESH],
    *,
    from_date: datetime | None = None,
) -> list[Grade]:
    """Load grades from the looting instance; when none are present, fetch fresh ones.

    When grades are present in the looting instance, the remaining arguments
    are irrelevant.

    Args:
        from_date:
            When not None, only grades from the given date (inclusive) are loaded.
            When None, only grades from the current semester are loaded.

    Returns:
        Loaded list of grades.
    """
def get_grades(self, mode: GetMode, **kwargs) -> list[Grade]:
    """Load and return grades according to `mode` (see the overloads above).

    Raises:
        PartialInitError: When fresh data are requested but the instance is
            not fully initialized.
        ValueError: When `mode` is not a known GetMode.
    """
    kwargs = {"from_date": None, **kwargs}
    if mode == GetMode.CACHED:
        return self.looting.get(Grade)
    elif mode == GetMode.FRESH:
        if self.is_partial_init:
            raise exceptions.PartialInitError()
        return self._parse(modules.grades.getter(self, kwargs["from_date"])).get(
            Grade
        )
    elif mode == GetMode.CACHED_OR_FRESH:
        output = self.get_grades(GetMode.CACHED)
        return (
            self.get_grades(GetMode.FRESH, **kwargs) if len(output) == 0 else output
        )
    # An explicit message instead of a bare ValueError.
    raise ValueError(f"Unknown GetMode: {mode!r}")
def get_all_grades(self) -> list[Grade]:
    """Freshly load and return all grades.

    Always fetches fresh data from Bakaláři.

    Returns:
        Freshly loaded list of all grades.
    """
    return self.get_grades(GetMode.FRESH, from_date=datetime(1, 1, 1))
# HOMEWORKS
@overload
def get_homeworks(self, mode: Literal[GetMode.CACHED]) -> list[Homework]:
    """Load and return homeworks from the own looting instance.

    Returns:
        List of homeworks obtained in the past.
    """

@overload
def get_homeworks(
    self,
    mode: Literal[GetMode.FRESH],
    *,
    fast_mode: Literal[True],
) -> list[Homework]:
    """Freshly load and return homeworks (fast mode).

    Args:
        fast_mode:
            Loading mode.  "Fast mode" (`True`) is considerably quicker than
            "slow mode" but can only load the first 20 active unfinished
            homeworks; "slow mode" (`False`) offers more options.

    Returns:
        Freshly loaded list of homeworks.
    """

@overload
def get_homeworks(
    self,
    mode: Literal[GetMode.FRESH],
    *,
    fast_mode: Literal[False],
    unfinished_only: bool = True,
    only_first_page: bool = False,
    first_loading_timeout: float = 5,
    second_loading_timeout: float = 10,
) -> list[Homework]:
    """Freshly load and return homeworks (slow mode).

    Args:
        fast_mode:
            Loading mode; `False` selects the "slow mode" with more options.
        unfinished_only:
            When `True`, only homeworks marked as unfinished are loaded;
            when `False`, finished and unfinished homeworks are loaded.
        only_first_page:
            When `True`, only the first Bakaláři page of homeworks is loaded;
            when `False`, all pages are.  Note that loading individual pages
            is rather slow.
        first_loading_timeout:
            Best left at the default.  Seconds to wait for the page loading
            to start.  Too small a value may miss some homeworks; too large
            a value may waste a lot of time in certain cases.
        second_loading_timeout:
            Best left at the default.  Seconds to wait for the page loading
            to finish.  Too small a value may make BakalariAPI assume that
            Bakaláři failed and miss some homeworks; too large a value may
            waste a lot of time in certain cases.

    Returns:
        Freshly loaded list of homeworks.
    """

@overload
def get_homeworks(
    self,
    mode: Literal[GetMode.CACHED_OR_FRESH],
    *,
    fast_mode: Literal[True],
) -> list[Homework]:
    """Load homeworks from the looting instance; when none are present, fetch fresh ones (fast mode).

    When homeworks are present in the looting instance, the remaining
    arguments are irrelevant.

    Args:
        fast_mode:
            Loading mode; `True` selects the quicker but limited "fast mode"
            (at most the first 20 active unfinished homeworks).

    Returns:
        Loaded list of homeworks.
    """

@overload
def get_homeworks(
    self,
    mode: Literal[GetMode.CACHED_OR_FRESH],
    *,
    fast_mode: Literal[False],
    unfinished_only: bool = True,
    only_first_page: bool = False,
    first_loading_timeout: float = 5,
    second_loading_timeout: float = 10,
) -> list[Homework]:
    """Load homeworks from the looting instance; when none are present, fetch fresh ones (slow mode).

    When homeworks are present in the looting instance, the remaining
    arguments are irrelevant; otherwise they have the same meaning as in the
    `GetMode.FRESH` slow-mode overload.

    Returns:
        Loaded list of homeworks.
    """
def get_homeworks(self, mode: GetMode, **kwargs) -> list[Homework]:
    """Load and return homeworks according to `mode` (see the overloads above).

    Raises:
        PartialInitError: When fresh data are requested but the instance is
            not fully initialized.
        ValueError: When `mode` is not a known GetMode.
    """
    # Fill in the slow-mode defaults; `fast_mode` itself is a required
    # keyword per the overloads, so it is intentionally not defaulted here.
    kwargs = {
        "unfinished_only": True,
        "only_first_page": False,
        "first_loading_timeout": 5,
        "second_loading_timeout": 10,
        **kwargs,
    }
    if mode == GetMode.CACHED:
        return self.looting.get(Homework)
    elif mode == GetMode.FRESH:
        if self.is_partial_init:
            raise exceptions.PartialInitError()
        if kwargs["fast_mode"]:
            return self._parse(modules.homeworks.getter_fast(self)).get(Homework)
        else:
            return modules.homeworks.get_slow(
                self,
                kwargs["unfinished_only"],
                kwargs["only_first_page"],
                kwargs["first_loading_timeout"],
                kwargs["second_loading_timeout"],
            ).get(Homework)
    elif mode == GetMode.CACHED_OR_FRESH:
        output = self.get_homeworks(GetMode.CACHED)
        return (
            self.get_homeworks(GetMode.FRESH, **kwargs)
            if len(output) == 0
            else output
        )
    # An explicit message instead of a bare ValueError.
    raise ValueError(f"Unknown GetMode: {mode!r}")
def get_all_homeworks(self) -> list[Homework]:
    """Freshly load and return all homeworks.

    Always fetches fresh data from Bakaláři; the loading runs in "slow mode".

    Returns:
        Freshly loaded list of all homeworks.
    """
    return self.get_homeworks(
        GetMode.FRESH, fast_mode=False, unfinished_only=False, only_first_page=False
    )
# MEETINGS
@overload
def get_meetings(self, mode: Literal[GetMode.CACHED]) -> list[Meeting]:
    """Load and return meetings from the own looting instance.

    Returns:
        List of meetings obtained in the past.
    """

@overload
def get_meetings(self, mode: Literal[GetMode.FRESH]) -> list[Meeting]:
    """Freshly load and return upcoming meetings.

    Returns:
        Freshly loaded list of upcoming meetings.
    """

@overload
def get_meetings(
    self, mode: Literal[GetMode.FRESH], *, from_date: datetime, to_date: datetime
) -> list[Meeting]:
    """Freshly load and return meetings in the given range.

    Both the lower and the upper time bound must be given.  The smallest
    possible time is `datetime(1, 1, 1)`, the largest is
    `datetime(9999, 12, 31, 23, 59, 59)`.

    Args:
        from_date:
            Date and time from which meetings should be loaded.
        to_date:
            Date and time up to which meetings should be loaded.

    Returns:
        Freshly loaded list of meetings.
    """

@overload
def get_meetings(self, mode: Literal[GetMode.CACHED_OR_FRESH]) -> list[Meeting]:
    """Load meetings from the looting instance; when none are present, fetch fresh upcoming ones.

    Returns:
        Loaded list of meetings.
    """

@overload
def get_meetings(
    self,
    mode: Literal[GetMode.CACHED_OR_FRESH],
    *,
    from_date: datetime,
    to_date: datetime,
) -> list[Meeting]:
    """Load meetings from the looting instance; when none are present, fetch fresh ones.

    Both time bounds must be given (see the `GetMode.FRESH` overload).
    When meetings are present in the looting instance, the remaining
    arguments are irrelevant.

    Args:
        from_date:
            Date and time from which meetings should be loaded.
        to_date:
            Date and time up to which meetings should be loaded.

    Returns:
        Loaded list of meetings.
    """
def get_meetings(self, mode: GetMode, **kwargs) -> list[Meeting]:
    """Load and return meetings according to `mode` (see the overloads above).

    Raises:
        PartialInitError: When fresh data are requested but the instance is
            not fully initialized.
        ValueError: When `mode` is not a known GetMode.
    """
    if mode == GetMode.CACHED:
        return self.looting.get(Meeting)
    elif mode == GetMode.FRESH:
        if self.is_partial_init:
            raise exceptions.PartialInitError()
        if "from_date" in kwargs:
            # Explicit range: fetch meeting IDs in the range and resolve
            # them into full Meeting objects.
            return self._resolve(
                self._parse(
                    modules.meetings.getter_meetings_ids(
                        self, kwargs["from_date"], kwargs["to_date"]
                    )
                )
            ).get(Meeting)
        else:
            # No range given: fetch only the upcoming meetings.
            return self._resolve(
                self._parse(modules.meetings.getter_future_meetings_ids(self)).get(
                    UnresolvedID
                )
            ).get(Meeting)
    elif mode == GetMode.CACHED_OR_FRESH:
        output = self.get_meetings(GetMode.CACHED)
        return (
            self.get_meetings(GetMode.FRESH, **kwargs)
            if len(output) == 0
            else output
        )
    # An explicit message instead of a bare ValueError.
    raise ValueError(f"Unknown GetMode: {mode!r}")
def get_all_meetings(self) -> list[Meeting]:
    """Freshly load and return all meetings.

    Always fetches fresh data from Bakaláři.

    Returns:
        Freshly loaded list of all meetings.
    """
    return self.get_meetings(
        GetMode.FRESH,
        from_date=datetime(1, 1, 1),
        to_date=datetime(9999, 12, 31, 23, 59, 59),
    )
# STUDENTS
@overload
def get_students(self, mode: Literal[GetMode.CACHED]) -> list[Student]:
    """Load and return students from the own looting instance.

    Returns:
        List of students obtained in the past.
    """

@overload
def get_students(self, mode: Literal[GetMode.FRESH]) -> list[Student]:
    """Freshly load and return the list of students.

    Returns:
        Freshly loaded list of students.
    """

@overload
def get_students(self, mode: Literal[GetMode.CACHED_OR_FRESH]) -> list[Student]:
    """Load students from the looting instance; when none are present, fetch fresh ones.

    Returns:
        Loaded list of students.
    """
def get_students(self, mode: GetMode) -> list[Student]:
    """Load and return students according to `mode` (see the overloads above).

    Raises:
        PartialInitError: When fresh data are requested but the instance is
            not fully initialized.
        ValueError: When `mode` is not a known GetMode.
    """
    if mode == GetMode.CACHED:
        return self.looting.get(Student)
    elif mode == GetMode.FRESH:
        if self.is_partial_init:
            raise exceptions.PartialInitError()
        # Students are parsed out of the future-meetings endpoint -
        # presumably the invitee lists carry the student data (TODO confirm).
        return self._parse(modules.meetings.getter_future_meetings_ids(self)).get(
            Student
        )
    elif mode == GetMode.CACHED_OR_FRESH:
        output = self.get_students(GetMode.CACHED)
        return self.get_students(GetMode.FRESH) if len(output) == 0 else output
    # An explicit message instead of a bare ValueError.
    raise ValueError(f"Unknown GetMode: {mode!r}")
# KOMENS
@overload
def get_komens(self, mode: Literal[GetMode.CACHED]) -> list[Komens]:
    """Load and return Komens messages from the own looting instance.

    Returns:
        List of Komens messages obtained in the past.
    """

@overload
def get_komens(
    self,
    mode: Literal[GetMode.FRESH],
    *,
    from_date: datetime | None = None,
    to_date: datetime | None = None,
    limit: int | None = None,
) -> list[Komens]:
    """Freshly load and return Komens messages.

    Due to a Bakaláři limitation, at most 300 messages can be loaded at once.

    Args:
        from_date:
            When not None, only messages from the given date are loaded;
            with `to_date` None, messages from that date up to the present.
            When both `from_date` and `to_date` are None, only messages from
            the last month are loaded.
        to_date:
            When not None, only messages up to the given date are loaded;
            with `from_date` None, all messages up to that date.
            When both `from_date` and `to_date` are None, only messages from
            the last month are loaded.
        limit:
            Maximum number of messages to load.  Note that loading
            individual messages is rather slow.

    Returns:
        Freshly loaded list of Komens messages.
    """
@overload
def get_komens(
self,
mode: Literal[GetMode.CACHED_OR_FRESH],
*,
from_date: datetime | None = None,
to_date: datetime | None = None,
limit: int | None = None,
) -> list[Komens]:
"""Načte a vrátí komens zprávy z vlastní looting instance. Pokud v looting instanci nejsou přítomny žádné komens zprávy, pokusí se načíst nové.
Kvůli limitaci Bakalářů je možné případně načíst pouze 300 zpráv na jednou.
Pokud jsou schůzky přítomny v looting instanci, argumenty této metody jsou nepodstatné.
Args:
from_date:
Pokud není None, načtou se komens zprávy pouze od daného data.
Pokue není None a parametr `to_date` je None, načtou se komens zprávy od daného data do současnosti.
Pokud oba parametry `from_date` a `to_date` jsou None, načtou se komens zprávy pouze za poslední měsíc.
to_date:
Pokud není None, načtou se komens zprávy pouze do daného data.
Pokue není None a parametr `from_date` je None, načtou se všechny komens zprávy do daného data.
Pokud oba parametry `from_date` a `to_date` jsou None, načtou se komens zprávy pouze za poslední měsíc.
limit:
Určuje limit, kolik zpráv se maximálně načte.
Při užití metody je dobré zvážit, že načítání jednotlivých zpráv je poměrně zdlouhavé.
Returns:
Načtený list komens zpráv.
"""
def get_komens(self, mode: GetMode, **kwargs) -> list[Komens]:
kwargs = {"from_date": None, "to_date": None, "limit": None, **kwargs}
if mode == GetMode.CACHED:
return self.looting.get(Komens)
elif mode == GetMode.FRESH:
if self.is_partial_init:
raise exceptions.PartialInitError()
return self._resolve(
self._parse(
modules.komens.getter_komens_ids(
self, kwargs["from_date"], kwargs["to_date"]
)
).get(UnresolvedID)[: kwargs["limit"]]
).get(Komens)
elif mode == GetMode.CACHED_OR_FRESH:
output = self.get_komens(GetMode.CACHED)
return (
self.get_komens(GetMode.FRESH, **kwargs) if len(output) == 0 else output
)
raise ValueError
    def get_all_komens(self) -> list[Komens]:
        """Freshly fetch and return all Komens messages.

        Always pulls fresh data from Bakaláři. Due to a Bakaláři limitation,
        at most 300 messages can be fetched at once.

        Returns:
            Freshly fetched list of all Komens messages.
        """
        return self.get_komens(
            GetMode.FRESH,
            # 1953 safely predates any real data; tomorrow covers "now".
            from_date=datetime(1953, 1, 1),
            to_date=datetime.today() + timedelta(1),
        )
def _parse(
self, getter_output: looting.GetterOutput[looting.GetterOutputTypeVar]
) -> looting.ResultSet:
"""Extrahují se data z GetterOutput instance za pomoci registrovaných parserů.
Data získaná skrze tuto metodu jsou automaticky ukládána v looting instanci.
Pro běžné užití BakalářiAPI není tato funkce nutná. Pokud nevíte, jestli tuto
funkci máte/potřebujete použít, tak ji nepotřebujete.
Args:
getter_output:
GetterOutput, ze kterého se mají data extrahovat.
Returns:
ResultSet, který obsahuje všechna data od jednotlivých parserů.
"""
output = _parse(getter_output)
self.looting.add_result_set(output)
return output
def _resolve(
self,
unresolved: UnresolvedID | list[UnresolvedID] | looting.ResultSet,
silence_querry_errors: bool = False,
) -> looting.ResultSet:
"""Pokusí se získat plnohodnotný objekt pro dané UnresolvedID za pomoci registrovaných resolverů.
Data získaná skrze tuto metodu jsou automaticky ukládána v looting instanci.
Pro běžné užití BakalářiAPI není tato funkce nutná. Pokud nevíte, jestli tuto
funkci máte/potřebujete použít, tak ji nepotřebujete.
Args:
unresolved:
Jedno nebo více UnresolvedID, pro které se BakalářiAPI pokusí získat plnohodnotný objekt.
Returns:
ResultSet, který obsahuje všechna data od jednotlivých resolverů.
Raises:
PartialInitError: Pokud není instance plně inicializována.
"""
if self.is_partial_init:
raise exceptions.PartialInitError()
output = _resolve(unresolved, self, silence_querry_errors)
self.looting.add_result_set(output)
return output
from . import exceptions, looting, modules, seleniumhandler, sessions
from .objects import (
BakalariObj,
BakalariObject,
Grade,
Homework,
Komens,
Meeting,
ServerInfo,
Student,
UnresolvedID,
UserInfo,
) | PypiClean |
/OTLModel/Classes/SteunStandaard.py | from OTLMOW.OTLModel.BaseClasses.OTLAttribuut import OTLAttribuut
from abc import abstractmethod
from OTLMOW.OTLModel.Classes.AIMNaamObject import AIMNaamObject
from OTLMOW.OTLModel.Classes.EMDraagconstructie import EMDraagconstructie
from OTLMOW.OTLModel.Datatypes.DteKleurRAL import DteKleurRAL
from OTLMOW.OTLModel.Datatypes.KlDraagConstrBeschermlaag import KlDraagConstrBeschermlaag
from OTLMOW.OTLModel.Datatypes.KlDraagConstrBijzondertransport import KlDraagConstrBijzondertransport
from OTLMOW.OTLModel.Datatypes.KwantWrdInMeter import KwantWrdInMeter
from OTLMOW.OTLModel.Datatypes.StringField import StringField
# Generated with OTLClassCreator. To modify: extend, do not edit
class SteunStandaard(AIMNaamObject, EMDraagconstructie):
    """Abstract base class for the standard supports ("standaard steunen")."""
    # Generated code (see OTLClassCreator header above): prefer extending over
    # editing this class directly.

    typeURI = 'https://wegenenverkeer.data.vlaanderen.be/ns/abstracten#SteunStandaard'
    """De URI van het object volgens https://www.w3.org/2001/XMLSchema#anyURI."""

    @abstractmethod
    def __init__(self):
        # Cooperative multiple inheritance is not used here; both bases are
        # initialized explicitly, matching the generator's output.
        AIMNaamObject.__init__(self)
        EMDraagconstructie.__init__(self)

        self._beschermlaag = OTLAttribuut(field=KlDraagConstrBeschermlaag,
                                          naam='beschermlaag',
                                          label='beschermlaag',
                                          objectUri='https://wegenenverkeer.data.vlaanderen.be/ns/abstracten#SteunStandaard.beschermlaag',
                                          definition='Type bescherming van de steun, bv. geschilderd of gegalvaniseerd.',
                                          owner=self)

        self._bijzonderTransport = OTLAttribuut(field=KlDraagConstrBijzondertransport,
                                                naam='bijzonderTransport',
                                                label='bijzonder transport',
                                                objectUri='https://wegenenverkeer.data.vlaanderen.be/ns/abstracten#SteunStandaard.bijzonderTransport',
                                                definition='Wijze waarop het object eventueel geschikt is om bijzonder transport mogelijk te maken.',
                                                owner=self)

        self._fabrikant = OTLAttribuut(field=StringField,
                                       naam='fabrikant',
                                       label='fabrikant',
                                       objectUri='https://wegenenverkeer.data.vlaanderen.be/ns/abstracten#SteunStandaard.fabrikant',
                                       definition='De fabrikant van de steun.',
                                       owner=self)

        self._hoogteBovenkant = OTLAttribuut(field=KwantWrdInMeter,
                                             naam='hoogteBovenkant',
                                             label='hoogte bovenkant',
                                             objectUri='https://wegenenverkeer.data.vlaanderen.be/ns/abstracten#SteunStandaard.hoogteBovenkant',
                                             definition='Hoogte (in meter) van de bovenkant van de steun.',
                                             owner=self)

        self._kleur = OTLAttribuut(field=DteKleurRAL,
                                   naam='kleur',
                                   label='kleur',
                                   objectUri='https://wegenenverkeer.data.vlaanderen.be/ns/abstracten#SteunStandaard.kleur',
                                   definition='De RAL kleur van het uitwendig zichtbare gedeelte.',
                                   owner=self)

    @property
    def beschermlaag(self):
        """Type of protection of the support, e.g. painted or galvanised."""
        return self._beschermlaag.get_waarde()

    @beschermlaag.setter
    def beschermlaag(self, value):
        self._beschermlaag.set_waarde(value, owner=self)

    @property
    def bijzonderTransport(self):
        """How the object is possibly equipped to allow exceptional transport."""
        return self._bijzonderTransport.get_waarde()

    @bijzonderTransport.setter
    def bijzonderTransport(self, value):
        self._bijzonderTransport.set_waarde(value, owner=self)

    @property
    def fabrikant(self):
        """The manufacturer of the support."""
        return self._fabrikant.get_waarde()

    @fabrikant.setter
    def fabrikant(self, value):
        self._fabrikant.set_waarde(value, owner=self)

    @property
    def hoogteBovenkant(self):
        """Height (in metres) of the top of the support."""
        return self._hoogteBovenkant.get_waarde()

    @hoogteBovenkant.setter
    def hoogteBovenkant(self, value):
        self._hoogteBovenkant.set_waarde(value, owner=self)

    @property
    def kleur(self):
        """The RAL colour of the externally visible part."""
        return self._kleur.get_waarde()

    @kleur.setter
    def kleur(self, value):
        self._kleur.set_waarde(value, owner=self)
/FNEtodo-0.1.1-py3-none-any.whl/todo/commandline.py |
import os
import sys
from argparse import ArgumentParser
import todo
# Name of the executable as invoked (basename of argv[0]).
# NOTE(review): splitting on '/' assumes POSIX path separators - confirm
# Windows is out of scope for this tool.
FILE_NAME = sys.argv[0].split('/')[-1]

# On-disk locations of the two task lists, kept in the user's home directory.
STORAGE = {
    'active_path': f'{os.environ.get("HOME")}/.todo-list-active',
    'completed_path': f'{os.environ.get("HOME")}/.todo-list-completed',
}

# Usage strings shown in argparse help output and error messages.
HELPS = {
    'command': f'{FILE_NAME} add | list | complete | completed',
    'add': f"{FILE_NAME} add 'My new task' 'Other task' 'more' or single item",
    'complete': f'{FILE_NAME} complete 1',
    'params': 'can be a task (string) or index (int)',
}
def read_list(storage=STORAGE['active_path']):
    """Return the stored todo items as a list of stripped lines.

    A missing storage file is treated as an empty task list.
    """
    try:
        with open(storage, 'r') as file_pointer:
            return [line.strip() for line in file_pointer]
    except FileNotFoundError:
        return []
def update_file(data, storage=STORAGE['active_path']):
    """Overwrite *storage* with one todo item per line.

    Empty (or None) *data* truncates the file to zero length; a missing
    storage file is created.

    Fixes over the previous implementation: it opened the file twice when
    *data* was empty (a truncating open followed by a second open), toggled
    between 'w' and 'a' modes with no observable effect ('w' also creates a
    missing file), and crashed with TypeError when *data* was None.
    """
    with open(storage, 'w') as file_pointer:
        for line in data or ():
            file_pointer.write(line + '\n')
def main(argv=None):
    """Parse the command line and run the requested todo action.

    Args:
        argv: Full argument vector including the program name; defaults to
            ``sys.argv`` when None.

    Returns:
        Process exit code: 0 on success, 1 on user error.
    """
    if argv is None:
        argv = sys.argv
    command_choices = ['list', 'add', 'complete', 'completed']
    parser = ArgumentParser(prog=FILE_NAME)
    parser.add_argument(
        'command', choices=command_choices, type=str, nargs='?', help=HELPS['command']
    )
    parser.add_argument('params', type=str, nargs='*', help=HELPS['params'])
    parser.add_argument('-v', '--version', action='version', version=todo.VERSION)
    # BUG FIX: parse_args() used to be called with no arguments, so it always
    # read sys.argv and silently ignored the argv parameter. Honour the
    # caller-supplied vector instead (argv[0] is the program name).
    args = parser.parse_args(argv[1:])
    if args.command is None:
        parser.print_help()
        return 0
    if args.command == 'add':
        if not args.params:
            sys.stdout.write(f"please enter your task.\n\t{HELPS['add']}\n")
            return 1
        todo.Todo.items = read_list()
        messages = []
        any_added = False
        for task in args.params:
            status, message = todo.Todo.add(task)
            messages.append(message)
            any_added = any_added or status
        # Write the file once after all additions instead of relying on the
        # status of only the last add.
        if any_added:
            update_file(todo.Todo.items)
        for message in messages:
            sys.stdout.write(f'{message}\n')
        # NOTE(review): bare attribute access appears to trigger listing as a
        # side effect - confirm against the Todo API.
        todo.Todo.list_todos
        return 0
    if args.command == 'list':
        sys.stdout.write('Current Tasks:\n')
        todo.Todo.items = read_list()
        todo.Todo.list_todos
        return 0
    if args.command == 'completed':
        todo.Todo.completed = read_list(STORAGE['completed_path'])
        todo.Todo.completed_list
        return 0
    if args.command == 'complete':
        if not args.params:
            sys.stdout.write(f"Please enter index. \n\t{HELPS['complete']}\n")
            return 1
        try:
            index = int(args.params[0])
        except ValueError:
            # Robustness fix: a non-numeric index used to escape as an
            # unhandled ValueError; report it like the other usage errors.
            sys.stdout.write(f"Please enter index. \n\t{HELPS['complete']}\n")
            return 1
        todo.Todo.items = read_list()
        todo.Todo.completed = read_list(STORAGE['completed_path'])
        status, message = todo.Todo.complete(index)
        sys.stdout.write(f'{message}\n')
        if status:
            update_file(todo.Todo.items)
            update_file(todo.Todo.completed, STORAGE['completed_path'])
        todo.Todo.list_todos
        return 0
    return 0


if __name__ == '__main__':
    sys.exit(main())
/3ETool-0.8.3.tar.gz/3ETool-0.8.3/EEETools/BlockSubClasses/mixer.py | from EEETools.MainModules import Block
import xml.etree.ElementTree as ETree
from EEETools import costants
class Mixer(Block):
def __init__(self, inputID, main_class):
Block.__init__(self, inputID, main_class)
self.type = "mixer"
def is_ready_for_calculation(self):
return len(self.input_connections) >= 1 and len(self.output_connections) >= 1
def initialize_connection_list(self, input_list):
for elem in input_list:
new_conn = self.main_class.find_connection_by_index(abs(elem))
if not new_conn is None:
is_input = (elem > 0)
self.add_connection(new_conn, is_input)
def export_xml_connection_list(self) -> ETree.Element:
xml_connection_list = ETree.Element("Connections")
fluid_connections = ETree.SubElement(xml_connection_list, "FluidConnections")
for input_connection in self.external_input_connections:
input_xml = ETree.SubElement(fluid_connections, "input")
input_xml.set("index", str(input_connection.index))
for output_connection in self.external_output_connections:
output_xml = ETree.SubElement(fluid_connections, "output")
output_xml.set("index", str(output_connection.index))
return xml_connection_list
def append_xml_connection_list(self, input_list: ETree.Element):
fluid_connections = input_list.find("FluidConnections")
self.__add_connection_by_index(fluid_connections, "input")
self.__add_connection_by_index(fluid_connections, "output")
def __add_connection_by_index(self, input_list: ETree.Element, connection_name, append_to_support_block=None):
if connection_name == "input":
is_input = True
else:
is_input = False
for connection in input_list.findall(connection_name):
new_conn = self.main_class.find_connection_by_index(float(connection.get("index")))
if new_conn is not None:
self.add_connection(new_conn, is_input, append_to_support_block=append_to_support_block)
@classmethod
def return_EES_needed_index(cls):
return_dict = {"flow input": [1, True],
"flow output": [2, False]}
return return_dict
@classmethod
def return_EES_base_equations(cls):
return_element = dict()
variables_list = [{"variable": "flow input", "type": costants.ZONE_TYPE_PRESSURE},
{"variable": "flow output", "type": costants.ZONE_TYPE_PRESSURE}]
return_element.update({"pressure_continuity": {"variables": variables_list, "related_option": "none"}})
return return_element
def return_other_zone_connections(self, zone_type, input_connection):
if zone_type == costants.ZONE_TYPE_FLOW_RATE:
# In a mixer the flow rate is not preserved, hence an empty list is returned
return list()
elif zone_type == costants.ZONE_TYPE_FLUID:
# In a mixer the fluid type is preserved, hence if "input_connection" stream is connected to the
# block the methods returns each fluid stream connected to it
if self.connection_is_in_connections_list(input_connection):
return self.get_fluid_stream_connections()
else:
return list()
elif zone_type == costants.ZONE_TYPE_PRESSURE:
# In a mixer the pressure is preserved, hence if "input_connection" stream is connected to the
# block the methods returns each fluid stream connected to it
if self.connection_is_in_connections_list(input_connection):
return self.get_fluid_stream_connections()
else:
return list()
else:
return list() | PypiClean |
/DukeDSClient-2.0.2.tar.gz/DukeDSClient-2.0.2/ddsc/sdk/client.py | import os
from collections import OrderedDict
from ddsc.core.ddsapi import DataServiceAuth, DataServiceApi
from ddsc.config import create_config
from ddsc.core.remotestore import DOWNLOAD_FILE_CHUNK_SIZE
from ddsc.core.fileuploader import FileUploadOperations, ParallelChunkProcessor, ParentData
from ddsc.core.localstore import PathData
from ddsc.core.util import KindType
from future.utils import python_2_unicode_compatible
class Client(object):
    """
    Client that connects to the DDSConnection based on ~/.ddsclient configuration.
    This configuration can be customized by passing in a ddsc.config.Config object
    """

    def __init__(self, config=None):
        """
        :param config: ddsc.config.Config: settings used to connect to DDSConnection
            (defaults to a freshly loaded configuration when omitted)
        """
        # BUG FIX: the previous default argument was `config=create_config()`,
        # which executed (and read the user's config file) once at import time
        # and shared that single Config across every Client instance. Resolve
        # the default lazily, per instantiation, instead.
        if config is None:
            config = create_config()
        self.dds_connection = DDSConnection(config)

    def get_projects(self):
        """
        Get list of all projects user has access to.
        :return: [Project]: list of projects
        """
        return self.dds_connection.get_projects()

    def get_project_by_id(self, project_id):
        """
        Retrieve a single project.
        :param project_id: str: uuid of the project to fetch
        :return: Project
        """
        return self.dds_connection.get_project_by_id(project_id)

    def create_project(self, name, description):
        """
        Create a new project with the specified name and description
        :param name: str: name of the project
        :param description: str: description of the project
        :return: Project
        """
        return self.dds_connection.create_project(name, description)

    def get_folder_by_id(self, folder_id):
        """
        Return details about a folder with the specified uuid
        :param folder_id: str: uuid of the folder to fetch
        :return: Folder
        """
        return self.dds_connection.get_folder_by_id(folder_id)

    def get_file_by_id(self, file_id):
        """
        Return details about a file with the specified uuid
        :param file_id: str: uuid of the file to fetch
        :return: File
        """
        return self.dds_connection.get_file_by_id(file_id)
class DDSConnection(object):
    """
    Contains methods for accessing various DDSConnection API functionality
    """

    def __init__(self, config):
        """
        :param config: ddsc.config.Config: settings used to connect to DDSConnection
        """
        self.config = config
        self.data_service = DataServiceApi(DataServiceAuth(config), config.url)

    def _create_array_response(self, resp, array_item_constructor):
        # Wrap each element of the paged 'results' array in the given model class.
        items = resp.json()['results']
        return [array_item_constructor(self, data_dict) for data_dict in items]

    def _create_item_response(self, resp, item_constructor):
        # Wrap a single-object JSON payload in the given model class.
        data_dict = resp.json()
        return item_constructor(self, data_dict)

    def get_projects(self):
        """
        Get details for all projects you have access to in DDSConnection
        :return: [Project]: list of projects
        """
        return self._create_array_response(
            self.data_service.get_projects(),
            Project)

    def get_project_by_id(self, project_id):
        """
        Get details about project with the specified uuid
        :param project_id: str: uuid of the project to fetch
        :return: Project
        """
        return self._create_item_response(
            self.data_service.get_project_by_id(project_id),
            Project)

    def create_project(self, name, description):
        """
        Create a new project with the specified name and description
        :param name: str: name of the project to create
        :param description: str: description of the project to create
        :return: Project
        """
        return self._create_item_response(
            self.data_service.create_project(name, description),
            Project)

    def delete_project(self, project_id):
        """
        Delete the project with the specified uuid
        :param project_id: str: uuid of the project to delete
        """
        self.data_service.delete_project(project_id)

    def create_folder(self, folder_name, parent_kind_str, parent_uuid):
        """
        Create a folder under a particular parent
        :param folder_name: str: name of the folder to create
        :param parent_kind_str: str: kind of the parent of this folder
        :param parent_uuid: str: uuid of the parent of this folder (project or another folder)
        :return: Folder: folder metadata
        """
        return self._create_item_response(
            self.data_service.create_folder(folder_name, parent_kind_str, parent_uuid),
            Folder
        )

    def delete_folder(self, folder_id):
        """
        Delete the folder with the specified uuid
        :param folder_id: str: uuid of the folder to delete
        """
        self.data_service.delete_folder(folder_id)

    def get_project_children(self, project_id, name_contains=None):
        """
        Get direct files and folders of a project.
        :param project_id: str: uuid of the project to list contents
        :param name_contains: str: filter children based on a pattern
        :return: [File|Folder]: list of Files/Folders contained by the project
        """
        return self._create_array_response(
            self.data_service.get_project_children(
                project_id, name_contains
            ),
            DDSConnection._folder_or_file_constructor
        )

    def get_folder_children(self, folder_id, name_contains=None):
        """
        Get direct files and folders of a folder.
        :param folder_id: str: uuid of the folder
        :param name_contains: str: filter children based on a pattern
        :return: File|Folder
        """
        return self._create_array_response(
            self.data_service.get_folder_children(
                folder_id, name_contains
            ),
            DDSConnection._folder_or_file_constructor
        )

    def get_file_download(self, file_id):
        """
        Get a file download object that contains temporary url settings needed to download the contents of a file.
        :param file_id: str: uuid of the file
        :return: FileDownload
        """
        return self._create_item_response(
            self.data_service.get_file_url(file_id),
            FileDownload
        )

    def upload_file(self, local_path, project_id, parent_data, existing_file_id=None, remote_filename=None):
        """
        Upload a file under a specific location in DDSConnection possibly replacing an existing file.
        :param local_path: str: path to a local file to upload
        :param project_id: str: uuid of the project to add this file to
        :param parent_data: ParentData: info about the parent of this file
        :param existing_file_id: str: uuid of file to create a new version of (or None to create a new file)
        :param remote_filename: str: name to use for our remote file (defaults to local_path basename otherwise)
        :return: File
        """
        path_data = PathData(local_path)
        hash_data = path_data.get_hash()
        file_upload_operations = FileUploadOperations(self.data_service, None)
        upload_id = file_upload_operations.create_upload(project_id, path_data, hash_data,
                                                         remote_filename=remote_filename)
        # Chunks are sent via the parallel processor; the context bundles
        # config, connection, and the no-op progress hooks it expects.
        context = UploadContext(self.config, self.data_service, upload_id, path_data)
        ParallelChunkProcessor(context).run()
        remote_file_data = file_upload_operations.finish_upload(upload_id, hash_data, parent_data, existing_file_id)
        return File(self, remote_file_data)

    @staticmethod
    def _folder_or_file_constructor(dds_connection, data_dict):
        """
        Create a File or Folder based on the kind value in data_dict
        :param dds_connection: DDSConnection
        :param data_dict: dict: payload received from DDSConnection API
        :return: File|Folder
        """
        kind = data_dict['kind']
        if kind == KindType.folder_str:
            return Folder(dds_connection, data_dict)
        elif data_dict['kind'] == KindType.file_str:
            return File(dds_connection, data_dict)
        # NOTE(review): unknown kinds fall through and return None implicitly -
        # confirm callers expect that.

    def get_folder_by_id(self, folder_id):
        """
        Get folder details for a folder id.
        :param folder_id: str: uuid of the folder
        :return: Folder
        """
        return self._create_item_response(
            self.data_service.get_folder(folder_id),
            Folder
        )

    def get_file_by_id(self, file_id):
        """
        Get file details for a file id.
        :param file_id: str: uuid of the file
        :return: File
        """
        return self._create_item_response(
            self.data_service.get_file(file_id),
            File
        )

    def delete_file(self, file_id):
        """
        Delete the file with the specified uuid.
        :param file_id: str: uuid of the file to delete
        """
        self.data_service.delete_file(file_id)
class BaseResponseItem(object):
    """
    Base class for responses from DDSConnection API converts dict into properties for subclasses.
    """

    def __init__(self, dds_connection, data_dict):
        """
        :param dds_connection: DDSConnection
        :param data_dict: dict: dictionary response from DDSConnection API
        """
        self.dds_connection = dds_connection
        # Copy defensively so later mutation of the caller's dict cannot
        # change what our attribute lookups return.
        self._data_dict = dict(data_dict)

    def __getattr__(self, key):
        """
        Expose each key of the response dictionary as an attribute.
        Only consulted when normal attribute lookup fails.
        """
        if key in self._data_dict:
            return self._data_dict[key]
        msg = "'{}' object has no attribute '{}'".format(self.__class__.__name__, key)
        raise AttributeError(msg)
@python_2_unicode_compatible
class Project(BaseResponseItem):
    """
    Contains project details based on DDSConnection API response
    """

    def __init__(self, dds_connection, data):
        """
        :param dds_connection: DDSConnection
        :param data: dict: dictionary response from DDSConnection API in project format
        """
        super(Project, self).__init__(dds_connection, data)

    def get_children(self):
        """
        Fetch the direct children of this project.
        :return: [File|Folder]
        """
        return self.dds_connection.get_project_children(self.id)

    def get_child_for_path(self, path):
        """
        Based on a remote path get a single remote child.
        :param path: str: path within a project specifying a file or folder to download
        :return: File|Folder
        :raises ItemNotFound: when no child exists at the path
        """
        child_finder = ChildFinder(path, self)
        return child_finder.get_child()

    def create_folder(self, folder_name):
        """
        Create a new folder as a top level child of this project.
        :param folder_name: str: name of the folder to create
        :return: Folder
        """
        return self.dds_connection.create_folder(folder_name, KindType.project_str, self.id)

    def upload_file(self, local_path, remote_filename=None):
        """
        Upload a new file based on a file on the file system as a top level child of this project.
        :param local_path: str: path to a file to upload
        :param remote_filename: str: name to use for our remote file (defaults to local_path basename otherwise)
        :return: File
        """
        parent_data = ParentData(self.kind, self.id)
        return self.dds_connection.upload_file(local_path, project_id=self.id, parent_data=parent_data,
                                               remote_filename=remote_filename)

    def delete(self):
        """
        Delete this project and its children.
        """
        self.dds_connection.delete_project(self.id)

    def __str__(self):
        """Human readable identification: class name, uuid and project name."""
        return u'{} id:{} name:{}'.format(self.__class__.__name__, self.id, self.name)
@python_2_unicode_compatible
class Folder(BaseResponseItem):
    """
    Contains folder details based on DDSConnection API response
    """

    def __init__(self, dds_connection, data):
        """
        :param dds_connection: DDSConnection
        :param data: dict: dictionary response from DDSConnection API in folder format
        """
        super(Folder, self).__init__(dds_connection, data)
        # Convenience copy of the owning project's uuid (used when uploading files).
        self.project_id = self.project['id']

    def get_children(self):
        """
        Fetch the direct children of this folder.
        :return: [File|Folder]
        """
        return self.dds_connection.get_folder_children(self.id)

    def create_folder(self, folder_name):
        """
        Create a new folder as a top level child of this folder.
        :param folder_name: str: name of the folder to create
        :return: Folder
        """
        return self.dds_connection.create_folder(folder_name, KindType.folder_str, self.id)

    def upload_file(self, local_path, remote_filename=None):
        """
        Upload a new file based on a file on the file system as a top level child of this folder.
        :param local_path: str: path to a file to upload
        :param remote_filename: str: name to use for our remote file (defaults to local_path basename otherwise)
        :return: File
        """
        parent_data = ParentData(self.kind, self.id)
        return self.dds_connection.upload_file(local_path, project_id=self.project_id, parent_data=parent_data,
                                               remote_filename=remote_filename)

    def delete(self):
        """
        Delete this folder and its children.
        """
        self.dds_connection.delete_folder(self.id)

    def __str__(self):
        """Human readable identification: class name, uuid and folder name."""
        return u'{} id:{} name:{}'.format(self.__class__.__name__, self.id, self.name)
@python_2_unicode_compatible
class File(BaseResponseItem):
    """
    Contains file details based on DDSConnection API response
    """

    def __init__(self, dds_connection, data):
        """
        :param dds_connection: DDSConnection
        :param data: dict: dictionary response from DDSConnection API in file format
        """
        super(File, self).__init__(dds_connection, data)
        # Convenience copy of the owning project's uuid (used when uploading new versions).
        self.project_id = self.project['id']

    def download_to_path(self, file_path):
        """
        Download the contents of this file to a local file path.
        :param file_path: str: local filesystem path to write this file contents into;
            if falsy, defaults to this file's remote name
        """
        file_download = self.dds_connection.get_file_download(self.id)
        path = file_path
        if not path:
            path = self.name
        file_download.save_to_path(path)

    def delete(self):
        """
        Delete this file.
        """
        self.dds_connection.delete_file(self.id)

    def upload_new_version(self, file_path):
        """
        Upload a new version of this file.
        :param file_path: str: local filesystem path of the content to upload
        :return: File
        """
        parent_data = ParentData(self.parent['kind'], self.parent['id'])
        return self.dds_connection.upload_file(file_path, project_id=self.project_id, parent_data=parent_data,
                                               existing_file_id=self.id)

    def __str__(self):
        """Human readable identification: class name, uuid and file name."""
        return u'{} id:{} name:{}'.format(self.__class__.__name__, self.id, self.name)
class FileDownload(BaseResponseItem):
    """
    Contains file download url details based on DDSConnection API response
    """

    def __init__(self, dds_connection, data):
        """
        :param dds_connection: DDSConnection
        :param data: dict: dictionary response from DDSConnection API in file download url format
        """
        super(FileDownload, self).__init__(dds_connection, data)

    def _get_download_response(self):
        # Fetch the file body from the temporary external URL described by
        # this response (verb, host, url and headers come from the payload).
        data_service = self.dds_connection.data_service
        return data_service.receive_external(self.http_verb, self.host, self.url, self.http_headers)

    def save_to_path(self, file_path, chunk_size=DOWNLOAD_FILE_CHUNK_SIZE):
        """
        Save the contents of the remote file to a local path.
        :param file_path: str: file path
        :param chunk_size: chunk size used to write local file
        """
        response = self._get_download_response()
        with open(file_path, 'wb') as output_file:
            for chunk in response.iter_content(chunk_size=chunk_size):
                # Skip keep-alive chunks, which arrive as empty payloads.
                if chunk:
                    output_file.write(chunk)
class FileUpload(object):
    """Uploads a local file to a remote path in a project, creating missing folders on the way."""

    def __init__(self, project, remote_path, local_path):
        """
        :param project: Project: project to receive the file
        :param remote_path: str: destination path within the project (falsy uses the local basename)
        :param local_path: str: path of the local file to send
        """
        self.project = project
        self.remote_path = remote_path if remote_path else os.path.basename(local_path)
        self.local_path = local_path

    def run(self):
        """Ensure the folder chain exists, then upload (or version) the file."""
        *folder_names, _ = self.remote_path.split(os.sep)
        parent = self.project
        for folder_name in folder_names:
            existing_folder = self._try_get_child(parent, folder_name)
            parent = existing_folder if existing_folder else parent.create_folder(folder_name)
        self._upload_to_parent(parent)

    def _upload_to_parent(self, parent):
        # Replace an existing file with a new version; otherwise create it.
        remote_filename = os.path.basename(self.remote_path)
        existing_file = self._try_get_child(parent, remote_filename)
        if existing_file is None:
            parent.upload_file(self.local_path, remote_filename=remote_filename)
        else:
            existing_file.upload_new_version(self.local_path)

    @staticmethod
    def _try_get_child(parent, child_name):
        """Return the direct child of *parent* named *child_name*, or None."""
        return next((child for child in parent.get_children() if child.name == child_name), None)
class ChildFinder(object):
    """
    Walks a remote path, one component at a time, to locate a descendant node.
    """

    def __init__(self, remote_path, node):
        """
        :param remote_path: path under a project in DDSConnection
        :param node: Project|Folder to find children under
        """
        self.remote_path = remote_path
        self.node = node

    def get_child(self):
        """
        Find file or folder at the remote_path
        :return: File|Folder
        :raises ItemNotFound: when some path component has no matching child
        """
        current = self.node
        for part in self.remote_path.split(os.sep):
            for child in current.get_children():
                if child.name == part:
                    current = child
                    break
            else:
                raise ItemNotFound("No item at path {}".format(self.remote_path))
        return current
class PathToFiles(object):
    """Builds an ordered mapping of relative remote paths to the File objects below a node."""

    def __init__(self):
        self.paths = OrderedDict()

    def add_paths_for_children_of_node(self, node):
        """Populate self.paths with every file found below *node* (recursively)."""
        self._child_recurse(node, '')

    def _child_recurse(self, node, parent_path):
        for child in node.get_children():
            child_path = self._make_path(parent_path, child)
            if child.kind == KindType.file_str:
                self.paths[child_path] = child
            else:
                # Folders contribute their name to the path and recurse.
                self._child_recurse(child, child_path)

    @staticmethod
    def _make_path(parent_path, child):
        """Join *parent_path* and the child's name; top-level children keep just their name."""
        return os.path.join(parent_path, child.name) if parent_path else child.name
class UploadContext(object):
    """
    Contains settings and monitoring methods used while uploading a file.
    """

    def __init__(self, config, data_service, upload_id, path_data):
        """
        :param config: ddsc.config.Config: settings used during the upload
        :param data_service: DataServiceApi: connection used to send chunks
        :param upload_id: str: uuid of the upload created beforehand
        :param path_data: PathData: local file being uploaded
        """
        self.config = config
        self.data_service = data_service
        self.upload_id = upload_id
        # This object doubles as its own progress "watcher": the chunk
        # processor calls the hook methods below, which are no-ops here.
        self.watcher = self
        self.local_file = UploadFileInfo(path_data)

    def transferring_item(self, item, increment_amt):
        """Progress hook invoked during transfer; intentionally a no-op."""
        pass

    def start_waiting(self):
        """Hook invoked when waiting begins; intentionally a no-op."""
        pass

    def done_waiting(self):
        """Hook invoked when waiting ends; intentionally a no-op."""
        pass
class UploadFileInfo(object):
    """
    Settings about a file being uploaded
    """

    def __init__(self, path_data):
        """
        :param path_data: PathData: local file whose size and path are captured
        """
        self.size = path_data.size()  # file size reported by PathData
        self.path = path_data.path  # local filesystem path
        self.kind = KindType.file_str  # uploads always describe a file
class ItemNotFound(Exception):
    """Raised when a remote path does not match any file or folder (see ChildFinder)."""
    pass
class DuplicateNameError(Exception):
    """Signals a name collision. NOTE(review): no raise site in this module - confirm usage in callers."""
    pass
/Marketingtool-0.0.1.tar.gz/Marketingtool-0.0.1/README.md | # Marketing Tool
This package is a tool that helps you do the following marketing jobs:
1. Transcribe the speech in video
2. Insert a video into another video
3. Translate subtitle files
4. Insert text into video (todo)
### Install requirement
This program requires Python 3.9 to be installed.
### How to use
Transcribe the speech in video
```
Marketingtool --action transcribe -f videofilepath
```
Insert Video into another video
```
Marketingtool -a insertVideo -f ~/path/to/video -o ~/result/video.mp4 --insert-video ~/insert/video.mp4
```
Translate subtitle files
```
Marketingtool --action translate -f /path/to/subtitle/file --source-lang chinese --targetlang english
```
### How to develop
You can also install the Python package for development comfortably with pip:
```
python3 -m venv markenv
source markenv/bin/activate
pip3 install -e .
```
#### Update the dependent Python packages in requirements.txt
```
pip3 install pipreqs
pipreqs ./ --force
```
#### How to test
Test the movie-edit function:
```
python3 -m unittest Tests.videoedit_tests.VideoeditTestCase.test_insert_text
``` | PypiClean |
/Adeepspeed-0.9.2.tar.gz/Adeepspeed-0.9.2/deepspeed/runtime/activation_checkpointing/checkpointing.py |
# DeepSpeed Team
"""
Use to partition the activations stored for backward propagation
Therefore reduces the memory consumption
Also implements CPU checkpointing and contiguous memory checkpointing
Reduces memory consumption and memory fragmentation
Code for rng checkpointing taken from NVIDIA Megatron-LM mpu/random.py
b886b7bb972afe72bac0f5de4f42a4a7bae8ebef
"""
# Parts of the code here are adapted from PyTorch
# repo: https://github.com/pytorch/pytorch
import copy
import torch
import contextlib
from deepspeed import comm as dist
import mmap
from torch import _C
from deepspeed.runtime.config import DeepSpeedConfig
from deepspeed.utils import logger
from deepspeed.runtime.utils import copy_to_device, move_to_device, see_memory_usage, bwc_tensor_model_parallel_rank
from deepspeed.utils.timer import SynchronizedWallClockTimer as Timers
from deepspeed.accelerator import get_accelerator
# DeepSpeed Checkpointing Enabled or Disabled (flipped to True by configure()).
deepspeed_checkpointing_enabled = False
# Model-parallel (MP) parameters, lazily resolved from `mpu` on first forward.
mpu = None
mp_rank = None
mp_size = None
mp_group = None
# Model Parameters: number of checkpointed layers (sizes the contiguous buffers).
num_layers = None
# Checkpointing buffers for the contiguous-memory optimization, plus
# per-buffer write cursors.
contiguous_data_buffers = []
data_offsets = []
contiguous_size_buffers = []
size_offsets = []
# Wall-clock timers, created on demand when PROFILE_TIME is enabled.
timers = None
# optimization flags
PARTITION_ACTIVATIONS = False
CPU_CHECKPOINT = False
CONTIGUOUS_CHECKPOINTING = False
SYNCHRONIZE = False
PROFILE_TIME = False
# Default name for the model parallel rng tracker.
_MODEL_PARALLEL_RNG_TRACKER_NAME = 'model-parallel-rng'
# Side stream for host<->device transfers and the cached accelerator device
# name; both initialized on the first forward pass.
transport_stream = None
cuda_device = None
def detach_variable(inputs, device=None):
    """Return detached copies of every tensor in a tuple.

    Non-tensor entries pass through unchanged. Each tensor is optionally
    moved to ``device``, detached from the autograd graph, and re-marked
    with its original ``requires_grad`` flag so gradients can be recomputed
    through it later.

    :param inputs: tuple of tensors and/or arbitrary objects
    :param device: optional target device for the detached tensors
    :raises RuntimeError: if ``inputs`` is not a tuple
    """
    if not isinstance(inputs, tuple):
        raise RuntimeError("Only tuple of tensors is supported. Got Unsupported input type: ", type(inputs).__name__)

    detached = []
    for item in inputs:
        if not isinstance(item, torch.Tensor):
            detached.append(item)
            continue
        # Preserve the grad flag before any transformation.
        needs_grad = item.requires_grad
        moved = item.to(device=device) if device is not None else item
        clone = moved.detach()
        clone.requires_grad = needs_grad
        detached.append(clone)
    return tuple(detached)
def _set_cuda_rng_state(new_state, device=-1):
    """Sets the random number generator state of the current GPU.

    Arguments:
        new_state (torch.ByteTensor): The desired state
    This function is adapted from PyTorch repo (torch.cuda.set_rng_state) #ignore-cuda
    with a single change: the input state is not cloned. Cloning caused
    major performance issues for +4 GPU cases.
    """
    if hasattr(_C, '_cuda_setRNGState') and callable(_C._cuda_setRNGState):
        # older PyTorch: set the state through the private C API directly.
        def cb():
            with get_accelerator().device(device):
                _C._cuda_setRNGState(new_state)
    else:
        # newer PyTorch: normalize `device` to a torch.device, then set the
        # state on that device's default generator.
        if device == -1:
            device = torch.device(get_accelerator().device_name())
        elif isinstance(device, str):
            device = torch.device(device)
        elif isinstance(device, int):
            device = torch.device(get_accelerator().device_name(), device)

        def cb():
            idx = device.index
            if idx is None:
                idx = get_accelerator().current_device()
            default_generator = get_accelerator().default_generator(idx)
            default_generator.set_state(new_state)

    # NOTE(review): lazy_call presumably defers cb until the accelerator is
    # initialized — confirm against the accelerator abstraction.
    get_accelerator().lazy_call(cb)
class CudaRNGStatesTracker:
    """Tracker for the cuda RNG states.

    Using the `add` method, a cuda rng state is initialized based on
    the input `seed` and is assigned to `name`. Later, by forking the
    rng state, we can perform operations and return to our starting
    cuda state.
    """

    def __init__(self):
        # Map from a string name to the cuda rng state.
        self.states_ = {}
        # Seeds are just for book keeping and ensure no seed is set twice.
        self.seeds_ = set()

    def reset(self):
        """Set to the initial state (no tracker)."""
        self.states_ = {}
        self.seeds_ = set()

    def get_states(self):
        """Get rng states. Copy the dictionary so we have direct
        pointers to the states, not just a pointer to the dictionary."""
        # Shallow copy: the state tensors themselves are shared, only the
        # dict container is duplicated.
        return copy.copy(self.states_)

    def set_states(self, states):
        """Set the rng states. For efficiency purposes, we do not check
        the size of seed for compatibility."""
        self.states_ = states

    def add(self, name, seed):
        """Track the rng state.

        Seeds the accelerator RNG with `seed`, captures the resulting state
        under `name`, then restores the RNG to its prior state.
        """
        # Check seed is not already used.
        if seed in self.seeds_:
            raise Exception('seed {} already exists'.format(seed))
        self.seeds_.add(seed)
        # Check that state is not already defined.
        if name in self.states_:
            raise Exception('cuda rng state {} already exists'.format(name))
        # Get the current rng state.
        orig_rng_state = get_accelerator().get_rng_state()
        # Set the new state and store it.
        get_accelerator().manual_seed(seed)
        self.states_[name] = get_accelerator().get_rng_state()
        # Reset rng state to what it was.
        _set_cuda_rng_state(orig_rng_state)

    @contextlib.contextmanager
    def fork(self, name=_MODEL_PARALLEL_RNG_TRACKER_NAME):
        """Fork the cuda rng state, perform operations, and exit with
        the original state."""
        # Check if we have added the state
        if name not in self.states_:
            raise Exception('cuda rng state {} is not added'.format(name))
        # Store current rng state.
        orig_cuda_rng_state = get_accelerator().get_rng_state()
        # Set rng state to the desired one
        _set_cuda_rng_state(self.states_[name])
        # Do the stuff we wanted to do.
        try:
            yield
        finally:
            # Update the current rng state for later use.
            self.states_[name] = get_accelerator().get_rng_state()
            # And set the state to the original state we started with.
            _set_cuda_rng_state(orig_cuda_rng_state)
# RNG tracker object.
# Module-level singleton; access it via get_cuda_rng_tracker().
_CUDA_RNG_STATE_TRACKER = CudaRNGStatesTracker()
def get_cuda_rng_tracker():
    """Return the module-level singleton CUDA RNG state tracker."""
    return _CUDA_RNG_STATE_TRACKER
def model_parallel_cuda_manual_seed(seed):
    """Initialize model parallel cuda seed.

    This function should be called after the model parallel is
    initialized. Also, no get_accelerator().manual_seed should be called
    after this function. Basically, this is replacement for that
    function.
    Two set of RNG states are tracked:
    default state: This is for data parallelism and is the same among a
                   set of model parallel GPUs but different across
                   different model parallel groups. This is used for
                   example for dropout in the non-model-parallel regions.
    model-parallel state: This state is different among a set of model
                          parallel GPUs, but the same across data parallel
                          groups. This is used for example for dropout in
                          model parallel regions.
    """
    global mpu

    tp_rank = bwc_tensor_model_parallel_rank(mpu)

    # 2718 is just for fun and any POSITIVE value will work.
    offset = seed + 2718
    # Each tensor-parallel rank gets a distinct seed; data parallel keeps `seed`.
    model_parallel_seed = offset + tp_rank
    # Data parallel gets the original seed.
    data_parallel_seed = seed

    if dist.get_rank() == 0:
        logger.info(
            '> initializing model parallel cuda seeds on global rank {}, '
            'model parallel rank {}, and data parallel rank {} with '
            'model parallel seed: {} and data parallel seed: {}'.format(dist.get_rank(), tp_rank,
                                                                        mpu.get_data_parallel_rank(),
                                                                        model_parallel_seed, data_parallel_seed), )
    _CUDA_RNG_STATE_TRACKER.reset()
    # Set the default state.
    get_accelerator().manual_seed(data_parallel_seed)
    # and model parallel state.
    _CUDA_RNG_STATE_TRACKER.add(_MODEL_PARALLEL_RNG_TRACKER_NAME, model_parallel_seed)
def get_partition_start(item):
    """Return the flat-index offset where this rank's partition of *item* begins."""
    global mp_rank, mp_size, mp_group
    total = item.numel()
    per_rank = total / mp_size
    # Truncate after the multiply, matching the element count math in
    # get_partition_size().
    return int(per_rank * mp_rank)
def get_partition_size(item):
    """Return how many elements of *item* each model-parallel rank stores."""
    global mp_rank, mp_size, mp_group
    total = item.numel()
    # Partitioning only supports tensors that divide evenly across ranks.
    assert total % mp_size == 0, "Doesn't handle if partition activation if item is not divisible by mp size"
    return int(total / mp_size)
def gather_partitioned_activations(tensors, device=None):
    """Reassemble full activations from their model-parallel partitions.

    `tensors` is a flat sequence of (partition, original-size) pairs as
    produced by get_partitioned_activations_for_backward(); each gathered
    tensor is reshaped to its original size and rebound onto the partition
    object's `.data`.

    :param tensors: alternating [tensor, size, tensor, size, ...]
    :param device: optional device for the gather buffer
    """
    global mp_rank, mp_size, mp_group
    assert len(tensors) % 2 == 0, f'Expected even count of tensors, instead got {len(tensors)}'
    inputs = []
    num_args = int(len(tensors) / 2)
    for i in range(num_args):

        item = tensors[2 * i]
        size = tensors[2 * i + 1]

        if not is_activation_to_checkpoint(item):
            inputs.append(item)
            continue

        # don't need to do all_gather if model parallel is not enabled
        if mp_group is None or mp_size == 1:
            item = item.view(list(size.numpy()))
            inputs.append(item)
            continue

        partition_size = item.numel()
        tensor_size = partition_size * mp_size
        if device is not None:
            flat_tensor = torch.zeros([tensor_size], dtype=item.dtype, device=device)
        else:
            flat_tensor = torch.zeros([tensor_size], dtype=item.dtype, device=item.device)
        partitions = []
        # NOTE(review): the inner loop reuses `i` as its index, shadowing the
        # outer loop variable; harmless in Python (the outer `for` rebinds it
        # each iteration) but easy to trip over when editing.
        for i in range(mp_size):
            part_i = flat_tensor.narrow(0, partition_size * i, partition_size)
            if i == mp_rank:
                part_i.copy_(item)
            partitions.append(part_i)
        dist.all_gather(partitions, partitions[mp_rank], group=mp_group)
        input_tensor = flat_tensor.view(list(size.numpy()))
        item.data = input_tensor.data

        inputs.append(item)

    return tuple(inputs)
def extract_tensors(all_objects):
    """Split a list/tuple into its tensor and non-tensor members.

    The relative order within each output group matches the input order, and
    a parallel flag sequence records which original positions held tensors so
    merge_tensors() can reassemble the collection.

    Parameters:
        all_objects (list/tuple): Objects containing tensors and non-tensors to be split.

    Returns:
        tuple: (tensors, non-tensors, per-position is-tensor flags); the three
        results are tuples when the input was a tuple, otherwise lists.
    """
    flags = [torch.is_tensor(obj) for obj in all_objects]
    tensors = [obj for obj, is_t in zip(all_objects, flags) if is_t]
    others = [obj for obj, is_t in zip(all_objects, flags) if not is_t]

    # Mirror the input's container type exactly (tuple in -> tuples out).
    if type(all_objects) is tuple:
        return tuple(tensors), tuple(others), tuple(flags)
    return tensors, others, flags
def merge_tensors(tensor_objects, non_tensor_objects, tensor_flags):
    """
    Merge two lists (or tuples) of tensors and non-tensors using a mapping of positions in merged list (or tuple).

    Parameters:
        tensor_objects (list/tuple): Tensors to merge.
        non_tensor_objects (list/tuple): Non-tensors to merge.
        tensor_flags (list/tuple): Indicates whether each position in output is a tensor.

    Returns:
        tuple: Merge of tensors and non-tensors
    """
    merged_objects = []
    tensor_idx = 0
    non_tensor_idx = 0

    real_tensor_flags = None

    # remove the flags that are assigned to the size of the flattened tensors
    # (when activations are partitioned, each checkpointed tensor was saved as
    # a [tensor, size] pair, so every flag following a kept one is dropped)
    if PARTITION_ACTIVATIONS:
        real_tensor_flags = []
        previous_flag = False
        for flag in tensor_flags:
            if previous_flag:
                previous_flag = False
                continue
            previous_flag = flag

            real_tensor_flags.append(flag)
    else:
        real_tensor_flags = tensor_flags

    # Interleave the two groups back into their original positions.
    for is_tensor in real_tensor_flags:
        if is_tensor:
            merged_objects.append(tensor_objects[tensor_idx])
            tensor_idx += 1
        else:
            merged_objects.append(non_tensor_objects[non_tensor_idx])
            non_tensor_idx += 1

    return tuple(merged_objects)
def is_activation_to_checkpoint(item):
    """
    Is an activation to be checkpointed

    Only floating-point tensors with at least one element per model-parallel
    rank qualify.
    """
    global mp_size
    if not torch.is_tensor(item):
        return False
    return item.is_floating_point() and item.numel() >= mp_size
def partition_activations(args, cpu_checkpoint, contiguous_checkpoint):
    """Slice each checkpointable activation down to this rank's partition.

    Non-checkpointable args pass through unchanged. With
    `contiguous_checkpoint`, partitions are copied into pre-allocated
    per-layer buffers (indexed by the running `data_offsets` cursor);
    otherwise each partition is an independent clone, optionally moved to CPU.
    """
    global contiguous_data_buffers, data_offsets

    inputs = []
    num_non_fp_tensors = 0

    for arg_index, item in enumerate(args):
        if not is_activation_to_checkpoint(item):
            inputs.append(item)
            num_non_fp_tensors += 1
            continue

        # Index among checkpointable tensors only.
        i = arg_index - num_non_fp_tensors
        partition_size = get_partition_size(item)
        partition = item.detach().contiguous().view(-1).narrow(0, get_partition_start(item), partition_size).clone()

        buffer_device = torch.device('cpu') if cpu_checkpoint else partition.device

        if contiguous_checkpoint:
            if i >= len(contiguous_data_buffers):
                tensor_list = [
                    torch.tensor(()).new_empty([partition_size], dtype=partition.dtype, device=buffer_device)
                    for _ in range(num_layers)
                ]
                contiguous_data_buffers.append(tensor_list)
                data_offsets.append(0)
            elif contiguous_data_buffers[i] is None:
                tensor_list = [
                    torch.tensor(()).new_empty([partition_size], dtype=partition.dtype, device=buffer_device)
                    for _ in range(num_layers)
                ]
                contiguous_data_buffers[i] = tensor_list
                data_offsets[i] = 0

            # Because the 'new_empty' returns uninitialized pages,
            # the pages need to be populated during the cudaMemcpy time
            # which increases the data copy time. To avoid this, we
            # pre-populate these pages by simply writing 0 ahead of
            # the actual cudaMemcpy operation time. Due to the
            # previously launched GPU kernels, there is a small
            # window of time here for CPUs to populate pages asynchronously.
            contiguous_data_buffers[i][data_offsets[i]].data[range(
                0, contiguous_data_buffers[i][data_offsets[i]].data.shape[0],
                int(mmap.PAGESIZE / contiguous_data_buffers[i][data_offsets[i]].data.element_size()))] = 0

            contiguous_partition = contiguous_data_buffers[i][data_offsets[i]].data.copy_(partition.data)
            data_offsets[i] = data_offsets[i] + 1
            inputs.append(contiguous_partition)
        else:
            partition = partition.cpu() if CPU_CHECKPOINT else partition
            inputs.append(partition)

    return inputs
def get_partitioned_activations_for_backward(args, inputs, contiguous_checkpoint):
    """Build the flat [tensor, size, tensor, size, ...] list saved for backward.

    Each arg is rebound to its partitioned copy from `inputs` and followed by
    a tensor holding its original shape, so gather_partitioned_activations()
    can restore it. Non-checkpointable args get a `None` size placeholder.
    """
    global contiguous_size_buffers, size_offsets

    new_args = []
    num_non_fp_tensors = 0

    for arg_index, (arg, inp) in enumerate(zip(args, inputs)):
        size = torch.tensor(arg.size()) if torch.is_tensor(arg) else None
        if not is_activation_to_checkpoint(arg):
            new_args.append(arg)
            new_args.append(size)
            num_non_fp_tensors += 1
            continue

        # Rebind the original arg's storage to the partitioned copy.
        arg.data = inp.data
        new_args.append(arg)
        i = arg_index - num_non_fp_tensors

        if contiguous_checkpoint:
            numel = size.numel()
            if i >= len(contiguous_size_buffers):
                tmp = torch.tensor(())
                contiguous_size_buffers.append(
                    tmp.new_empty([numel * num_layers], dtype=size.dtype, device=size.device))
                size_offsets.append(0)
            elif contiguous_size_buffers[i] is None:
                tmp = torch.tensor(())
                contiguous_size_buffers[i] = tmp.new_empty([numel * num_layers], dtype=size.dtype, device=size.device)
                size_offsets[i] = 0

            contiguous_size = contiguous_size_buffers[i].narrow(0, size_offsets[i], numel).data.copy_(size.data)
            contiguous_size = contiguous_size.view_as(size)
            size_offsets[i] = size_offsets[i] + numel
            new_args.append(contiguous_size)
        else:
            new_args.append(size)

    return new_args
def get_cpu_activations_for_backward(args, inputs):
    """Rebind each checkpointable activation in *args* to its (CPU) copy in *inputs*.

    Non-checkpointable entries are passed through untouched; the returned list
    preserves the original order.
    """
    new_args = []
    for arg, inp in zip(args, inputs):
        if is_activation_to_checkpoint(arg):
            # Swap the underlying storage for the offloaded copy.
            arg.data = inp.data
        new_args.append(arg)
    return new_args
class CheckpointFunction(torch.autograd.Function):
    """This function is adapted from torch.utils.checkpoint with
       these main changes:
       1) torch.cuda.set_rng_state is replaced with `_set_cuda_rng_state`  #ignore-cuda
       2) the states in the model parallel tracker are also properly
          tracked/set/reset.
       3) Performance activation partitioning, contiguous memory optimization
       4) CPU Checkpointing
       5) Profile forward and backward functions
    """

    @staticmethod
    def forward(ctx, run_function, all_outputs, *args):
        """Run `run_function(*args)` under no_grad, stashing inputs and RNG
        state on `ctx` so backward() can recompute the forward pass."""
        global mpu, timers, SYNCHRONIZE, PROFILE_TIME

        def save_args_for_backward(*all_args):
            # Tensors and non-tensors are stashed separately; tensor_flags
            # records the original interleaving for merge_tensors().
            tensor_args, non_tensor_args, tensor_flags = extract_tensors(all_objects=all_args)
            ctx.deepspeed_saved_tensors = tensor_args
            ctx.non_tensor_args = non_tensor_args
            ctx.tensor_flags = tensor_flags

        if SYNCHRONIZE:
            get_accelerator().synchronize()

        if timers is None and PROFILE_TIME:
            timers = Timers()

        if PROFILE_TIME:
            timers('forward').start()

        ctx.run_function = run_function
        global num_layers
        global mp_rank, mp_size, mp_group
        global contiguous_data_buffers, contiguous_size_buffers
        global data_offsets, size_offsets
        # Lazily resolve model-parallel topology on the first call.
        if mp_rank is None:
            if mpu is not None:
                if hasattr(mpu, 'get_tensor_model_parallel_rank'):
                    mp_rank = mpu.get_tensor_model_parallel_rank()
                    mp_size = mpu.get_tensor_model_parallel_world_size()
                    mp_group = mpu.get_tensor_model_parallel_group()
                else:
                    mp_rank = mpu.get_model_parallel_rank()
                    mp_size = mpu.get_model_parallel_world_size()
                    mp_group = mpu.get_model_parallel_group()
            else:
                mp_rank = 0
                mp_size = 1
                mp_group = None

        # NOTE(review): buffer_0/buffer_1/... are declared global but never
        # used in this method.
        global cuda_device, transport_stream, PARTITION_ACTIVATIONS, buffer_0, buffer_1, buffer_0_offset, buffer_1_offset

        # One-time setup: log configuration and create the transfer stream.
        if cuda_device is None:
            see_memory_usage("First Forward Beginning", force=False)
            if dist.get_rank() == 0:
                logger.info(f"Activation Checkpointing Information")
                logger.info(f"----Partition Activations {PARTITION_ACTIVATIONS}, CPU CHECKPOINTING {CPU_CHECKPOINT}")
                logger.info(
                    f"----contiguous Memory Checkpointing {CONTIGUOUS_CHECKPOINTING} with {num_layers} total layers")
                logger.info(f"----Synchronization {SYNCHRONIZE}")
                logger.info(f"----Profiling time in checkpointing {PROFILE_TIME}")

            cuda_device = get_accelerator().current_device_name()
            transport_stream = get_accelerator().Stream(device=cuda_device)

        if PARTITION_ACTIVATIONS:
            inputs = partition_activations(args, CPU_CHECKPOINT, CONTIGUOUS_CHECKPOINTING)
        elif CPU_CHECKPOINT:
            inputs = copy_to_device(args, device=torch.device('cpu'), criterion_func=is_activation_to_checkpoint)

        # just in case something funky is happening such as reuse of inputs
        inputs_cuda = copy_to_device(args, device=cuda_device, criterion_func=is_activation_to_checkpoint)

        # Copy the rng states so backward can replay the forward exactly.
        ctx.fwd_cpu_rng_state = torch.get_rng_state()
        ctx.fwd_cuda_rng_state = get_accelerator().get_rng_state()
        ctx.fwd_cuda_rng_state_tracker = get_cuda_rng_tracker().get_states()

        see_memory_usage("Before running forward on the layer", force=False)
        # ctx.save_for_backward(*args)
        with torch.no_grad():
            outputs = run_function(*inputs_cuda)

        see_memory_usage("After running forward on the layer", force=False)
        del inputs_cuda

        if PARTITION_ACTIVATIONS:
            new_args = get_partitioned_activations_for_backward(args, inputs, CONTIGUOUS_CHECKPOINTING)
            assert len(new_args) % 2 == 0, f'save_for_backward called with odd number of args, {len(new_args)}'
            save_args_for_backward(*new_args)
        elif CPU_CHECKPOINT:
            new_args = get_cpu_activations_for_backward(args, inputs)
            save_args_for_backward(*new_args)
        else:
            save_args_for_backward(*args)

        if PROFILE_TIME:
            timers('forward').stop()
            timers.log(['forward'])
        if SYNCHRONIZE:
            get_accelerator().synchronize()

        # Tensors returned from forward() may not be differentiable.
        if torch.is_tensor(outputs):
            non_grad_outputs = [outputs] if not outputs.is_floating_point() else []
        else:
            non_grad_outputs = [o for o in outputs if torch.is_tensor(o) and not o.is_floating_point()]
        ctx.mark_non_differentiable(*non_grad_outputs)

        # Results are also returned through the caller-supplied `all_outputs`
        # list so non-tensor outputs survive autograd's tensor-only return.
        if torch.is_tensor(outputs):
            all_outputs += [outputs]
            return outputs
        else:
            all_outputs += outputs
            outputs, _, _ = extract_tensors(all_objects=outputs)
            return tuple(outputs)

    @staticmethod
    def backward(ctx, *grads):
        """Recompute the forward pass under the saved RNG state, then
        backpropagate `grads` through the recomputed graph."""
        global timers
        see_memory_usage("In backward", force=False)
        # removing pointers to the contiguous buffer memory
        # so that they can be garbage collected once the checkpoints
        # have been used
        if SYNCHRONIZE:
            get_accelerator().synchronize()
        if PROFILE_TIME:
            timers('backward').start()

        if CONTIGUOUS_CHECKPOINTING:
            global data_offsets, size_offsets
            global contiguous_data_buffers, contiguous_size_buffers

            # NOTE(review): this loop only rebinds the local name `buffers`
            # and never mutates the lists; the real release happens via the
            # reassignments below.
            for buffers in contiguous_data_buffers:
                buffers = []

            # frees up all the pointers to the checkpoints except for the ones
            # stored by save for backward
            contiguous_data_buffers = []
            contiguous_size_buffers = []
            data_offsets = []
            size_offsets = []

        see_memory_usage("In backward checkpointing code", force=False)
        if not torch.autograd._is_checkpoint_valid():
            raise RuntimeError("Checkpointing is not compatible with .grad(), "
                               "please use .backward() if possible")

        global cuda_device, transport_stream, PARTITION_ACTIVATIONS

        # Recover the saved inputs (gathering partitions / moving back from
        # CPU as needed) and detach them so a fresh graph can be built.
        if PARTITION_ACTIVATIONS:
            # with get_accelerator().stream(transport_stream):
            inputs = gather_partitioned_activations(ctx.deepspeed_saved_tensors,
                                                    device=cuda_device if CPU_CHECKPOINT else None)
            detached_inputs = detach_variable(inputs)
        elif CPU_CHECKPOINT:
            inputs = move_to_device(ctx.deepspeed_saved_tensors, cuda_device, is_activation_to_checkpoint)
            detached_inputs = detach_variable(inputs)
        else:
            inputs = ctx.deepspeed_saved_tensors
            detached_inputs = detach_variable(inputs)

        # Add non tensor input args
        detached_inputs = merge_tensors(tensor_objects=detached_inputs,
                                        non_tensor_objects=ctx.non_tensor_args,
                                        tensor_flags=ctx.tensor_flags)

        # Store the current states.
        bwd_cpu_rng_state = torch.get_rng_state()
        bwd_cuda_rng_state = get_accelerator().get_rng_state()
        bwd_cuda_rng_state_tracker = get_cuda_rng_tracker().get_states()

        # Set the states to what it used to be before the forward pass.
        torch.set_rng_state(ctx.fwd_cpu_rng_state)
        _set_cuda_rng_state(ctx.fwd_cuda_rng_state)
        get_cuda_rng_tracker().set_states(ctx.fwd_cuda_rng_state_tracker)

        # if PARTITION_ACTIVATIONS:
        #     current_stream=get_accelerator().current_stream()
        #     current_stream.wait_stream(transport_stream)

        see_memory_usage("In backward checkpointing code before forward", force=False)
        with torch.enable_grad():
            outputs = ctx.run_function(*detached_inputs)

        see_memory_usage("In backward checkpointing code after forward", force=False)
        # Set the states back to what it was at the start of this function.
        torch.set_rng_state(bwd_cpu_rng_state)
        _set_cuda_rng_state(bwd_cuda_rng_state)
        get_cuda_rng_tracker().set_states(bwd_cuda_rng_state_tracker)

        if isinstance(outputs, torch.Tensor):
            outputs = (outputs, )

        # Filter out non tensor outputs
        outputs, _, _ = extract_tensors(all_objects=outputs)

        # Construct arguments to autograd.backward().
        # This is usually just outputs and grads, but forward() can return tensors that
        # are not differentiable.
        output_tensors = []
        grad_tensors = []
        for out, grad in zip(outputs, grads):
            if out.requires_grad:
                output_tensors.append(out)
                grad_tensors.append(grad)

        see_memory_usage("In backward checkpointing code before backward", force=False)

        torch.autograd.backward(output_tensors, grad_tensors)

        # Force clear our stashed tensors to prevent a memory leak in certain scenarios
        ctx.deepspeed_saved_tensors = None
        ctx.non_tensor_args = None
        ctx.tensor_flags = None

        see_memory_usage("After backward checkpointing code after backward", force=False)

        if PROFILE_TIME:
            timers('backward').stop()
            timers.log(['backward'])
        if SYNCHRONIZE:
            get_accelerator().synchronize()
        # One gradient slot per forward() argument; run_function and
        # all_outputs take None.
        ret_list = [None, None]  # first None for ctx
        for inp in detached_inputs:
            if torch.is_tensor(inp):
                ret_list.append(inp.grad)
            else:
                ret_list.append(None)

        return tuple(ret_list)
def checkpoint(function, *args):
    """Checkpoint a model or part of the model.

    This has been directly copied from torch.utils.checkpoint.
    Returns a single value when the wrapped function produced one output,
    otherwise a tuple of outputs.
    """
    collected = []
    CheckpointFunction.apply(function, collected, *args)
    return collected[0] if len(collected) == 1 else tuple(collected)
def partition_activations_in_checkpoint(partition_activation):
    """Enable or disable partitioning of activation checkpoints across
    model-parallel ranks."""
    global PARTITION_ACTIVATIONS
    PARTITION_ACTIVATIONS = partition_activation
    if dist.get_rank() == 0:
        logger.info(f"**************Partition Activations {PARTITION_ACTIVATIONS}************")
def set_num_layers(nlayers):
    """Set the number of checkpointed layers (sizes the contiguous buffers)."""
    global num_layers
    num_layers = nlayers
def reset():
    """Resets memory buffers related to contiguous memory optimizations.
    Should be called during eval when multiple forward propagations are
    computed without any backward propagation that usually clears these
    buffers.
    Arguments:
        None

    Return:
        None
    """
    if CONTIGUOUS_CHECKPOINTING:
        global data_offsets, size_offsets
        global contiguous_data_buffers, contiguous_size_buffers

        # Drop our references to the checkpoint buffers so they can be
        # garbage collected; copies stashed by save-for-backward stay alive
        # until their autograd graphs are released.
        # (A previous `for buffers in contiguous_data_buffers: buffers = []`
        # loop was removed: it only rebound a local name and had no effect.)
        contiguous_data_buffers = []
        contiguous_size_buffers = []
        data_offsets = []
        size_offsets = []
def _configure_using_config_file(config, mpu=None):
    """Load the activation-checkpointing section of a DeepSpeed config and
    copy its settings into this module's flag globals."""
    global num_layers, PARTITION_ACTIVATIONS, CONTIGUOUS_CHECKPOINTING, \
        CPU_CHECKPOINT, SYNCHRONIZE, PROFILE_TIME

    config = DeepSpeedConfig(config, mpu=mpu).activation_checkpointing_config
    if dist.get_rank() == 0:
        logger.info(config.repr())
    PARTITION_ACTIVATIONS = config.partition_activations
    CONTIGUOUS_CHECKPOINTING = config.contiguous_memory_optimization
    num_layers = config.number_checkpoints
    CPU_CHECKPOINT = config.cpu_checkpointing
    SYNCHRONIZE = config.synchronize_checkpoint_boundary
    PROFILE_TIME = config.profile
def _configure_defaults():
    """Reset all checkpointing flags to their defaults and mark the module
    as configured."""
    global mpu, num_layers, deepspeed_checkpointing_enabled

    global PARTITION_ACTIVATIONS, CONTIGUOUS_CHECKPOINTING, \
        CPU_CHECKPOINT, SYNCHRONIZE, PROFILE_TIME

    PARTITION_ACTIVATIONS = False
    CONTIGUOUS_CHECKPOINTING = False
    # NOTE(review): `False` looks like it should be `None` (num_layers is a
    # count elsewhere); left as-is since configure() may overwrite it.
    num_layers = False
    CPU_CHECKPOINT = False
    SYNCHRONIZE = False
    PROFILE_TIME = False
    deepspeed_checkpointing_enabled = True
def configure(
    mpu_,
    deepspeed_config=None,
    partition_activations=None,
    contiguous_checkpointing=None,
    num_checkpoints=None,
    checkpoint_in_cpu=None,
    synchronize=None,
    profile=None,
):
    """Configure DeepSpeed Activation Checkpointing.

    Arguments:
        mpu_: Optional: An object that implements the following methods
            get_model_parallel_rank/group/world_size, and get_data_parallel_rank/group/world_size

        deepspeed_config: Optional: DeepSpeed Config json file when provided will be used to
            configure DeepSpeed Activation Checkpointing

        partition_activations: Optional: Partitions activation checkpoint across model parallel
            GPUs when enabled. By default False. Will overwrite deepspeed_config if provided

        contiguous_checkpointing: Optional: Copies activation checkpoints to a contiguous memory
            buffer. Works only with homogeneous checkpoints when partition_activations is enabled.
            Must provide num_checkpoints. By default False. Will overwrite deepspeed_config if
            provided

        num_checkpoints: Optional: Number of activation checkpoints stored during the forward
            propagation of the model. Used to calculate the buffer size for contiguous_checkpointing
            Will overwrite deepspeed_config if provided

        checkpoint_in_cpu: Optional: Moves the activation checkpoint to CPU. Only works with
            partition_activation. Default is false. Will overwrite deepspeed_config if provided

        synchronize: Optional: Performs get_accelerator().synchronize() at the beginning and end of
            each call to deepspeed.checkpointing.checkpoint for both forward and backward pass.
            By default false. Will overwrite deepspeed_config if provided

        profile: Optional: Logs the forward and backward time for each
            deepspeed.checkpointing.checkpoint invocation. Will overwrite deepspeed_config
            if provided

    Returns:
        None
    """
    global mpu, num_layers, deepspeed_checkpointing_enabled

    global PARTITION_ACTIVATIONS, CONTIGUOUS_CHECKPOINTING, \
        CPU_CHECKPOINT, SYNCHRONIZE, PROFILE_TIME

    _configure_defaults()

    if mpu_ is not None:
        mpu = mpu_

    # Config-file values first; explicit keyword arguments override them.
    if deepspeed_config is not None:
        _configure_using_config_file(deepspeed_config, mpu=mpu)

    if partition_activations is not None:
        PARTITION_ACTIVATIONS = partition_activations

    if contiguous_checkpointing is not None:
        CONTIGUOUS_CHECKPOINTING = contiguous_checkpointing

    if num_checkpoints is not None:
        num_layers = num_checkpoints

    if checkpoint_in_cpu is not None:
        CPU_CHECKPOINT = checkpoint_in_cpu

    if synchronize is not None:
        SYNCHRONIZE = synchronize

    if profile is not None:
        PROFILE_TIME = profile

    # Contiguous checkpointing requires partitioning and a known layer count.
    if CONTIGUOUS_CHECKPOINTING:
        assert PARTITION_ACTIVATIONS, "Contiguous Checkpointing is only available with partitioned activations. Set partitioned activations to true in deepspeed config"
    if CONTIGUOUS_CHECKPOINTING:
        assert num_layers is not None, "Must specify the number of layers with contiguous memory checkpointing"
def is_configured():
    """True if deepspeed activation checkpointing has been configured
    by calling deepspeed.checkpointing.configure, else returns false

    Arguments:
        None

    Return:
        True of configured, else False
    """
    return deepspeed_checkpointing_enabled
/Lantz-0.3.zip/Lantz-0.3/lantz/drivers/sutter/lambda103.py | from lantz import Feat, DictFeat, Action
from lantz.messagebased import MessageBasedDriver
def logged(func):
    """Placeholder logging decorator: currently returns *func* unchanged."""
    return func
class Lambda103(MessageBasedDriver):
    """High performance, microprocessor-controlled multi-filter wheel system
    for imaging applications requiring up to 3 filter wheels.

    Commands are single raw bytes sent over serial (no terminators), encoded
    with ``chr()``.
    """

    # No read/write terminators: the controller speaks raw single-byte commands.
    DEFAULTS = {'ASRL': {'write_termination': '',
                         'read_termination': '',
                         }}

    def initialize(self):
        super().initialize()
        # Default wheel speed used when encoding position commands.
        self.speed = 1

    @Feat(None, values={True: chr(170), False: chr(172)})
    def open_A(self, value):
        """Open (True) or close (False) shutter A.
        """
        self.send(value)

    @logged
    def flush(self):
        """Flush the serial input/output buffers.

        NOTE(review): relies on a `self.serial` attribute — presumably exposed
        by the underlying lantz driver; confirm it exists on MessageBasedDriver.
        """
        self.serial.flushInput()
        self.serial.flushOutput()
        self.serial.flush()

    # TODO(review): clarify why two wheel selectors map to the same command
    # byte space (see docstring: w=0 covers wheels A and C).
    @DictFeat(None, keys={'A': 0, 'B': 1})
    def position(self, key, value):
        """Set filter wheel position and speed.

        Encodes wheel selector, current speed and position into one byte:
        w = 0 -> Filter wheels A and C
        w = 1 -> Filter wheel B
        """
        command = chr( key * 128 + self.speed * 14 + value)
        self.send(command)

    @Action()
    def motorsON(self):
        """Power on all motors."""
        self.send(chr(206))
        return "Motors ON"

    @Action()
    def status(self):
        """Query and return the controller status byte."""
        return "Status {}".format(self.query(chr(204)))

    @Feat(None, values={True: chr(238), False: chr(239)})
    def remote(self, value):
        """Switch between remote (True) and local (False) control mode."""
        self.send(value)

    @Action()
    def reset(self):
        """Reset the controller."""
        self.send(chr(251))
if __name__ == '__main__':
    import argparse
    import lantz.log

    # Small command-line harness to exercise the filter wheel driver.
    parser = argparse.ArgumentParser(description='Test Sutter Lambda 10-3')
    parser.add_argument('-i', '--interactive', action='store_true',
                        default=False, help='Show interactive GUI')
    parser.add_argument('-p', '--port', type=str, default='17',
                        help='Serial port to connect to')

    args = parser.parse_args()
    lantz.log.log_to_socket(lantz.log.DEBUG)
    with Lambda103(args.port) as inst:
        if args.interactive:
            from lantz.ui.app import start_test_app
            start_test_app(inst)
        else:
            from time import sleep
            inst.remote = True
            inst.open_A = True
            sleep(5)
            inst.open_A = False
            sleep(1)
            # Step through the nine filter positions on wheel A.
            # Bug fix: the original referenced an undefined name `fw` below;
            # the driver instance in scope is `inst`.
            for i in range(9):
                inst.position['A'] = i
                sleep(1)
            sleep(1)
            inst.remote = False
            inst.open_A = False
/ImSwitchUC2-2.1.0.tar.gz/ImSwitchUC2-2.1.0/imswitch/imcontrol/model/interfaces/gxipy/gxwrapper.py | #!/usr/bin/python
# -*-mode:python ; tab-width:4 -*- ex:set tabstop=4 shiftwidth=4 expandtab: -*-
# -*- coding:utf-8 -*-
from ctypes import *
import sys
import os
# Load the vendor GxIAPI shared library for the current platform.
# NOTE(review): if loading fails on Linux/Windows only a message is printed
# and `dll` is left undefined, so later attribute access raises NameError;
# on unsupported platforms `dll` is the sentinel -1.
if sys.platform == 'linux2' or sys.platform == 'linux':
    try:
        dll = CDLL('/usr/lib/libgxiapi.so')
    except OSError:
        print("Cannot find libgxiapi.so.")
elif sys.platform == 'win32':
    try:
        # Hard-coded default SDK install path; required on Python 3.8+ where
        # PATH is no longer searched for DLL dependencies.
        os.add_dll_directory("C:\\Program Files\\Daheng Imaging\\GalaxySDK\\APIDll\\Win64\\")
        #dll = WinDLL('GxIAPI.dll', winmode=0) # https://stackoverflow.com/questions/59330863/cant-import-dll-module-in-python
        mFWD = os.path.dirname(os.path.realpath(__file__))
        # Prefer the copy bundled next to this module, then fall back to the
        # system-wide DLL.  NOTE(review): the bare `except` below swallows
        # every error from the first load attempt, not just load failures.
        try:
            dll = WinDLL(mFWD+'\\dll\\GxIAPI.dll', winmode=0) # https://stackoverflow.com/questions/59330863/cant-import-dll-module-in-python
        except:
            dll = WinDLL('GxIAPI.dll', winmode=1) # https://stackoverflow.com/questions/59330863/cant-import-dll-module-in-python
    except OSError:
        print('Cannot find GxIAPI.dll.')
else:
    dll = -1
# Error code
# Error code
class GxStatusList:
    """Status/error codes returned by the GxIAPI library."""
    SUCCESS = 0                  # Success
    ERROR = -1                   # There is a unspecified internal error that is not expected to occur
    NOT_FOUND_TL = -2            # The TL library cannot be found
    NOT_FOUND_DEVICE = -3        # The device is not found
    OFFLINE = -4                 # The current device is in a offline state
    INVALID_PARAMETER = -5       # Invalid parameter, Generally the pointer is NULL or the input IP and
                                 # Other parameter formats are invalid
    INVALID_HANDLE = -6          # Invalid handle
    INVALID_CALL = -7            # The interface is invalid, which refers to software interface logic error
    INVALID_ACCESS = -8          # The function is currently inaccessible or the device access mode is incorrect
    NEED_MORE_BUFFER = -9        # The user request buffer is insufficient: the user input buffersize during
                                 # the read operation is less than the actual need
    ERROR_TYPE = -10             # The type of FeatureID used by the user is incorrect,
                                 # such as an integer interface using a floating-point function code
    OUT_OF_RANGE = -11           # The value written by the user is crossed
    NOT_IMPLEMENTED = -12        # This function is not currently supported
    NOT_INIT_API = -13           # There is no call to initialize the interface
    TIMEOUT = -14                # Timeout error
    REPEAT_OPENED = -1004        # The device has been opened

    def __init__(self):
        pass
class GxOpenMode:
    """Identifier types accepted when opening a device."""
    SN = 0          # Opens the device via a serial number
    IP = 1          # Opens the device via an IP address
    MAC = 2         # Opens the device via a MAC address
    INDEX = 3       # Opens the device via a serial number(Start from 1)
    USER_ID = 4     # Opens the device via user defined ID

    def __init__(self):
        pass
class GxFrameMask:
    """Bit masks for extracting the type/level fields from a feature ID."""
    TYPE_MASK = 0xF0000000      # high nibble: feature data type
    LEVEL_MASK = 0x0F000000     # next nibble: feature layer/level

    def __init__(self):
        pass
class GxFeatureType:
    """Feature data-type codes encoded in the high nibble of a feature ID."""
    INT = 0x10000000                # Integer type
    FLOAT = 0X20000000              # Floating point type
    ENUM = 0x30000000               # Enum type
    BOOL = 0x40000000               # Boolean type
    STRING = 0x50000000             # String type
    BUFFER = 0x60000000             # Block data type
    COMMAND = 0x70000000            # Command type

    def __init__(self):
        pass
class GxFeatureLevel:
    """Transport-layer level codes encoded in a feature ID."""
    REMOTE_DEV = 0x00000000     # RemoteDevice Layer
    TL = 0x01000000             # TL Layer
    IF = 0x02000000             # Interface Layer
    DEV = 0x03000000            # Device Layer
    DS = 0x04000000             # DataStream Layer

    def __init__(self):
        pass
class GxFeatureID:
    """Feature-code constants for the GX camera API.

    Each value encodes the feature's data type in the top nibble (see
    GxFeatureType: INT/FLOAT/ENUM/BOOL/STRING/BUFFER/COMMAND) and — for the
    0x13/0x14/0x34-prefixed codes — the feature level in the second nibble
    (see GxFeatureLevel); GxFrameMask supplies the corresponding bit masks.
    """
    # ---------------Device Information Section---------------------------
    STRING_DEVICE_VENDOR_NAME = 0x50000000  # The name of the device's vendor
    STRING_DEVICE_MODEL_NAME = 0x50000001  # The model name of the device
    STRING_DEVICE_FIRMWARE_VERSION = 0x50000002  # The version of the device's firmware and software
    STRING_DEVICE_VERSION = 0x50000003  # The version of the device
    STRING_DEVICE_SERIAL_NUMBER = 0x50000004  # A serial number for device
    STRING_FACTORY_SETTING_VERSION = 0x50000006  # The version of the device's Factory Setting
    STRING_DEVICE_USER_ID = 0x50000007  # A user programmable string
    INT_DEVICE_LINK_SELECTOR = 0x10000008  # Selects which Link of the device to control
    ENUM_DEVICE_LINK_THROUGHPUT_LIMIT_MODE = 0x30000009  # DeviceLinkThroughputLimit switch
    INT_DEVICE_LINK_THROUGHPUT_LIMIT = 0x1000000a  # Limits the maximum bandwidth of the data
    INT_DEVICE_LINK_CURRENT_THROUGHPUT = 0x1000000b  # Current bandwidth of the data
    COMMAND_DEVICE_RESET = 0x7000000c  # Device reset
    INT_TIMESTAMP_TICK_FREQUENCY = 0x1000000d  # Timestamp tick frequency
    COMMAND_TIMESTAMP_LATCH = 0x7000000e  # Timestamp latch
    COMMAND_TIMESTAMP_RESET = 0x7000000f  # Timestamp reset
    COMMAND_TIMESTAMP_LATCH_RESET = 0x70000010  # Timestamp latch reset
    INT_TIMESTAMP_LATCH_VALUE = 0x10000011  # The value of timestamp latch
    # ---------------ImageFormat Section----------------------------------
    INT_SENSOR_WIDTH = 0x100003e8  # The actual width of the camera's sensor in pixels
    INT_SENSOR_HEIGHT = 0x100003e9  # The actual height of the camera's sensor in pixels
    INT_WIDTH_MAX = 0x100003ea  # Width max[read_only]
    INT_HEIGHT_MAX = 0x100003eb  # Height max[read_only]
    INT_OFFSET_X = 0x100003ec  # The X offset for the area of interest
    INT_OFFSET_Y = 0x100003ed  # The Y offset for the area of interest
    INT_WIDTH = 0x100003ee  # the width of the area of interest in pixels
    INT_HEIGHT = 0x100003ef  # the height of the area of interest in pixels
    INT_BINNING_HORIZONTAL = 0x100003f0  # Horizontal pixel Binning
    INT_BINNING_VERTICAL = 0x100003f1  # Vertical pixel Binning
    INT_DECIMATION_HORIZONTAL = 0x100003f2  # Horizontal pixel sampling
    INT_DECIMATION_VERTICAL = 0x100003f3  # Vertical pixel sampling
    ENUM_PIXEL_SIZE = 0x300003f4  # Pixel depth, Reference GxPixelSizeEntry
    ENUM_PIXEL_COLOR_FILTER = 0x300003f5  # Bayer format, Reference GxPixelColorFilterEntry
    ENUM_PIXEL_FORMAT = 0x300003f6  # Pixel format, Reference GxPixelFormatEntry
    BOOL_REVERSE_X = 0x400003f7  # Horizontal flipping
    BOOL_REVERSE_Y = 0x400003f8  # Vertical flipping
    ENUM_TEST_PATTERN = 0x300003f9  # Test pattern, Reference GxTestPatternEntry
    ENUM_TEST_PATTERN_GENERATOR_SELECTOR = 0x300003fa  # The source of test pattern, reference GxTestPatternGeneratorSelectorEntry
    ENUM_REGION_SEND_MODE = 0x300003fb  # ROI region output mode, reference GxRegionSendModeEntry
    ENUM_REGION_MODE = 0x300003fc  # ROI region output switch
    ENUM_REGION_SELECTOR = 0x300003fd  # ROI region select, reference GxRegionSelectorEntry
    INT_CENTER_WIDTH = 0x100003fe  # Window width
    INT_CENTER_HEIGHT = 0x100003ff  # Window height
    ENUM_BINNING_HORIZONTAL_MODE = 0x30000400  # Binning horizontal mode
    ENUM_BINNING_VERTICAL_MODE = 0x30000401  # Binning vertical mode
    # ---------------TransportLayer Section-------------------------------
    INT_PAYLOAD_SIZE = 0x100007d0  # Size of images in byte
    BOOL_GEV_CURRENT_IP_CONFIGURATION_LLA = 0x400007d1  # IP configuration by LLA.
    BOOL_GEV_CURRENT_IP_CONFIGURATION_DHCP = 0x400007d2  # IP configuration by DHCP
    BOOL_GEV_CURRENT_IP_CONFIGURATION_PERSISTENT_IP = 0x400007d3  # IP configuration by PersistentIP
    INT_ESTIMATED_BANDWIDTH = 0x100007d4  # Estimated Bandwidth in Bps
    INT_GEV_HEARTBEAT_TIMEOUT = 0x100007d5  # The heartbeat timeout in milliseconds
    INT_GEV_PACKET_SIZE = 0x100007d6  # The packet size in bytes for each packet
    INT_GEV_PACKET_DELAY = 0x100007d7  # A delay between the transmission of each packet
    INT_GEV_LINK_SPEED = 0x100007d8  # The connection speed in Mbps
    # ---------------AcquisitionTrigger Section---------------------------
    ENUM_ACQUISITION_MODE = 0x30000bb8  # The mode of acquisition, Reference: GxAcquisitionModeEntry
    COMMAND_ACQUISITION_START = 0x70000bb9  # The command for starts the acquisition of images
    COMMAND_ACQUISITION_STOP = 0x70000bba  # The command for stop the acquisition of images
    INT_ACQUISITION_SPEED_LEVEL = 0x10000bbb  # The level for acquisition speed
    INT_ACQUISITION_FRAME_COUNT = 0x10000bbc  # Number of frames to acquire in MultiFrame mode -- TODO confirm against SDK manual
    ENUM_TRIGGER_MODE = 0x30000bbd  # Trigger mode, Reference:GxTriggerModeEntry
    COMMAND_TRIGGER_SOFTWARE = 0x70000bbe  # The command for generates a software trigger signal
    ENUM_TRIGGER_ACTIVATION = 0x30000bbf  # Trigger polarity, Reference GxTriggerActivationEntry
    ENUM_TRIGGER_SWITCH = 0x30000bc0  # The switch of External trigger
    FLOAT_EXPOSURE_TIME = 0x20000bc1  # Exposure time
    ENUM_EXPOSURE_AUTO = 0x30000bc2  # Exposure auto
    FLOAT_TRIGGER_FILTER_RAISING = 0x20000bc3  # The Value of rising edge triggered filter
    FLOAT_TRIGGER_FILTER_FALLING = 0x20000bc4  # The Value of falling edge triggered filter
    ENUM_TRIGGER_SOURCE = 0x30000bc5  # Trigger source, Reference GxTriggerSourceEntry
    ENUM_EXPOSURE_MODE = 0x30000bc6  # Exposure mode, Reference GxExposureModeEntry
    ENUM_TRIGGER_SELECTOR = 0x30000bc7  # Trigger type, Reference GxTriggerSelectorEntry
    FLOAT_TRIGGER_DELAY = 0x20000bc8  # The trigger delay in microsecond
    ENUM_TRANSFER_CONTROL_MODE = 0x30000bc9  # The control method for the transfers, Reference GxTransferControlModeEntry
    ENUM_TRANSFER_OPERATION_MODE = 0x30000bca  # The operation method for the transfers, Reference GxTransferOperationModeEntry
    COMMAND_TRANSFER_START = 0x70000bcb  # Starts the streaming of data blocks out of the device
    INT_TRANSFER_BLOCK_COUNT = 0x10000bcc  # The number of data Blocks that the device should stream before stopping
    BOOL_FRAME_STORE_COVER_ACTIVE = 0x40000bcd  # The switch for frame cover
    ENUM_ACQUISITION_FRAME_RATE_MODE = 0x30000bce  # The switch for Control frame rate
    FLOAT_ACQUISITION_FRAME_RATE = 0x20000bcf  # The value for Control frame rate
    FLOAT_CURRENT_ACQUISITION_FRAME_RATE = 0x20000bd0  # The maximum allowed frame acquisition rate
    ENUM_FIXED_PATTERN_NOISE_CORRECT_MODE = 0x30000bd1  # The switch of fixed pattern noise correct
    INT_ACQUISITION_BURST_FRAME_COUNT = 0x10000bd6  # The acquisition burst frame count
    ENUM_ACQUISITION_STATUS_SELECTOR = 0x30000bd7  # The selector of acquisition status
    BOOL_ACQUISITION_STATUS = 0x40000bd8  # The acquisition status
    FLOAT_EXPOSURE_DELAY = 0x2000765c  # The exposure delay
    # ----------------DigitalIO Section-----------------------------------
    ENUM_USER_OUTPUT_SELECTOR = 0x30000fa0  # selects user settable output signal, Reference GxUserOutputSelectorEntry
    BOOL_USER_OUTPUT_VALUE = 0x40000fa1  # The state of the output signal
    ENUM_USER_OUTPUT_MODE = 0x30000fa2  # UserIO output mode, Reference GxUserOutputModeEntry
    ENUM_STROBE_SWITCH = 0x30000fa3  # Strobe switch
    ENUM_LINE_SELECTOR = 0x30000fa4  # Line selector, Reference GxLineSelectorEntry
    ENUM_LINE_MODE = 0x30000fa5  # Line mode, Reference GxLineModeEntry
    BOOL_LINE_INVERTER = 0x40000fa6  # Pin level reversal
    ENUM_LINE_SOURCE = 0x30000fa7  # line source, Reference GxLineSourceEntry
    BOOL_LINE_STATUS = 0x40000fa8  # line status
    INT_LINE_STATUS_ALL = 0x10000fa9  # all line status
    FLOAT_PULSE_WIDTH = 0x20000faa  # Output pulse width -- units not documented here, TODO confirm
    # ----------------AnalogControls Section------------------------------
    ENUM_GAIN_AUTO = 0x30001388  # gain auto, Reference GxGainAutoEntry
    ENUM_GAIN_SELECTOR = 0x30001389  # selects gain channel, Reference GxGainSelectorEntry
    ENUM_BLACK_LEVEL_AUTO = 0x3000138b  # Black level auto, Reference GxBlackLevelAutoEntry
    ENUM_BLACK_LEVEL_SELECTOR = 0x3000138c  # Black level channel, Reference GxBlackLevelSelectEntry
    ENUM_BALANCE_WHITE_AUTO = 0x3000138e  # Balance white auto, Reference GxBalanceWhiteAutoEntry
    ENUM_BALANCE_RATIO_SELECTOR = 0x3000138f  # selects Balance white channel, Reference GxBalanceRatioSelectorEntry
    FLOAT_BALANCE_RATIO = 0x20001390  # Balance white channel ratio
    ENUM_COLOR_CORRECT = 0x30001391  # Color correct, Reference GxColorCorrectEntry
    ENUM_DEAD_PIXEL_CORRECT = 0x30001392  # Pixel correct, Reference GxDeadPixelCorrectEntry
    FLOAT_GAIN = 0x20001393  # gain
    FLOAT_BLACK_LEVEL = 0x20001394  # Black level
    BOOL_GAMMA_ENABLE = 0x40001395  # Gamma enable bit
    ENUM_GAMMA_MODE = 0x30001396  # Gamma mode
    FLOAT_GAMMA = 0x20001397  # The value of Gamma
    INT_DIGITAL_SHIFT = 0x10001398  # Digital shift -- semantics not documented here, TODO confirm
    # ---------------CustomFeature Section--------------------------------
    INT_ADC_LEVEL = 0x10001770  # AD conversion level
    INT_H_BLANKING = 0x10001771  # Horizontal blanking
    INT_V_BLANKING = 0x10001772  # Vertical blanking
    STRING_USER_PASSWORD = 0x50001773  # User encrypted zone cipher
    STRING_VERIFY_PASSWORD = 0x50001774  # User encrypted zone check cipher
    BUFFER_USER_DATA = 0x60001775  # User encrypted area content
    INT_GRAY_VALUE = 0x10001776  # Expected gray value
    ENUM_AA_LIGHT_ENVIRONMENT = 0x30001777  # Gain auto, Exposure auto, Light environment type,
    # Reference GxAALightEnvironmentEntry
    INT_AAROI_OFFSETX = 0x10001778  # The X offset for the rect of interest in pixels for 2A
    INT_AAROI_OFFSETY = 0x10001779  # The Y offset for the rect of interest in pixels for 2A
    INT_AAROI_WIDTH = 0x1000177a  # The width offset for the rect of interest in pixels for 2A
    INT_AAROI_HEIGHT = 0x1000177b  # The height offset for the rect of interest in pixels for 2A
    FLOAT_AUTO_GAIN_MIN = 0x2000177c  # Automatic gain minimum
    FLOAT_AUTO_GAIN_MAX = 0x2000177d  # Automatic gain maximum
    FLOAT_AUTO_EXPOSURE_TIME_MIN = 0x2000177e  # Automatic exposure minimum
    FLOAT_AUTO_EXPOSURE_TIME_MAX = 0x2000177f  # Automatic exposure maximum
    BUFFER_FRAME_INFORMATION = 0x60001780  # Image frame information
    INT_CONTRAST_PARAM = 0x10001781  # Contrast parameter
    FLOAT_GAMMA_PARAM = 0x20001782  # Gamma parameter
    INT_COLOR_CORRECTION_PARAM = 0x10001783  # Color correction param
    ENUM_IMAGE_GRAY_RAISE_SWITCH = 0x30001784  # Image gray raise, Reference GxImageGrayRaiseSwitchEntry
    ENUM_AWB_LAMP_HOUSE = 0x30001785  # Automatic white balance light source
    # Reference GxAWBLampHouseEntry
    INT_AWBROI_OFFSETX = 0x10001786  # Offset_X of automatic white balance region
    INT_AWBROI_OFFSETY = 0x10001787  # Offset_Y of automatic white balance region
    INT_AWBROI_WIDTH = 0x10001788  # Width of automatic white balance region
    INT_AWBROI_HEIGHT = 0x10001789  # Height of automatic white balance region
    ENUM_SHARPNESS_MODE = 0x3000178a  # Sharpness mode, Reference GxSharpnessModeEntry
    FLOAT_SHARPNESS = 0x2000178b  # Sharpness
    # ---------------UserSetControl Section-------------------------------
    ENUM_USER_SET_SELECTOR = 0x30001b58  # Parameter group selection, Reference GxUserSetSelectorEntry
    COMMAND_USER_SET_LOAD = 0x70001b59  # Load parameter group
    COMMAND_USER_SET_SAVE = 0x70001b5a  # Save parameter group
    ENUM_USER_SET_DEFAULT = 0x30001b5b  # Startup parameter group, Reference GxUserSetDefaultEntry
    # ---------------Event Section----------------------------------------
    ENUM_EVENT_SELECTOR = 0x30001f40  # Event source select, Reference GxEventSelectorEntry
    ENUM_EVENT_NOTIFICATION = 0x30001f41  # Event enabled, Reference GxEventNotificationEntry
    INT_EVENT_EXPOSURE_END = 0x10001f42  # Exposure end event
    INT_EVENT_EXPOSURE_END_TIMESTAMP = 0x10001f43  # The timestamp of Exposure end event
    INT_EVENT_EXPOSURE_END_FRAME_ID = 0x10001f44  # The frame id of Exposure end event
    INT_EVENT_BLOCK_DISCARD = 0x10001f45  # Block discard event
    INT_EVENT_BLOCK_DISCARD_TIMESTAMP = 0x10001f46  # The timestamp of Block discard event
    INT_EVENT_OVERRUN = 0x10001f47  # Event queue overflow event
    INT_EVENT_OVERRUN_TIMESTAMP = 0x10001f48  # The timestamp of event queue overflow event
    INT_EVENT_FRAME_START_OVER_TRIGGER = 0x10001f49  # Trigger signal shield event
    INT_EVENT_FRAME_START_OVER_TRIGGER_TIMESTAMP = 0x10001f4a  # The timestamp of trigger signal shield event
    INT_EVENT_BLOCK_NOT_EMPTY = 0x10001f4b  # Frame memory not empty event
    INT_EVENT_BLOCK_NOT_EMPTY_TIMESTAMP = 0x10001f4c  # The timestamp of frame memory not empty event
    INT_EVENT_INTERNAL_ERROR = 0x10001f4d  # Internal erroneous event
    INT_EVENT_INTERNAL_ERROR_TIMESTAMP = 0x10001f4e  # The timestamp of internal erroneous event
    # ---------------LUT Section------------------------------------------
    ENUM_LUT_SELECTOR = 0x30002328  # Select lut, Reference GxLutSelectorEntry
    BUFFER_LUT_VALUE_ALL = 0x60002329  # Lut data
    BOOL_LUT_ENABLE = 0x4000232a  # Lut enable bit
    INT_LUT_INDEX = 0x1000232b  # Lut index
    INT_LUT_VALUE = 0x1000232c  # Lut value
    # ---------------Color Transformation Control-------------------------
    ENUM_COLOR_TRANSFORMATION_MODE = 0x30002af8  # Color transformation mode
    BOOL_COLOR_TRANSFORMATION_ENABLE = 0x40002af9  # Color transformation enable bit
    ENUM_COLOR_TRANSFORMATION_VALUE_SELECTOR = 0x30002afa  # The selector of color transformation value
    FLOAT_COLOR_TRANSFORMATION_VALUE = 0x20002afb  # The value of color transformation
    # ---------------ChunkData Section------------------------------------
    BOOL_CHUNK_MODE_ACTIVE = 0x40002711  # Enable frame information
    ENUM_CHUNK_SELECTOR = 0x30002712  # Select frame information channel, Reference GxChunkSelectorEntry
    BOOL_CHUNK_ENABLE = 0x40002713  # Enable single frame information channel
    # ---------------Device Feature---------------------------------------
    INT_COMMAND_TIMEOUT = 0x13000000  # The time of command timeout
    INT_COMMAND_RETRY_COUNT = 0x13000001  # Command retry times
    # ---------------DataStream Feature-----------------------------------
    INT_ANNOUNCED_BUFFER_COUNT = 0x14000000  # The number of Buffer declarations
    INT_DELIVERED_FRAME_COUNT = 0x14000001  # Number of received frames (including remnant frames)
    INT_LOST_FRAME_COUNT = 0x14000002  # Number of lost frames caused by buffer deficiency
    INT_INCOMPLETE_FRAME_COUNT = 0x14000003  # Number of residual frames received
    INT_DELIVERED_PACKET_COUNT = 0x14000004  # The number of packets received
    INT_RESEND_PACKET_COUNT = 0x14000005  # Number of retransmission packages
    INT_RESCUED_PACKED_COUNT = 0x14000006  # Retransmission success package number
    INT_RESEND_COMMAND_COUNT = 0x14000007  # Retransmission command times
    INT_UNEXPECTED_PACKED_COUNT = 0x14000008  # Exception packet number
    INT_MAX_PACKET_COUNT_IN_ONE_BLOCK = 0x14000009  # Data block maximum retransmission number
    INT_MAX_PACKET_COUNT_IN_ONE_COMMAND = 0x1400000a  # The maximum number of packets contained in one command
    INT_RESEND_TIMEOUT = 0x1400000b  # Retransmission timeout time
    INT_MAX_WAIT_PACKET_COUNT = 0x1400000c  # Maximum waiting packet number
    ENUM_RESEND_MODE = 0x3400000d  # Retransmission mode, Reference GxDSResendModeEntry
    INT_MISSING_BLOCK_ID_COUNT = 0x1400000e  # BlockID lost number
    INT_BLOCK_TIMEOUT = 0x1400000f  # Data block timeout time
    INT_STREAM_TRANSFER_SIZE = 0x14000010  # Data block size
    INT_STREAM_TRANSFER_NUMBER_URB = 0x14000011  # Number of data blocks
    INT_MAX_NUM_QUEUE_BUFFER = 0x14000012  # The maximum Buffer number of the collection queue
    INT_PACKET_TIMEOUT = 0x14000013  # Packet timeout time

    def __init__(self):
        pass
class GxDeviceIPInfo(Structure):
    """Network configuration of a device and of the NIC through which it is reached."""
    _fields_ = [
        ('device_id', c_char * 68),        # The unique identifier of the device.
        ('mac', c_char * 32),              # MAC address
        ('ip', c_char * 32),               # IP address
        ('subnet_mask', c_char * 32),      # Subnet mask
        ('gateway', c_char * 32),          # Gateway
        ('nic_mac', c_char * 32),          # The MAC address of the corresponding NIC(Network Interface Card).
        ('nic_ip', c_char * 32),           # The IP of the corresponding NIC
        ('nic_subnet_mask', c_char * 32),  # The subnet mask of the corresponding NIC
        ('nic_gateWay', c_char * 32),      # The Gateway of the corresponding NIC
        ('nic_description', c_char * 132), # The description of the corresponding NIC
        ('reserved', c_char * 512),        # Reserved 512 bytes
    ]

    def __str__(self):
        """Return a readable name:value dump of every field.

        Bug fix: the original formatted the whole (name, ctype) tuple as the
        label; only the field name (the tuple's first item) should be shown.
        """
        return "GxDeviceIPInfo\n%s" % "\n".join(
            "%s:\t%s" % (name, getattr(self, name)) for name, _ in self._fields_)
class GxDeviceBaseInfo(Structure):
    """Basic identification of an enumerated device (filled in by gx_get_all_device_base_info)."""
    _fields_ = [
        ('vendor_name', c_char * 32),    # Vendor name
        ('model_name', c_char * 32),     # Model name
        ('serial_number', c_char * 32),  # Serial number
        ('display_name', c_char * 132),  # Display name
        ('device_id', c_char * 68),      # The unique identifier of the device.
        ('user_id', c_char * 68),        # User's custom name
        ('access_status', c_int),        # Access status that is currently supported by the device
                                         # Refer to GxAccessStatus
        ('device_class', c_int),         # Device type. Such as USB2.0, GEV.
        ('reserved', c_char * 300),      # Reserved 300 bytes
    ]

    def __str__(self):
        """Return a readable name:value dump of every field.

        Bug fix: print the field name, not the whole (name, ctype) tuple.
        """
        return "GxDeviceBaseInfo\n%s" % "\n".join(
            "%s:\t%s" % (name, getattr(self, name)) for name, _ in self._fields_)
class GxOpenParam(Structure):
    """Parameters passed to gx_open_device: identifier string plus open/access modes."""
    _fields_ = [
        ('content', c_char_p),   # identifier text (SN/IP/MAC/index/user id, per open_mode)
        ('open_mode', c_uint),   # how `content` is interpreted, see GxOpenMode
        ('access_mode', c_uint), # requested access mode -- values defined elsewhere, TODO confirm
    ]

    def __str__(self):
        """Return a readable name:value dump of every field.

        Bug fix: print the field name, not the whole (name, ctype) tuple.
        """
        return "GxOpenParam\n%s" % "\n".join(
            "%s:\t%s" % (name, getattr(self, name)) for name, _ in self._fields_)
class GxFrameCallbackParam(Structure):
    """Per-frame data handed to a registered capture callback."""
    _fields_ = [
        ('user_param_index', c_void_p),  # User private data
        ('status', c_int),               # The return state of the image
        ('image_buf', c_void_p),         # Image buff address
        ('image_size', c_int),           # Image data size, Including frame information
        ('width', c_int),                # Image width
        ('height', c_int),               # Image height
        ('pixel_format', c_int),         # Image PixFormat
        ('frame_id', c_ulonglong),       # The frame id of the image
        ('timestamp', c_ulonglong),      # Time stamp of image
        ('reserved', c_int),             # Reserved
    ]

    def __str__(self):
        """Return a readable name:value dump of every field.

        Bug fix: print the field name, not the whole (name, ctype) tuple.
        """
        return "GxFrameCallbackParam\n%s" % "\n".join(
            "%s:\t%s" % (name, getattr(self, name)) for name, _ in self._fields_)
class GxFrameData(Structure):
    """Descriptor of one acquired frame as returned by the stream-grab calls."""
    _fields_ = [
        ('status', c_int),           # The return state of the image
        ('image_buf', c_void_p),     # Image buff address
        ('width', c_int),            # Image width
        ('height', c_int),           # Image height
        ('pixel_format', c_int),     # Image PixFormat
        ('image_size', c_int),       # Image data size, Including frame information
        ('frame_id', c_ulonglong),   # The frame id of the image
        ('timestamp', c_ulonglong),  # Time stamp of image
        ('buf_id', c_ulonglong),     # Image buff ID
        ('reserved', c_int),         # Reserved
    ]

    def __str__(self):
        """Return a readable name:value dump of every field.

        Bug fix: print the field name, not the whole (name, ctype) tuple.
        """
        return "GxFrameData\n%s" % "\n".join(
            "%s:\t%s" % (name, getattr(self, name)) for name, _ in self._fields_)
class GxIntRange(Structure):
    """Minimum, maximum and increment of an integer feature (see gx_get_int_range)."""
    _fields_ = [
        ('min', c_ulonglong),   # smallest accepted value
        ('max', c_ulonglong),   # largest accepted value
        ('inc', c_ulonglong),   # step between consecutive valid values
        ('reserved', c_int * 8),
    ]

    def __str__(self):
        """Return a readable name:value dump of every field.

        Bug fix: print the field name, not the whole (name, ctype) tuple.
        """
        return "GxIntRange\n%s" % "\n".join(
            "%s:\t%s" % (name, getattr(self, name)) for name, _ in self._fields_)
class GxFloatRange(Structure):
    """Minimum, maximum, increment and unit of a float feature (see gx_get_float_range)."""
    _fields_ = [
        ('min', c_double),          # smallest accepted value
        ('max', c_double),          # largest accepted value
        ('inc', c_double),          # step between consecutive valid values
        ('unit', c_char * 8),       # textual unit of the feature
        ('inc_is_valid', c_bool),   # whether `inc` is meaningful for this feature
        ('reserved', c_char * 31),
    ]

    def __str__(self):
        """Return a readable name:value dump of every field.

        Bug fix: print the field name, not the whole (name, ctype) tuple.
        """
        return "GxFloatRange\n%s" % "\n".join(
            "%s:\t%s" % (name, getattr(self, name)) for name, _ in self._fields_)
class GxEnumDescription(Structure):
    """One enumeration entry: numeric value plus its symbolic name (see gx_get_enum_description)."""
    _fields_ = [
        ('value', c_longlong),     # Enum value
        ('symbolic', c_char * 64), # Character description
        ('reserved', c_int * 8),
    ]

    def __str__(self):
        """Return a readable name:value dump of every field.

        Bug fix: print the field name, not the whole (name, ctype) tuple.
        """
        return "GxEnumDescription\n%s" % "\n".join(
            "%s:\t%s" % (name, getattr(self, name)) for name, _ in self._fields_)
if hasattr(dll, 'GXInitLib'):
    def gx_init_lib():
        """
        :brief   Initialise the SDK library; must be called before any other GX call.
        :return: status code, see GxStatusList
        """
        return dll.GXInitLib()
if hasattr(dll, 'GXCloseLib'):
    def gx_close_lib():
        """
        :brief   Shut the SDK library down and release the resources it holds.
        :return: status code, see GxStatusList
        """
        return dll.GXCloseLib()
if hasattr(dll, 'GXGetLastError'):
    def gx_get_last_error(size=1024):
        """
        :brief   Fetch the most recent error recorded by the SDK.
        :param size: capacity of the buffer receiving the description text
                     Type: Int, Minimum: 0
        :return: status:      state return value, see GxStatusList
                 err_code:    the last error code
                 err_content: textual description of the last error
        """
        code_c = c_int()
        text_buff = create_string_buffer(size)
        text_len = c_size_t(size)
        status = dll.GXGetLastError(byref(code_c), byref(text_buff), byref(text_len))
        # text_len now holds the actual length; drop the trailing NUL byte.
        text = string_at(text_buff, text_len.value - 1)
        return status, code_c.value, string_decoding(text)
if hasattr(dll, 'GXUpdateDeviceList'):
    def gx_update_device_list(time_out=200):
        """
        :brief   Enumerate all devices reachable on the local subnet.
        :param time_out: enumeration timeout in milliseconds
                         Type: Int, Minimum: 0
        :return: status:     state return value, see GxStatusList
                 device_num: number of devices found
        """
        count_c = c_uint()
        status = dll.GXUpdateDeviceList(byref(count_c), c_uint(time_out))
        return status, count_c.value
if hasattr(dll, 'GXUpdateAllDeviceList'):
    def gx_update_all_device_list(time_out=200):
        """
        :brief   Enumerate all devices reachable on the entire network.
        :param time_out: enumeration timeout in milliseconds
                         Type: Int, Minimum: 0
        :return: status:     state return value, see GxStatusList
                 device_num: number of devices found
        """
        count_c = c_uint()
        status = dll.GXUpdateAllDeviceList(byref(count_c), c_uint(time_out))
        return status, count_c.value
if hasattr(dll, 'GXGetAllDeviceBaseInfo'):
    def gx_get_all_device_base_info(devices_num):
        """
        :brief   Get the basic information of all enumerated devices.
        :param devices_num: the number of devices (as reported by the enumeration call)
                            Type: Int, Minimum: 0
        :return: status:       state return value, see GxStatusList
                 devices_info: array of GxDeviceBaseInfo, one per device
                               (doc fix: the original docstring wrongly described
                               the return as GxDeviceIPInfo)
        """
        devices_info = (GxDeviceBaseInfo * devices_num)()
        buf_size_c = c_size_t(sizeof(GxDeviceBaseInfo) * devices_num)
        status = dll.GXGetAllDeviceBaseInfo(byref(devices_info), byref(buf_size_c))
        return status, devices_info
if hasattr(dll, 'GXGetDeviceIPInfo'):
    def gx_get_device_ip_info(index):
        """
        :brief   Get the network configuration of an enumerated device.
        :param index: device index
                      Type: Int, Minimum: 1
        :return: status:         state return value, see GxStatusList
                 device_ip_info: filled-in GxDeviceIPInfo structure
        """
        device_ip_info = GxDeviceIPInfo()
        status = dll.GXGetDeviceIPInfo(c_uint(index), byref(device_ip_info))
        return status, device_ip_info
if hasattr(dll, 'GXOpenDeviceByIndex'):
    def gx_open_device_by_index(index):
        """
        :brief   Open a device by its enumeration index (1, 2, 3, ...).
        :param index: device index
                      Type: Int, Minimum: 1
        :return: status: state return value, see GxStatusList
                 handle: device handle returned by the SDK
        """
        handle_c = c_void_p()
        status = dll.GXOpenDeviceByIndex(c_uint(index), byref(handle_c))
        return status, handle_c.value
if hasattr(dll, 'GXOpenDevice'):
    def gx_open_device(open_param):
        """
        :brief   Open a device by a unique identifier (SN, IP, MAC, index, ...).
        :param open_param: user-configured open parameters
                           Type: GxOpenParam
        :return: status: state return value, see GxStatusList
                 handle: device handle returned by the SDK
        """
        handle_c = c_void_p()
        status = dll.GXOpenDevice(byref(open_param), byref(handle_c))
        return status, handle_c.value
if hasattr(dll, 'GXCloseDevice'):
    def gx_close_device(handle):
        """
        :brief   Close the device identified by the given handle.
        :param handle: device handle to close
                       Type: Long, Greater than 0
        :return: status: state return value, see GxStatusList
        """
        status = dll.GXCloseDevice(c_void_p(handle))
        return status
'''
if hasattr(dll, 'GXGetDevicePersistentIpAddress'):
def gx_get_device_persistent_ip_address(handle, ip_length=16, subnet_mask_length=16, default_gateway_length=16):
"""
:brief Get the persistent IP information of the device
:param handle: The handle of the device
:param ip_length: The character string length of the device persistent IP address.
:param subnet_mask_length: The character string length of the device persistent subnet mask.
:param default_gateway_length: The character string length of the device persistent gateway
:return: status: State return value, See detail in GxStatusList
ip: The device persistent IP address(str)
subnet_mask: The device persistent subnet mask(str)
default_gateway: The device persistent gateway
"""
handle_c = c_void_p()
handle_c.value = handle
ip_length_c = c_uint()
ip_length_c.value = ip_length
ip_c = create_string_buffer(ip_length)
subnet_mask_length_c = c_uint()
subnet_mask_length_c.value = subnet_mask_length
subnet_mask_c = create_string_buffer(subnet_mask_length)
default_gateway_length_c = c_uint()
default_gateway_length_c.value = default_gateway_length
default_gateway_c = create_string_buffer(default_gateway_length)
status = dll.GXGetDevicePersistentIpAddress(handle_c, byref(ip_c), byref(ip_length_c),
byref(subnet_mask_c), byref(subnet_mask_length_c),
byref(default_gateway_c), byref(default_gateway_length_c))
ip = string_at(ip_c, ip_length_c.value-1)
subnet_mask = string_at(subnet_mask_c, subnet_mask_length_c.value-1)
default_gateway = string_at(default_gateway_c, default_gateway_length_c.value-1)
return status, string_decoding(ip), string_decoding(subnet_mask), string_decoding(default_gateway)
if hasattr(dll, 'GXSetDevicePersistentIpAddress'):
def gx_set_device_persistent_ip_address(handle, ip, subnet_mask, default_gate_way):
"""
:brief Set the persistent IP information of the device
:param handle: The handle of the device
:param ip: The persistent IP character string of the device(str)
:param subnet_mask: The persistent subnet mask character string of the device(str)
:param default_gate_way: The persistent gateway character string of the device(str)
:return: status: State return value, See detail in GxStatusList
"""
handle_c = c_void_p()
handle_c.value = handle
ip_c = create_string_buffer(string_encoding(ip))
subnet_mask_c = create_string_buffer(string_encoding(subnet_mask))
default_gate_way_c = create_string_buffer(string_encoding(default_gate_way))
status = dll.GXSetDevicePersistentIpAddress(handle_c, byref(ip_c), byref(subnet_mask_c),
byref(default_gate_way_c))
return status
'''
if hasattr(dll, 'GXGetFeatureName'):
    def gx_get_feature_name(handle, feature_id):
        """
        :brief   Get the display string for a feature code.
        :param handle: device handle
                       Type: Long, Greater than 0
        :param feature_id: feature code ID
                           Type: Int, Greater than 0
        :return: status: state return value, see GxStatusList
                 name:   textual description of the feature code
        """
        handle_c = c_void_p(handle)
        feature_c = c_int(feature_id)
        size_c = c_size_t()
        # First call with a NULL buffer only queries the required length.
        dll.GXGetFeatureName(handle_c, feature_c, None, byref(size_c))
        name_buff = create_string_buffer(size_c.value)
        status = dll.GXGetFeatureName(handle_c, feature_c, byref(name_buff), byref(size_c))
        name = string_at(name_buff, size_c.value - 1)
        return status, string_decoding(name)
if hasattr(dll, 'GXIsImplemented'):
    def gx_is_implemented(handle, feature_id):
        """
        :brief   Ask whether the camera supports a given feature.
        :param handle: device handle
                       Type: Long, Greater than 0
        :param feature_id: feature code ID
                           Type: int, Greater than 0
        :return: status:         state return value, see GxStatusList
                 is_implemented: True if the feature is supported
        """
        flag_c = c_bool()
        status = dll.GXIsImplemented(c_void_p(handle), c_int(feature_id), byref(flag_c))
        return status, flag_c.value
if hasattr(dll, 'GXIsReadable'):
    def gx_is_readable(handle, feature_id):
        """
        :brief   Ask whether a feature code is currently readable.
        :param handle: device handle
                       Type: Long, Greater than 0
        :param feature_id: feature code ID
                           Type: int, Greater than 0
        :return: status:      state return value, see GxStatusList
                 is_readable: True if the feature can currently be read
        """
        flag_c = c_bool()
        status = dll.GXIsReadable(c_void_p(handle), c_int(feature_id), byref(flag_c))
        return status, flag_c.value
if hasattr(dll, 'GXIsWritable'):
    def gx_is_writable(handle, feature_id):
        """
        :brief   Ask whether a feature code is currently writable.
        :param handle: device handle
                       Type: Long, Greater than 0
        :param feature_id: feature code ID
                           Type: int, Greater than 0
        :return: status:       state return value, see GxStatusList
                 is_writeable: True if the feature can currently be written
        """
        flag_c = c_bool()
        status = dll.GXIsWritable(c_void_p(handle), c_int(feature_id), byref(flag_c))
        return status, flag_c.value
if hasattr(dll, 'GXGetIntRange'):
    def gx_get_int_range(handle, feature_id):
        """
        :brief   Get minimum, maximum and step of an integer feature.
        :param handle: device handle
                       Type: Long, Greater than 0
        :param feature_id: feature code ID
                           Type: int, Greater than 0
        :return: status:    state return value, see GxStatusList
                 int_range: filled-in GxIntRange structure
        """
        int_range = GxIntRange()
        status = dll.GXGetIntRange(c_void_p(handle), c_int(feature_id), byref(int_range))
        return status, int_range
if hasattr(dll, 'GXGetInt'):
    def gx_get_int(handle, feature_id):
        """
        :brief   Read the current value of an integer feature.
        :param handle: device handle
                       Type: Long, Greater than 0
        :param feature_id: feature code ID
                           Type: int, Greater than 0
        :return: status:    state return value, see GxStatusList
                 int_value: current value of the feature
        """
        value_c = c_int64()
        status = dll.GXGetInt(c_void_p(handle), c_int(feature_id), byref(value_c))
        return status, value_c.value
if hasattr(dll, 'GXSetInt'):
    def gx_set_int(handle, feature_id, int_value):
        """
        :brief   Write an integer feature.
        :param handle: device handle
                       Type: Long, Greater than 0
        :param feature_id: feature code ID
                           Type: int, Greater than 0
        :param int_value: value to write
                          Type: long, Minimum: 0
        :return: status: state return value, see GxStatusList
        """
        status = dll.GXSetInt(c_void_p(handle), c_int(feature_id), c_int64(int_value))
        return status
if hasattr(dll, 'GXGetFloatRange'):
    def gx_get_float_range(handle, feature_id):
        """
        :brief   Get minimum, maximum, step and unit of a float feature.
        :param handle: device handle
                       Type: Long, Greater than 0
        :param feature_id: feature code ID
                           Type: int, Greater than 0
        :return: status:      state return value, see GxStatusList
                 float_range: filled-in GxFloatRange structure
        """
        float_range = GxFloatRange()
        status = dll.GXGetFloatRange(c_void_p(handle), c_int(feature_id), byref(float_range))
        return status, float_range
if hasattr(dll, 'GXSetFloat'):
    def gx_set_float(handle, feature_id, float_value):
        """
        :brief   Write a floating-point feature.
        :param handle: device handle
                       Type: Long, Greater than 0
        :param feature_id: feature code ID
                           Type: int, Greater than 0
        :param float_value: value to write
                            Type: double
        :return: status: state return value, see GxStatusList
        """
        status = dll.GXSetFloat(c_void_p(handle), c_int(feature_id), c_double(float_value))
        return status
if hasattr(dll, 'GXGetFloat'):
    def gx_get_float(handle, feature_id):
        """
        :brief   Read the current value of a floating-point feature.
        :param handle: device handle
                       Type: Long, Greater than 0
        :param feature_id: feature code ID
                           Type: int, Greater than 0
        :return: status:      state return value, see GxStatusList
                 float_value: current value of the feature
                              (doc fix: the original docstring omitted this
                              second element of the returned tuple)
        """
        handle_c = c_void_p()
        handle_c.value = handle
        feature_id_c = c_int()
        feature_id_c.value = feature_id
        float_value = c_double()
        status = dll.GXGetFloat(handle_c, feature_id_c, byref(float_value))
        return status, float_value.value
if hasattr(dll, 'GXGetEnumEntryNums'):
    def gx_get_enum_entry_nums(handle, feature_id):
        """
        :brief   Get how many options an enumeration feature offers.
        :param handle: device handle
                       Type: Long, Greater than 0
        :param feature_id: feature code ID
                           Type: int, Greater than 0
        :return: status:   state return value, see GxStatusList
                 enum_num: number of options of the enumeration feature
        """
        count_c = c_uint()
        status = dll.GXGetEnumEntryNums(c_void_p(handle), c_int(feature_id), byref(count_c))
        return status, count_c.value
if hasattr(dll, 'GXGetEnumDescription'):
    def gx_get_enum_description(handle, feature_id, enum_num):
        """
        :brief Fetch the value/description pairs of an enumeration feature;
               see GxEnumDescription for the per-entry layout.
        :param handle: The handle of the device (long, > 0)
        :param feature_id: The feature code ID (int, > 0)
        :param enum_num: number of entries to fetch (int, > 0)
        :return: status: State return value, See detail in GxStatusList
                 enum_description: ctypes array of GxEnumDescription
        """
        descriptions = (GxEnumDescription * enum_num)()
        buf_size = c_size_t(sizeof(GxEnumDescription) * enum_num)
        status = dll.GXGetEnumDescription(c_void_p(handle), c_int(feature_id),
                                          byref(descriptions), byref(buf_size))
        return status, descriptions
if hasattr(dll, 'GXGetEnum'):
    def gx_get_enum(handle, feature_id):
        """
        :brief Read the current value of an enumeration feature
        :param handle: The handle of the device (long, > 0)
        :param feature_id: The feature code ID (int, > 0)
        :return: status: State return value, See detail in GxStatusList
                 enum_value: the current enumeration value
        """
        current = c_int64()
        status = dll.GXGetEnum(c_void_p(handle), c_int(feature_id), byref(current))
        return status, current.value
if hasattr(dll, 'GXSetEnum'):
    def gx_set_enum(handle, feature_id, enum_value):
        """
        :brief Write an enumeration feature value
        :param handle: The handle of the device (long, > 0)
        :param feature_id: The feature code ID (int, > 0)
        :param enum_value: the enumeration value to set (int)
        :return: status: State return value, See detail in GxStatusList
        """
        return dll.GXSetEnum(c_void_p(handle), c_int(feature_id), c_int64(enum_value))
if hasattr(dll, 'GXGetBool'):
    def gx_get_bool(handle, feature_id):
        """
        :brief Read the value of a bool type feature
        :param handle: The handle of the device (long, > 0)
        :param feature_id: The feature code ID (int, > 0)
        :return: status: State return value, See detail in GxStatusList
                 bool_value: the current boolean value
        """
        flag = c_bool()
        status = dll.GXGetBool(c_void_p(handle), c_int(feature_id), byref(flag))
        return status, flag.value
if hasattr(dll, 'GXSetBool'):
    def gx_set_bool(handle, feature_id, bool_value):
        """
        :brief Write the value of a bool type feature
        :param handle: The handle of the device (long, > 0)
        :param feature_id: The feature code ID (int, > 0)
        :param bool_value: the boolean value to set
        :return: status: State return value, See detail in GxStatusList
        """
        return dll.GXSetBool(c_void_p(handle), c_int(feature_id), c_bool(bool_value))
if hasattr(dll, 'GXGetStringLength'):
    def gx_get_string_length(handle, feature_id):
        """
        :brief Get the current value length of a string feature. Unit: byte
        :param handle: The handle of the device (long, > 0)
        :param feature_id: The feature code ID (int, > 0)
        :return: status: State return value, See detail in GxStatusList
                 string_length: usable length of the current string value
        """
        length_c = c_size_t()
        status = dll.GXGetStringLength(c_void_p(handle), c_int(feature_id), byref(length_c))
        # The reported length includes the trailing '\0'; return the character count.
        return status, length_c.value - 1
if hasattr(dll, 'GXGetStringMaxLength'):
    def gx_get_string_max_length(handle, feature_id):
        """
        :brief Get the maximum length of the string type value, Unit: byte
        :param handle: The handle of the device
                Type: Long, Greater than 0
        :param feature_id: The feature code ID
                Type: int, Greater than 0
        :return: status: State return value, See detail in GxStatusList
                 string_max_length: the maximum length of the string type value
        """
        handle_c = c_void_p()
        handle_c.value = handle
        feature_id_c = c_int()
        feature_id_c.value = feature_id
        string_max_length = c_size_t()
        # Fix: pass the prepared c_int (feature_id_c) like every sibling wrapper
        # does; the original passed the raw Python int and left feature_id_c unused.
        status = dll.GXGetStringMaxLength(handle_c, feature_id_c, byref(string_max_length))
        # The C length includes the terminating '\0'; report usable characters only.
        return status, string_max_length.value - 1
if hasattr(dll, 'GXGetString'):
    def gx_get_string(handle, feature_id):
        """
        :brief Get the content of a string type feature
        :param handle: The handle of the device (long, > 0)
        :param feature_id: The feature code ID (int, > 0)
        :return: status: State return value, See detail in GxStatusList
                 content: decoded string value
        """
        handle_c = c_void_p(handle)
        feature_id_c = c_int(feature_id)
        # First call with a NULL buffer queries the required size (incl. '\0').
        size_c = c_size_t()
        status = dll.GXGetString(handle_c, feature_id_c, None, byref(size_c))
        buf = create_string_buffer(size_c.value)
        status = dll.GXGetString(handle_c, feature_id_c, byref(buf), byref(size_c))
        raw = string_at(buf, size_c.value - 1)
        return status, string_decoding(raw)
if hasattr(dll, 'GXSetString'):
    def gx_set_string(handle, feature_id, content):
        """
        :brief Set the content of a string type feature
        :param handle: The handle of the device (long, > 0)
        :param feature_id: The feature code ID (int, > 0)
        :param content: the string to write (str)
        :return: status: State return value, See detail in GxStatusList
        """
        encoded = create_string_buffer(string_encoding(content))
        return dll.GXSetString(c_void_p(handle), c_int(feature_id), byref(encoded))
if hasattr(dll, 'GXGetBufferLength'):
    def gx_get_buffer_length(handle, feature_id):
        """
        :brief Get the chunk-data length in bytes. Allocate a buffer of this
               size and call gx_get_buffer to retrieve the chunk data itself.
        :param handle: The handle of the device (long, > 0)
        :param feature_id: The feature code ID (int, > 0)
        :return: status: State return value, See detail in GxStatusList
                 buff_length: buffer length in bytes
        """
        length_c = c_size_t()
        status = dll.GXGetBufferLength(c_void_p(handle), c_int(feature_id), byref(length_c))
        return status, length_c.value
if hasattr(dll, 'GXGetBuffer'):
    def gx_get_buffer(handle, feature_id):
        """
        :brief Get the chunk data of a buffer feature
        :param handle: The handle of the device (long, > 0)
        :param feature_id: The feature code ID (int, > 0)
        :return: status: State return value, See detail in GxStatusList
                 buff: ctypes ubyte array holding the chunk data
        """
        handle_c = c_void_p(handle)
        feature_id_c = c_int(feature_id)
        # First call with a NULL buffer queries the required size.
        size_c = c_size_t()
        status = dll.GXGetBuffer(handle_c, feature_id_c, None, byref(size_c))
        data = (c_ubyte * size_c.value)()
        status = dll.GXGetBuffer(handle_c, feature_id_c, byref(data), byref(size_c))
        return status, data
if hasattr(dll, 'GXSetBuffer'):
    def gx_set_buffer(handle, feature_id, buff, buff_size):
        """
        :brief Write chunk data to a buffer feature
        :param handle: The handle of the device
        :param feature_id: The feature code ID (long, > 0)
        :param buff: chunk data buffer (ctypes array)
        :param buff_size: size of buff in bytes (int, > 0)
        :return: status: State return value, See detail in GxStatusList
        """
        return dll.GXSetBuffer(c_void_p(handle), c_int(feature_id), buff, c_size_t(buff_size))
if hasattr(dll, 'GXSendCommand'):
    def gx_send_command(handle, feature_id):
        """
        :brief Execute a command feature
        :param handle: The handle of the device (long, > 0)
        :param feature_id: The feature code ID (int, > 0)
        :return: status: State return value, See detail in GxStatusList
        """
        return dll.GXSendCommand(c_void_p(handle), c_int(feature_id))
CAP_CALL = CFUNCTYPE(None, POINTER(GxFrameCallbackParam))
if hasattr(dll, 'GXRegisterCaptureCallback'):
    def gx_register_capture_callback(handle, cap_call):
        """
        :brief Register the frame capture callback function
        :param handle: The handle of the device
        :param cap_call: the callback to register, wrapped as CAP_CALL
        :return: status: State return value, See detail in GxStatusList
        """
        # The middle argument is an opaque user-data pointer; unused here.
        return dll.GXRegisterCaptureCallback(c_void_p(handle), None, cap_call)
if hasattr(dll, 'GXUnregisterCaptureCallback'):
    def gx_unregister_capture_callback(handle):
        """
        :brief Unregister the frame capture callback function
        :param handle: The handle of the device
        :return: status: State return value, See detail in GxStatusList
        """
        return dll.GXUnregisterCaptureCallback(c_void_p(handle))
if hasattr(dll, 'GXGetImage'):
    def gx_get_image(handle, frame_data, time_out=200):
        """
        :brief Poll one image directly after acquisition has started.
               Must not be mixed with the callback capture mode.
        :param handle: The handle of the device (long, > 0)
        :param frame_data: [out] GxFrameData structure that receives the image
        :param time_out: capture timeout in ms (int, >= 0), default 200
        :return: status: State return value, See detail in GxStatusList
        """
        return dll.GXGetImage(c_void_p(handle), byref(frame_data), c_uint(time_out))
if hasattr(dll, 'GXFlushQueue'):
    def gx_flush_queue(handle):
        """
        :brief Discard all cached images in the image output queue
        :param handle: The handle of the device (long, > 0)
        :return: status: State return value, See detail in GxStatusList
        """
        return dll.GXFlushQueue(c_void_p(handle))
OFF_LINE_CALL = CFUNCTYPE(None, c_void_p)
if hasattr(dll, 'GXRegisterDeviceOfflineCallback'):
    def gx_register_device_offline_callback(handle, call_back):
        """
        :brief Register the device-offline notification callback
               (provided by the GigE camera event mechanism).
        :param handle: The handle of the device
        :param call_back: the event callback, wrapped as OFF_LINE_CALL
        :return: status: State return value, See detail in GxStatusList
                 call_back_handle: handle needed later to unregister the callback
        """
        registration = c_void_p()
        status = dll.GXRegisterDeviceOfflineCallback(c_void_p(handle), None, call_back, byref(registration))
        return status, registration.value
if hasattr(dll, 'GXUnregisterDeviceOfflineCallback'):
    def gx_unregister_device_offline_callback(handle, call_back_handle):
        """
        :brief Unregister a device-offline callback
        :param handle: The handle of the device
        :param call_back_handle: handle returned when the callback was registered
        :return: status: State return value, See detail in GxStatusList
        """
        return dll.GXUnregisterDeviceOfflineCallback(c_void_p(handle), c_void_p(call_back_handle))
'''
if hasattr(dll, 'GXFlushEvent'):
def gx_flush_event(handle):
"""
:brief Empty the device event, such as the frame exposure to end the event data queue
:param handle: The handle of the device
:return: status: State return value, See detail in GxStatusList
"""
handle_c = c_void_p()
handle_c.value = handle
status = dll.GXFlushEvent(handle_c)
return status
if hasattr(dll, 'GXGetEventNumInQueue'):
def gx_get_event_num_in_queue(handle):
"""
:brief Get the number of the events in the current remote device event queue cache.
:param handle: The handle of the device
:return: status: State return value, See detail in GxStatusList
event_num: event number.
"""
handle_c = c_void_p()
handle_c.value = handle
event_num = c_uint()
status = dll.GXGetEventNumInQueue(handle_c, byref(event_num))
return status, event_num.value
FEATURE_CALL = CFUNCTYPE(None, c_uint, c_void_p)
if hasattr(dll, 'GXRegisterFeatureCallback'):
def gx_register_feature_callback(handle, call_back, feature_id):
"""
:brief Register device attribute update callback function.
When the current value of the device property has updated, or the accessible property is changed,
call this callback function.
:param handle: The handle of the device
:param call_back: The user event handle callback function(@ FEATURE_CALL)
:param feature_id: The feature code ID
:return: status: State return value, See detail in GxStatusList
call_back_handle: The handle of property update callback function,
to unregister the callback function.
"""
handle_c = c_void_p()
handle_c.value = handle
feature_id_c = c_int()
feature_id_c.value = feature_id
call_back_handle = c_void_p()
status = dll.GXRegisterFeatureCallback(handle_c, None, call_back, feature_id_c, byref(call_back_handle))
return status, call_back_handle.value
if hasattr(dll, 'GXUnregisterFeatureCallback'):
"""
"""
    def gx_unregister_feature_callback(handle, feature_id, call_back_handle):
"""
:brief Unregister device attribute update callback function
:param handle: The handle of the device
:param feature_id: The feature code ID
:param call_back_handle: Handle of property update callback function
:return: status: State return value, See detail in GxStatusList
"""
handle_c = c_void_p()
handle_c.value = handle
feature_id_c = c_int()
feature_id_c.value = feature_id
call_back_handle_c = c_void_p()
call_back_handle_c.value = call_back_handle
status = dll.GXUnregisterFeatureCallback(handle_c, feature_id_c, call_back_handle_c)
return status
'''
if hasattr(dll, 'GXExportConfigFile'):
    def gx_export_config_file(handle, file_path):
        """
        :brief Export the camera's current parameters to a configuration file
        :param handle: The handle of the device (long, > 0)
        :param file_path: path of the configuration file to generate (str)
        :return: status: State return value, See detail in GxStatusList
        """
        path_buf = create_string_buffer(string_encoding(file_path))
        return dll.GXExportConfigFile(c_void_p(handle), byref(path_buf))
if hasattr(dll, 'GXImportConfigFile'):
    def gx_import_config_file(handle, file_path, verify):
        """
        :brief Import a configuration file into the camera
        :param handle: The handle of the device (long, > 0)
        :param file_path: path of the configuration file (str)
        :param verify: when True, every imported value is read back and
                       checked for consistency
        :return: status: State return value, See detail in GxStatusList
        """
        path_buf = create_string_buffer(string_encoding(file_path))
        return dll.GXImportConfigFile(c_void_p(handle), byref(path_buf), c_bool(verify))
'''
if hasattr(dll, 'GXReadRemoteDevicePort'):
def gx_read_remote_device_port(handle, address, buff, size):
"""
:brief Read data for user specified register.
:param handle: The handle of the device
:param address: Register address
:param buff: Output data buff
:param size: Buff size
:return: status: State return value, See detail in GxStatusList
size: Returns the length of the actual read register
"""
handle_c = c_void_p()
handle_c.value = handle
address_c = c_ulonglong()
address_c.value = address
size_c = c_uint()
size_c.value = size
status = dll.GXReadRemoteDevicePort(handle_c, address_c, byref(buff), byref(size_c))
return status, size_c.value
if hasattr(dll, 'GXWriteRemoteDevicePort'):
def gx_write_remote_device_port(handle, address, buff, size):
"""
:brief Writes user specified data to a user specified register.
:param handle: The handle of the device
:param address: Register address
:param buff: User data
:param size: User data size
:return: status: State return value, See detail in GxStatusList
size: Returns the length of the actual write register
"""
handle_c = c_void_p()
handle_c.value = handle
address_c = c_ulonglong()
address_c.value = address
size_c = c_uint()
size_c.value = size
status = dll.GXWriteRemoteDevicePort(handle_c, address_c, byref(buff), byref(size_c))
return status, size_c.value
if hasattr(dll, 'GXGigEIpConfiguration'):
def gx_gige_ip_configuration(mac_address, ipconfig_flag, ip_address, subnet_mask, default_gateway, user_id):
"""
"brief Configure the static IP address of the camera
:param mac_address: The MAC address of the device(str)
:param ipconfig_flag: IP Configuration mode(GxIPConfigureModeList)
:param ip_address: The IP address to be set(str)
:param subnet_mask: The subnet mask to be set(str)
:param default_gateway: The default gateway to be set(str)
:param user_id: The user-defined name to be set(str)
:return: status: State return value, See detail in GxStatusList
"""
mac_address_c = create_string_buffer(string_encoding(mac_address))
ip_address_c = create_string_buffer(string_encoding(ip_address))
subnet_mask_c = create_string_buffer(string_encoding(subnet_mask))
default_gateway_c = create_string_buffer(string_encoding(default_gateway))
user_id_c = create_string_buffer(string_encoding(user_id))
status = dll.GXGigEIpConfiguration(mac_address_c, ipconfig_flag,
ip_address_c, subnet_mask_c,
default_gateway_c, user_id_c)
return status
if hasattr(dll, 'GXGigEForceIp'):
def gx_gige_force_ip(mac_address, ip_address, subnet_mask, default_gate_way):
"""
:brief Execute the Force IP
:param mac_address: The MAC address of the device(str)
:param ip_address: The IP address to be set(str)
:param subnet_mask: The subnet mask to be set(str)
:param default_gate_way: The default gateway to be set(str)
:return: status: State return value, See detail in GxStatusList
"""
mac_address_c = create_string_buffer(string_encoding(mac_address))
ip_address_c = create_string_buffer(string_encoding(ip_address))
subnet_mask_c = create_string_buffer(string_encoding(subnet_mask))
default_gate_way_c = create_string_buffer(string_encoding(default_gate_way))
status = dll.GXGigEForceIp(mac_address_c, ip_address_c, subnet_mask_c, default_gate_way_c)
return status
'''
if hasattr(dll, 'GXSetAcqusitionBufferNumber'):
    def gx_set_acquisition_buffer_number(handle, buffer_num):
        """
        :brief Set the number of acquisition buffers
        :param handle: The handle of the device (long, > 0)
        :param buffer_num: acquisition buffer count (int, > 0)
        :return: status: State return value, See detail in GxStatusList
        """
        # NOTE: 'Acqusition' is the SDK's own spelling of the exported symbol.
        return dll.GXSetAcqusitionBufferNumber(c_void_p(handle), c_uint64(buffer_num))
'''
if hasattr(dll, 'GXStreamOn'):
def gx_stream_on(handle):
"""
:brief Start acquisition
:param handle: The handle of the device
:return: status: State return value, See detail in GxStatusList
"""
handle_c = c_void_p()
handle_c.value = handle
status = dll.GXStreamOn(handle_c)
return status
if hasattr(dll, 'GXDQBuf'):
def gx_dequeue_buf(handle, time_out):
"""
:brief Get a image
After the image processing is completed, the gx_queue_buf interface needs to be called
otherwise the collection will not be able to continue.
:param handle: The handle of the device
:param time_out: The timeout time of capture image.(unit: ms)
:return: status: State return value, See detail in GxStatusList
frame_data: Image data
frame_data_p: Image buff address
"""
handle_c = c_void_p()
handle_c.value = handle
time_out_c = c_uint()
time_out_c.value = time_out
frame_data_p = c_void_p()
status = dll.GXDQBuf(handle_c, byref(frame_data_p), time_out_c)
frame_data = GxFrameData()
memmove(addressof(frame_data), frame_data_p.value, sizeof(frame_data))
return status, frame_data, frame_data_p.value
if hasattr(dll, 'GXQBuf'):
def gx_queue_buf(handle, frame_data_p):
"""
:brief Put an image Buff back to the GxIAPI library and continue to be used for collection.
:param handle: The handle of the device
:param frame_data_p: Image buff address
:return: status: State return value, See detail in GxStatusList
"""
handle_c = c_void_p()
handle_c.value = handle
frame_data_p_p = c_void_p()
frame_data_p_p.value = frame_data_p
status = dll.GXQBuf(handle_c, frame_data_p_p)
return status
if hasattr(dll, 'GXDQAllBufs'):
def gx_dequeue_all_bufs(handle, buff_num, time_out):
"""
:brief Get images
After the image processing is completed, the gx_queue_all_bufs interface needs to be called
otherwise the collection will not be able to continue.
:param handle: The handle of the device
:param buff_num: The number of images expected to be obtained
:param time_out: The timeout time of capture image.(unit: ms)
:return: status: State return value, See detail in GxStatusList
frame_data: Image data arrays
frame_count: The number of images that are actually returned
"""
handle_c = c_void_p()
handle_c.value = handle
time_out_c = c_uint()
time_out_c.value = time_out
frame_data_p = (c_void_p * buff_num)()
frame_count_c = c_uint()
status = dll.GXDQAllBufs(handle_c, frame_data_p, buff_num, byref(frame_count_c), time_out_c)
frame_data = (GxFrameData * buff_num)()
for i in range(buff_num):
memmove(addressof(frame_data[i]), frame_data_p[i], sizeof(GxFrameData))
return status, frame_data, frame_count_c.value
if hasattr(dll, 'GXQAllBufs'):
def gx_queue_all_bufs(handle):
"""
:brief The image data Buf is returned to the GxIAPI library and used for collection.
:param handle: The handle of the device
:return: status: State return value, See detail in GxStatusList
"""
handle_c = c_void_p()
handle_c.value = handle
status = dll.GXQAllBufs(handle_c)
return status
if hasattr(dll, 'GXStreamOff'):
def gx_stream_off(handle):
"""
:brief Stop acquisition
:param handle: The handle of the device
:return: status: State return value, See detail in GxStatusList
"""
handle_c = c_void_p()
handle_c.value = handle
status = dll.GXStreamOff(handle_c)
return status
'''
def string_encoding(string):
    """
    :brief On Python 3, encode a str to bytes; on Python 2 return the input unchanged.
    :param string: text to encode
    :return: bytes on Python 3, the original object on Python 2
    """
    if sys.version_info.major == 3:
        return string.encode()
    return string
def string_decoding(string):
    """
    :brief On Python 3, decode bytes to str; on Python 2 return the input unchanged.
    :param string: bytes to decode
    :return: str on Python 3, the original object on Python 2
    """
    if sys.version_info.major == 3:
        return string.decode()
    return string
def range_check(value, min_value, max_value, inc_value=0):
    """
    :brief Determine if the input parameter is within range
    :param value: input value
    :param min_value: lower bound, inclusive
    :param max_value: upper bound, inclusive
    :param inc_value: step size; 0 disables the step check (default=0)
    :return: True/False
    """
    # Fix: the original docstring described min_value as "max value" and
    # max_value as "min value" (swapped); the logic itself is unchanged.
    if value < min_value:
        return False
    if value > max_value:
        return False
    # With a non-zero step, value must be an exact multiple of inc_value
    # (measured from zero, matching the SDK's increment semantics).
    if inc_value != 0 and value != int(value / inc_value) * inc_value:
        return False
    return True
/HippodamiaAgent-0.1.12.tar.gz/HippodamiaAgent-0.1.12/hippodamia_agent/states/state_machine.py | from hippodamia_agent.states.machinelogger import MachineLogger
from tantamount.fsm2dot import GetDotNotation
from hippodamia_agent.states.active import Active
from hippodamia_agent.states.initialized import Initialized
from hippodamia_agent.states.onboarding import Onboarding
from hippodamia_agent.states.onboarded import Onboarded
from hippodamia_agent.states.terminiating import Terminating
from hippodamia_agent.states.uninitialized import Uninitialized
from hippodamia_agent.states.event_ids import event_ids
from hippodamia_agent.states.state_ids import state_ids
import pelops.mylogger
import threading
import collections
import pprint
def create(sigint, onboarding_timeout, restart_timeout, logger):
    """Build and wire up the agent state machine.

    :param sigint: threading.Event shared with the states (shutdown flag)
    :param onboarding_timeout: seconds before ONBOARDING times out back to INITIALIZED
    :param restart_timeout: seconds TERMINATING waits before restarting to UNINITIALIZED
    :param logger: logger handed to the machine and to every state
    :return: (machine, states, history) - the MachineLogger, the
             state_id -> state-instance mapping, and the shared history deque
    """
    logger.info("creating state machine - start")
    logger.info("creating state machine - creating states")
    # Bounded, shared event history; every state appends to the same deque.
    history = collections.deque(maxlen=50)
    states = {
        state_ids.UNINITIALIZED: Uninitialized(state_ids.UNINITIALIZED, logger, history, sigint),
        state_ids.INITIALIZED: Initialized(state_ids.INITIALIZED, logger, history, sigint),
        state_ids.ONBOARDING: Onboarding(state_ids.ONBOARDING, logger, history, sigint),
        state_ids.ONBOARDED: Onboarded(state_ids.ONBOARDED, logger, history, sigint),
        state_ids.ACTIVE: Active(state_ids.ACTIVE, logger, history, sigint),
        state_ids.TERMINATING: Terminating(state_ids.TERMINATING, logger, history),  # NOTE(review): no sigint arg - presumably Terminating's constructor takes none; confirm
    }
    machine = MachineLogger(logger)
    logger.info("creating state machine - adding states")
    for state in states.values():
        machine.addstate(state)
    logger.info("creating state machine - set start states")
    machine.setstartstate(state_ids.UNINITIALIZED)
    logger.info("creating state machine - adding transitions")
    # REONBOARDING_REQUEST is accepted from every state and always returns to
    # UNINITIALIZED; SIGINT from every state leads to TERMINATING.
    machine.addtransition(state_ids.UNINITIALIZED, event_ids.NEW_UUID, state_ids.INITIALIZED)
    machine.addtransition(state_ids.UNINITIALIZED, event_ids.SIGINT, state_ids.TERMINATING)
    machine.addtransition(state_ids.UNINITIALIZED, event_ids.REONBOARDING_REQUEST, state_ids.UNINITIALIZED)
    machine.addtransition(state_ids.INITIALIZED, event_ids.SIGINT, state_ids.TERMINATING)
    machine.addtransition(state_ids.INITIALIZED, event_ids.ONBOARDING_REQUEST, state_ids.ONBOARDING)
    machine.addtransition(state_ids.INITIALIZED, event_ids.REONBOARDING_REQUEST, state_ids.UNINITIALIZED)
    machine.addtransition(state_ids.ONBOARDING, event_ids.SIGINT, state_ids.TERMINATING)
    machine.addtransition(state_ids.ONBOARDING, event_ids.TIMEOUT, state_ids.INITIALIZED)
    machine.addtransition(state_ids.ONBOARDING, event_ids.ONBOARDING_RESPONSE, state_ids.ONBOARDED)
    machine.addtransition(state_ids.ONBOARDING, event_ids.REONBOARDING_REQUEST, state_ids.UNINITIALIZED)
    machine.addtransition(state_ids.ONBOARDED, event_ids.SIGINT, state_ids.TERMINATING)
    machine.addtransition(state_ids.ONBOARDED, event_ids.ACTIVATE, state_ids.ACTIVE)
    machine.addtransition(state_ids.ONBOARDED, event_ids.REONBOARDING_REQUEST, state_ids.UNINITIALIZED)
    machine.addtransition(state_ids.ACTIVE, event_ids.SIGINT, state_ids.TERMINATING)
    machine.addtransition(state_ids.ACTIVE, event_ids.REONBOARDING_REQUEST, state_ids.UNINITIALIZED)
    machine.addtransition(state_ids.ACTIVE, event_ids.TIMEOUT, state_ids.UNINITIALIZED)
    machine.addtransition(state_ids.TERMINATING, event_ids.RESTART, state_ids.UNINITIALIZED)
    logger.info("creating state machine - set timeout events")
    machine.addtimeoutevent(state_ids.ONBOARDING, event_ids.TIMEOUT, onboarding_timeout)
    machine.addtimeoutevent(state_ids.TERMINATING, event_ids.RESTART, restart_timeout)
    machine.addtimeoutevent(state_ids.ACTIVE, event_ids.TIMEOUT, onboarding_timeout)  # set to an arbitrary value > 0
    # otherwise in case of a problem during onboarding it might happen that the timeout event triggers with 0 seconds
    # which would lead to a runtime error
    logger.info("creating state machine - done")
    return machine, states, history
def dot2file(filename):
    """Render the state machine as Graphviz dot notation and write it to
    *filename*, rewriting the file only when the notation changed.

    :param filename: path of the .dot file to (re)generate
    """
    class NoLogger:
        # No-op logger; kept for quick switching with the pelops logger below
        # (see the commented-out assignment).
        def info(self, message):
            pass
        def debug(self, message):
            pass
        def warning(self, message):
            pass
        def error(self, message):
            pass
    config = {"log-level": "CRITICAL", "log-file": "hippodamia-agent.log"}
    logger = pelops.mylogger.create_logger(config, "dot2file")
    #logger = NoLogger()
    sigint = threading.Event()
    # Timeout values are irrelevant for rendering; 60/120 are placeholders.
    machine, states, history = create(sigint, 60, 120, logger)
    gdn = GetDotNotation(machine, getStateId=(lambda x:x.name), getStateName=(lambda x:x.name),
                         getTransitionName=(lambda x:x.name))
    new_dotnotation = gdn.getdotnotation()
    # Read the previous rendering (if any) so an unchanged graph is not rewritten.
    try:
        with open(filename, 'r') as f:
            old_dotnotation = f.read()
    except OSError:
        old_dotnotation = ""
    if new_dotnotation != old_dotnotation:
        print("updating {} to latest version.".format(filename))
        with open(filename, "w") as f:
            f.write(new_dotnotation)
/ChemDataExtractor_c-1.0.0-py3-none-any.whl/chemdataextractor/biblio/person.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import re
import string
from ..text import QUOTES
from ..text.latex import latex_to_unicode
# Matches a bare ORCID identifier: four dash-separated groups of four digits.
ORCID_RE = re.compile(r'^\d{4}-\d{4}-\d{4}-\d{4}$')
# Honorific, professional, military, clerical and noble titles that may
# precede a name; all lower-case for case-insensitive matching.
TITLES = {
    'ms', 'miss', 'mrs', 'mr', 'master', 'dr', 'doctor', 'prof', 'professor', 'sir', 'dame', 'madam', 'madame',
    'mademoiselle', 'monsieur', 'lord', 'lady', 'rev', 'reverend', 'fr', 'father', 'brother', 'sister', 'pastor',
    'cardinal', 'abbot', 'abbess', 'friar', 'mother', 'bishop', 'archbishop', 'priest', 'priestess', 'pope', 'vicar',
    'chaplain', 'saint', 'deacon', 'archdeacon', 'rabbi', 'ayatollah', 'imam', 'pres', 'president', 'gov', 'governor',
    'rep', 'representative', 'sen', 'senator', 'minister', 'chancellor', 'cllr', 'councillor', 'secretary', 'speaker',
    'alderman', 'delegate', 'mayor', 'ambassador', 'prefect', 'premier', 'envoy', 'provost', 'coach', 'principal',
    'king', 'queen', 'prince', 'princess', 'royal', 'majesty', 'highness', 'rt', 'duke', 'duchess', 'archduke',
    'archduchess', 'marquis', 'marquess', 'marchioness', 'earl', 'count', 'countess', 'viscount', 'viscountess',
    'baron', 'baroness', 'sheikh', 'emperor', 'empress', 'tsar', 'tsarina', 'uncle', 'auntie', 'aunt', 'atty',
    'attorney', 'advocate', 'judge', 'solicitor', 'barrister', 'comptroller', 'sheriff', 'registrar', 'treasurer',
    'associate', 'assistant', 'honorable', 'honourable', 'deputy', 'vice', 'executive', 'his', 'her', 'private',
    'corporal', 'sargent', 'seargent', 'officer', 'major', 'captain', 'commander', 'lieutenant', 'colonel', 'general',
    'chief', 'admiral', 'pilot', 'resident', 'surgeon', 'nurse', 'col', 'capt', 'cpt', 'maj', 'cpl', 'ltc', 'sgt',
    'pfc', 'sfc', 'mg', 'bg', 'ssgt', 'ltcol', 'majgen', 'gen', 'ltgen', 'sgtmaj', 'bgen', 'lcpl', '2ndlt', '1stlt',
    'briggen', '1stsgt', 'pvt', '2lt', '1lt', 'ens', 'lt', 'adm', 'vadm', 'cpo', 'mcpo', 'mcpoc', 'scpo', 'radm(lh)',
    'radm(uh)', 'ltg'
}
# Lower-case nobiliary/patronymic particles ('von', 'de', 'van', ...) that
# attach to the surname rather than being a name of their own.
PREFIXES = {
    'abu', 'bon', 'bin', 'da', 'dal', 'de', 'del', 'der', 'de', 'di', 'dí', 'ibn', 'la', 'le', 'san', 'st', 'ste',
    'van', 'vel', 'von'
}
# Post-nominal suffixes: generational markers, honours, degrees and
# professional fellowships; kept in their conventional capitalisation.
SUFFIXES = {
    'Esq', 'Esquire', 'Bt', 'Btss', 'Jr', 'Sr', '2', 'I', 'II', 'III', 'IV', 'V', 'CLU', 'ChFC', 'CFP', 'MP', 'MSP',
    'MEP', 'AM', 'MLA', 'QC', 'KC', 'PC', 'SCJ', 'MHA', 'MNA', 'MPP', 'VC', 'GC', 'KBE', 'CBE', 'MBE', 'DBE', 'GBE',
    'OBE', 'MD', 'PhD', 'DBEnv', 'DConstMgt', 'DREst', 'EdD', 'DPhil', 'DLitt', 'DSocSci', 'EngD', 'DD', 'LLD', 'DProf',
    'BA', 'BSc', 'LLB', 'BEng', 'MBChB', 'MA', 'MSc', 'MSci', 'MPhil', 'MArch', 'MMORSE', 'MMath', 'MMathStat',
    'MPharm', 'MSt', 'MRes', 'MEng', 'MChem', 'MSocSc', 'MMus', 'LLM', 'BCL', 'MPhys', 'MComp', 'MAcc', 'MFin', 'MBA',
    'MPA', 'MEd', 'MEnt', 'MCGI', 'MGeol', 'MLitt', 'MEarthSc', 'MClinRes', 'MJur', 'FdA', 'FdSc', 'FdEng', 'PgD',
    'PgDip', 'PgC', 'PgCert', 'DipHE', 'OND', 'CertHE', 'RA', 'FRCP', 'FRSC', 'FRSA', 'FRCS', 'FMedSci', 'AMSB',
    'MSB', 'FSB', 'FBA', 'FBCS', 'FCPS', 'FGS', 'FREng', 'FRS', 'FRAeS', 'FRAI', 'FRAS', 'MRCP', 'MRCS', 'MRCA', 'FRCA',
    'MRCGP', 'FRCGP', 'MRSC', 'MRPharmS', 'FRPharmS', 'FZS', 'FRES', 'CBiol', 'CChem', 'CEng', 'CMath', 'CPhys', 'CSci'
}
# Lower-cased view of SUFFIXES for case-insensitive comparison.
SUFFIXES_LOWER = {suf.lower() for suf in SUFFIXES}
# Tokens that look like suffixes ('I.', 'V.') but are initials, never suffixes.
NOT_SUFFIX = {'I.', 'V.'}
# Make attributes instead of dict style.
# Parse from string as a class method.
# Mutable attributes that can be set via constructor or modified at any time.
# to_dict, to_json method?
class PersonName(dict):
    """Class for parsing a person's name into its constituent parts.

    Parses a name string into title, firstname, middlename, nickname, prefix, lastname, suffix.

    Example usage::

        p = PersonName('von Beethoven, Ludwig')

    PersonName acts like a dict::

        print(p)
        print(p['firstname'])
        print(json.dumps(p))

    Name components can also be accessed as attributes::

        print(p.lastname)

    Instances can be reused by setting the fullname property::

        p.fullname = 'Henry Ford Jr. III'
        print(p)

    Two PersonName objects are equal if every name component matches exactly. For fuzzy matching, use the `could_be`
    method. This returns True for names that are not explicitly inconsistent.

    This class was written with the intention of parsing BibTeX author names, so name components enclosed within curly
    brackets will not be split.
    """

    # Useful info at http://nwalsh.com/tex/texhelp/bibtx-23.html
    # Issues:
    # - Prefix 'ben' is recognised as middlename. Could distinguish 'ben' and 'Ben'?
    # - Multiple word first names like "Emma May" or "Billy Joe" aren't supported

    def __init__(self, fullname=None, from_bibtex=False):
        """Initialize with a name string.

        :param fullname: A person name as a string, or None for an empty instance.
        :param from_bibtex: If True, treat the input as LaTeX-encoded BibTeX and
            convert it to unicode while cleaning components.
        """
        super(PersonName, self).__init__()
        self._from_bibtex = from_bibtex
        self.fullname = fullname  # property setter triggers parsing

    def __repr__(self):
        return '%s(%r)' % (self.__class__.__name__, self.fullname)

    def __str__(self):
        return dict.__repr__(self)

    def could_be(self, other):
        """Return True if the other PersonName is not explicitly inconsistent."""
        # TODO: Some suffix and title differences should be allowed
        if type(other) is not type(self):
            return NotImplemented
        if self == other:
            return True
        for attr in ['title', 'firstname', 'middlename', 'nickname', 'prefix', 'lastname', 'suffix']:
            if attr not in self or attr not in other:
                continue
            # Compare case-insensitively with all punctuation removed.
            puncmap = dict((ord(char), None) for char in string.punctuation)
            s = self[attr].lower().translate(puncmap)
            o = other[attr].lower().translate(puncmap)
            if s == o:
                continue
            # Accept initials that match the first letters of the other name's
            # words, e.g. firstname 'J R' could be 'John Ronald'.
            if attr in {'firstname', 'middlename', 'lastname'}:
                if (({len(comp) for comp in s.split()} == {1} and [el[0] for el in o.split()] == s.split()) or
                        ({len(comp) for comp in o.split()} == {1} and [el[0] for el in s.split()] == o.split())):
                    continue
            return False
        return True

    @property
    def fullname(self):
        """The full name, reassembled from the parsed components."""
        return self.get('fullname', '')

    @fullname.setter
    def fullname(self, fullname):
        self.clear()
        self._parse(fullname)

    def __getattr__(self, name):
        # Expose parsed components as attributes; absent components are None.
        if name in {'title', 'firstname', 'middlename', 'nickname', 'prefix', 'lastname', 'suffix'}:
            return self.get(name)
        else:
            raise AttributeError

    def _is_title(self, t):
        """Return true if t is a title."""
        return t.lower().replace('.', '') in TITLES

    def _is_prefix(self, t):
        """Return true if t is a prefix."""
        return t.lower().replace('.', '') in PREFIXES

    def _is_suffix(self, t):
        """Return true if t is a suffix."""
        return t not in NOT_SUFFIX and (t.replace('.', '') in SUFFIXES or t.replace('.', '') in SUFFIXES_LOWER)

    def _tokenize(self, comps):
        """Split name on spaces, unless inside curly brackets or quotes."""
        ps = []
        for comp in comps:
            ps.extend([c.strip(' ,') for c in re.split(r'\s+(?=[^{}]*(?:\{|$))', comp)])
        return [p for p in ps if p]

    def _clean(self, t, capitalize=None):
        """Convert to normalized unicode and strip trailing full stops."""
        if self._from_bibtex:
            t = latex_to_unicode(t, capitalize=capitalize)
        # Strip a single trailing full stop ('Jr.' -> 'Jr') but leave
        # multi-dot tokens such as initials 'J.R.' untouched.
        t = ' '.join([el.rstrip('.') if el.count('.') == 1 else el for el in t.split()])
        return t

    def _strip(self, tokens, criteria, prop, rev=False):
        """Strip off contiguous tokens from the start or end of the list that meet the criteria.

        Stripped tokens are cleaned and stored under key ``prop``. The
        ``num > i + 1`` guard guarantees at least one token always remains.
        """
        num = len(tokens)
        res = []
        for i, token in enumerate(reversed(tokens) if rev else tokens):
            if criteria(token) and num > i + 1:
                res.insert(0, tokens.pop()) if rev else res.append(tokens.pop(0))
            else:
                break
        if res:
            self[prop] = self._clean(' '.join(res))
        return tokens

    def _parse(self, fullname):
        """Perform the parsing."""
        # Fix: previously fullname=None (the constructor default) crashed with
        # AttributeError on None.split(); treat it as an empty name instead.
        if fullname is None:
            return
        n = ' '.join(fullname.split()).strip(',')
        if not n:
            return
        comps = [p.strip() for p in n.split(',')]
        if len(comps) > 1 and not all([self._is_suffix(comp) for comp in comps[1:]]):
            # Comma format such as "von Last, First" or "von Last, Jr, First":
            # everything up to (and including) suffix components belongs to
            # the last name part.
            vlj = []
            while True:
                vlj.append(comps.pop(0))
                if not self._is_suffix(comps[0]):
                    break
            ltokens = self._tokenize(vlj)
            ltokens = self._strip(ltokens, self._is_prefix, 'prefix')
            ltokens = self._strip(ltokens, self._is_suffix, 'suffix', True)
            self['lastname'] = self._clean(' '.join(ltokens), capitalize='name')
        tokens = self._tokenize(comps)
        tokens = self._strip(tokens, self._is_title, 'title')
        if not 'lastname' in self:
            tokens = self._strip(tokens, self._is_suffix, 'suffix', True)
        # Scan backwards for a contiguous run of prefix ("von") tokens that
        # separates first/middle names from the last name.
        voni = []
        end = len(tokens) - 1
        if not 'prefix' in self:
            for i, token in enumerate(reversed(tokens)):
                if self._is_prefix(token):
                    if (i == 0 and end > 0) or (not 'lastname' in self and not i == end):
                        voni.append(end - i)
                else:
                    if (i == 0 and 'lastname' in self) or voni:
                        break
        if voni:
            if not 'lastname' in self:
                self['lastname'] = self._clean(' '.join(tokens[voni[0]+1:]), capitalize='name')
            self['prefix'] = self._clean(' '.join(tokens[voni[-1]:voni[0]+1]))
            tokens = tokens[:voni[-1]]
        else:
            if not 'lastname' in self:
                self['lastname'] = self._clean(tokens.pop(), capitalize='name')
        if tokens:
            self['firstname'] = self._clean(tokens.pop(0), capitalize='name')
        if tokens:
            # Look for a quoted nickname among the remaining tokens.
            nicki = []
            for i, token in enumerate(tokens):
                if token[0] in QUOTES:
                    for j, token2 in enumerate(tokens[i:]):
                        if token2[-1] in QUOTES:
                            nicki = range(i, i+j+1)
                            break
            if nicki:
                self['nickname'] = self._clean(' '.join(tokens[nicki[0]:nicki[-1]+1]).strip(''.join(QUOTES)),
                                               capitalize='name')
                tokens[nicki[0]:nicki[-1]+1] = []
        if tokens:
            self['middlename'] = self._clean(' '.join(tokens), capitalize='name')
        # Reassemble the canonical full name from the parsed components.
        namelist = []
        for attr in ['title', 'firstname', 'middlename', 'nickname', 'prefix', 'lastname', 'suffix']:
            if attr in self:
                namelist.append('"%s"' % self[attr] if attr == 'nickname' else self[attr])
        self['fullname'] = ' '.join(namelist)
/IDEA%20Wrapper-0.0.1.tar.gz/IDEA Wrapper-0.0.1/idea_wrapper/record_set.py | import re
import os
import win32com.client
from idea_wrapper.table_def import TableDef
from datetime import date, time
class RecordSet:
    """Snapshot of an IDEA database loaded via a delimited (DEL) export.

    Exports the database through the IDEA COM client, parses each exported
    line with a regex generated from the table definition, and converts the
    captured fields into Python values (str / int / float / date / time).
    """

    def _generate_regex(self):
        """Build a regex matching one exported line, with one capture group
        per field (numeric fields contribute two groups -- see group_amount).

        Field type mapping derived from the branches below: 3 -> quoted
        string, 4 -> number, 5 -> date (8 digits, YYYYMMDD), 11 -> time
        (HH:MM:SS); anything else is captured as raw text.
        """
        string_regex = r"\"(.*)\""
        date_regex = r"(?:\")?(\d\d\d\d\d\d\d\d)(?:\")?"
        time_regex = r"(?:\")?(\d\d\:\d\d\:\d\d)(?:\")?"
        num_regex = r"(?:\")?((?:(?:\+)?(?:-)?)\d*((?:\.|,)\d*)?)(?:\")?"
        regex = "^"
        for field in self.table_def:
            type = field.type  # NOTE(review): shadows the builtin ``type``
            if type == 3:
                regex += string_regex
            elif type == 4:
                regex += num_regex
            elif type == 5:
                regex += date_regex
            elif type == 11:
                regex += time_regex
            else:
                regex += r"(?:\")?(.*)(?:\")?"
            regex += ";"
        # Drop the trailing ";" separator and anchor at end of line.
        return regex[:len(regex) - 1] + "$"

    @staticmethod
    def _convert(string, type, include_empty_fields):
        """Convert one captured field string to a Python value.

        :param string: raw captured text; may be None for an empty optional group
        :param type: field type code (4 numeric, 5 date, 11 time; others kept as-is)
        :param include_empty_fields: if True, unparseable values become None
            instead of raising ValueError
        """
        try:
            if type == 4:
                if "." in string or "," in string:
                    # Decimal separator may be "," -- normalise to "." first.
                    return float(string.replace(",", "."))
                else:
                    return int(string)
            elif type == 5:
                # Dates are exported as YYYYMMDD.
                return date(int(string[:4]), int(string[4:6]), int(string[6:]))
            elif type == 11:
                # Times are exported as HH:MM:SS.
                return time(int(string[:2]), int(string[3:5]), int(string[6:]))
            else:
                return string
        except ValueError as e:
            if include_empty_fields:
                return None
            else:
                raise e
        except TypeError:
            # ``string`` was None (empty optional group) -- empty field.
            return None

    # How many regex groups does a field type represent (in regex)?
    group_amount = {
        3: 1,
        4: 2,
        5: 1,
        11: 1
    }

    def _read(self, text, include_empty_fields):
        """Parse exported text into Record objects stored in self._content.

        NOTE(review): a non-matching line terminates the whole process via
        ``exit(1)`` and progress is reported with ``print`` -- raising an
        exception / using logging would be friendlier for library use.
        """
        self._content = []
        matches = []
        for line in text.split("\n"):
            if not line:
                print("Encountered empty line!")
                continue
            match = re.match(self._regex, line)
            if match:
                matches.append(match)
            else:
                print("Did not match!")
                print(line)
                exit(1)
        for match in matches:
            try:
                i = 1  # regex group indices are 1-based
                line = []
                for field in self.table_def:
                    type = field.type
                    length = self.group_amount[type]
                    line.append(RecordSet._convert(match.group(i), type, include_empty_fields))
                    i += length
                self._content.append(Record(line))
            except ValueError:
                # Conversion failed and include_empty_fields is False:
                # drop the whole record.
                pass

    def _export(self, utf8):
        """Export the database to a temporary DEL file and return its text.

        The first line of the exported file is discarded -- presumably a
        header row written by the DEL export; TODO confirm against IDEA docs.
        """
        task = self._db.exportDatabase()
        task.includeAllFields()
        eqn = ""
        db_name = self._client.uniqueFileName("export.DEL")
        db_name = db_name[:len(db_name)-4]  # strip the ".DEL" extension (4 chars)
        task.performTask(db_name, "Database", "DEL UTF-8" if utf8 else "DEL", 1, self._db.count, eqn)
        content = ""
        with open(db_name, "r") as f:
            line = f.readline()  # discard first line
            while line:
                line = f.readline()
                if line:
                    content += line
        os.remove(db_name)
        return content

    def __init__(self, db, utf8=False, include_empty_fields=True):
        """Load all records from an IDEA database.

        :param db: IDEA database COM object
        :param utf8: export as "DEL UTF-8" instead of plain "DEL"
        :param include_empty_fields: keep unparseable fields as None instead
            of dropping the whole record
        """
        self._client = win32com.client.Dispatch(dispatch="Idea.IdeaClient")
        self._db = db
        content = self._export(utf8)
        self.table_def = TableDef(db.tableDef())
        self._regex = self._generate_regex()
        self._read(content, include_empty_fields)
        self.count = len(self._content)  # public record count

    def __len__(self):
        return len(self._content)

    def __getitem__(self, item):
        return self.get_at(item)

    def __str__(self):
        return str(self._content)

    def __iter__(self):
        return iter(self._content)

    def get_at(self, index):
        """Return the Record at position ``index``."""
        return self._content[index]
class Record:
    """One parsed database row; fields are addressed by position."""

    def __init__(self, data):
        """Store the already-converted field values.

        :param data: sequence of field values for this record
        """
        self._values = data
        self.number_of_fields = len(data)

    def value_at(self, index):
        """Return the value of the field at position ``index``."""
        return self._values[index]

    def __getitem__(self, item):
        return self.value_at(item)

    def __len__(self):
        # Refresh the cached count in case the underlying sequence changed.
        self.number_of_fields = len(self._values)
        return self.number_of_fields

    def __iter__(self):
        return iter(self._values)

    def __str__(self):
        return str(self._values)
/Nuitka_fixed-1.1.2-cp310-cp310-win_amd64.whl/nuitka/plugins/standard/DataFilesPlugin.py | import os
import pkgutil
from nuitka import Options
from nuitka.containers.OrderedSets import OrderedSet
from nuitka.plugins.PluginBase import NuitkaPluginBase
from nuitka.PythonFlavors import isDebianPackagePython
from nuitka.utils.FileOperations import (
getFileList,
resolveShellPatternToFilenames,
)
from nuitka.utils.Yaml import getYamlPackageConfiguration
class NuitkaPluginDataFileCollector(NuitkaPluginBase):
    """Plugin that copies package data files into the distribution.

    The files to include are driven by the "data-files" section of the
    Nuitka package configuration Yaml files.
    """

    plugin_name = "data-files"

    def __init__(self):
        self.config = getYamlPackageConfiguration()

    @classmethod
    def isRelevant(cls):
        # Data files only need collecting for standalone builds.
        return Options.isStandaloneMode()

    @staticmethod
    def isAlwaysEnabled():
        return True

    def _considerDataFiles(self, module, data_file_config):
        """Yield included data files/dirs for one config entry of a module.

        :param module: the module object the configuration applies to
        :param data_file_config: one entry from the "data-files" config section
        """
        # Many details and cases to deal with
        # pylint: disable=too-many-branches,too-many-locals

        module_name = module.getFullName()
        module_folder = module.getCompileTimeDirectory()

        target_dir = data_file_config.get("dest_path")

        # Default to near module or inside package folder.
        if target_dir is None:
            if module.isCompiledPythonPackage() or module.isUncompiledPythonPackage():
                target_dir = module_name.asPath()
            else:
                package_name = module_name.getPackageName()

                if package_name is not None:
                    target_dir = module_name.getPackageName().asPath()
                else:
                    target_dir = "."

        patterns = data_file_config.get("patterns")
        if patterns is not None:
            if type(patterns) is not list or not patterns:
                # Fix: the message previously named the wrong key 'pattern'.
                self.sysexit(
                    "Error, requiring list below 'patterns' entry for '%s' entry."
                    % module_name
                )

            # TODO: Pattern should be data file kind potentially.
            for pattern in patterns:
                pattern = os.path.join(module_folder, pattern)

                for filename in resolveShellPatternToFilenames(pattern):
                    filename_base = os.path.relpath(filename, module_folder)

                    yield self.makeIncludedDataFile(
                        source_path=filename,
                        dest_path=os.path.normpath(
                            os.path.join(target_dir, filename_base)
                        ),
                        reason="package data for '%s'" % module_name.asString(),
                        tags="config",
                    )

        empty_dirs = data_file_config.get("empty_dirs")
        if empty_dirs is not None:
            if type(empty_dirs) is not list or not empty_dirs:
                self.sysexit(
                    "Error, requiring list below 'empty_dirs' entry for '%s' entry."
                    % module_name
                )

            for empty_dir in empty_dirs:
                yield self.makeIncludedEmptyDirectory(
                    dest_path=os.path.join(target_dir, empty_dir),
                    reason="empty dir needed for %r" % module_name.asString(),
                    tags="config",
                )

        empty_dir_structures = data_file_config.get("empty_dir_structures")
        if empty_dir_structures is not None:
            if type(empty_dir_structures) is not list or not empty_dir_structures:
                # Fix: the message previously named the wrong key
                # 'empty_dirs_structure'.
                self.sysexit(
                    "Error, requiring list below 'empty_dir_structures' entry for '%s' entry."
                    % module_name
                )

            # TODO: This ignored config dest_path, which is unused, but not consistent.
            for included_data_file in self._getSubDirectoryFolders(
                module, sub_dirs=empty_dir_structures
            ):
                yield included_data_file

        dirs = data_file_config.get("dirs")
        if dirs is not None:
            if type(dirs) is not list or not dirs:
                # Fix: the message previously named the wrong key
                # 'empty_dirs_structure' instead of 'dirs'.
                self.sysexit(
                    "Error, requiring list below 'dirs' entry for '%s' entry."
                    % module_name
                )

            for data_dir in dirs:
                source_path = os.path.join(module_folder, data_dir)

                if os.path.isdir(source_path):
                    yield self.makeIncludedDataDirectory(
                        source_path=source_path,
                        dest_path=os.path.join(target_dir, data_dir),
                        reason="package data directory %r for %r"
                        % (data_dir, module_name.asString()),
                        tags="config",
                    )

    def considerDataFiles(self, module):
        """Yield all configured data files for the given module."""
        full_name = module.getFullName()

        for entry in self.config.get(full_name, section="data-files"):
            if self.evaluateCondition(
                full_name=full_name, condition=entry.get("when", "True")
            ):
                for included_data_file in self._considerDataFiles(
                    module=module, data_file_config=entry
                ):
                    yield included_data_file

        # TODO: Until the data files are a list and support features to do similar, namely
        # to look up via package data files.
        if full_name == "lib2to3.pygram" and isDebianPackagePython():
            # Embed the grammar files from the running interpreter's package.
            yield self.makeIncludedGeneratedDataFile(
                data=pkgutil.get_data("lib2to3", "Grammar.txt"),
                dest_path="lib2to3/Grammar.txt",
                reason="package data for '%s'" % full_name,
                tags="config",
            )

            yield self.makeIncludedGeneratedDataFile(
                data=pkgutil.get_data("lib2to3", "PatternGrammar.txt"),
                dest_path="lib2to3/PatternGrammar.txt",
                reason="package data for '%s'" % full_name,
                tags="config",
            )

    def _getSubDirectoryFolders(self, module, sub_dirs):
        """Get dirnames in given subdirectories of the module.

        Notes:
            All dirnames in folders below one of the sub_dirs are recursively
            retrieved and returned shortened to begin with the string of subdir.
        Args:
            module: module object
            sub_dirs: sub folder name(s) - tuple
        Returns:
            makeIncludedEmptyDirectory of found dirnames.
        """
        module_dir = module.getCompileTimeDirectory()

        data_dirs = [os.path.join(module_dir, subdir) for subdir in sub_dirs]

        # Gather the full file list, probably makes no sense to include bytecode files
        # (the dead ``file_list = []`` pre-assignment was removed).
        file_list = sum(
            (
                getFileList(
                    data_dir, ignore_dirs=("__pycache__",), ignore_suffixes=(".pyc",)
                )
                for data_dir in data_dirs
            ),
            [],
        )

        if not file_list:
            msg = "No files or folders found for '%s' in subfolder(s) %r (%r)." % (
                module.getFullName(),
                sub_dirs,
                data_dirs,
            )
            self.warning(msg)

        is_package = (
            module.isCompiledPythonPackage() or module.isUncompiledPythonPackage()
        )

        # We need to preserve the package target path in the dist folder.
        if is_package:
            package_part = module.getFullName().asPath()
        else:
            package = module.getFullName().getPackageName()

            if package is None:
                package_part = ""
            else:
                package_part = package.asPath()

        item_set = OrderedSet()

        for f in file_list:
            target = os.path.join(package_part, os.path.relpath(f, module_dir))

            dir_name = os.path.dirname(target)
            item_set.add(dir_name)

        for dest_path in item_set:
            yield self.makeIncludedEmptyDirectory(
                dest_path=dest_path,
                reason="Subdirectories of module %s" % module.getFullName(),
                tags="config",
            )
/MtxDrawer-0.0.15.tar.gz/MtxDrawer-0.0.15/README.md | # Draw Mtx As Thumbnail - 将 Mtx 画为缩略图

## 样例
| <br />平均值 | <br />不处理 |
| :-------------------------------------------------: | :--------------------------------------------: |
| <br /><b>取 0 次 log</b> | <br /><b>绝对值</b> |
## 安装
```shell
pip3 install MtxDrawer -U
```
自动安装依赖并注册一个命令`mtx-drawer`
【注意】:由于依赖库的版本更新可能导致旧版本不再能运行,请注意保持此工具为最新版本。
## 运行
```shell
mtx-drawer draw-one [--force] [--log-times <n: int>] [--mat-size <n: int>] [--block-size <n: int>] <filepath> <-ops <aver | abs | real | log>... >
mtx-drawer draw [--force] [--log-times <n: int>] [--mat-size <n: int>] [--block-size <n: int>] <-ops <aver | abs | real | log>... >
```
### 解释
1. 第一条命令是为文件`<filepath>`画缩略图 (`filepath`无需是 mtx 文件,但需要能被`scipy.io.mmread`读取),其中`<ops>`是<font color="red">必填的多选参数</font>只能在命令末尾赋值,用于指定缩略图的类型,其中`<aver>`表示平均值,`<abs>`表示绝对值,`<real>`表示实际值,`<log>`表示对数值进行对数变换; `force`表示强制重新画缩略图默认为否,`log-times`表示画缩略图对像素值取 log 的次数默认为 2,`mat-size`表示缩略图的尺寸(默认是 200 \* 200 的图像),`block-size`直接设置块大小(开启次选项后将覆盖掉`mat-size`参数)。
2. 第二条命令会递归搜索当前路径下的所有 mtx 文件并绘制缩略图,参数含义与上一条描述一致。
注意: ops 作为必填多选参数,必须在命令的末尾为其赋值,否则会报错。
### 例子
```shell
mtx-drawer draw-one 2.mtx --force --log-times 0 -ops aver abs log real # 一次性绘制2.mtx的四种图,log取0次,强制替换
mtx-drawer draw-one 2.mtx -ops aver abs log real # 一次性绘制2.mtx的四种图,log取2次,不强制替换
mtx-drawer draw --force -ops aver abs log # 绘制当前目录及子目录下的全部mtx文件的三种图,强制替换
mtx-drawer draw -ops aver abs log real # 绘制当前目录及子目录下的全部mtx文件的三种图,不强制替换且log取2次
```
### 特殊说明
子矩阵划分方式:当行列不相等时,较大的属性被分为`matSize`块,较小的属性被分为`rate * matSize`块;其中`rate`为 $\min(m,n)/\max(m,n)$
### 命令行补全
基于[QuickProject.Commmander](https://github.com/Rhythmicc/QuickProject)开发的命令行 APP 可以提供 zsh 或 [fig](https://fig.io/) 的补全脚本:
```sh
mtx-drawer complete
```
效果:

## 基于 Drawer 类的自定义开发
当默认提供的四种算法无法满足需要时,可以按如下方式自行设计算法:
```python
from MtxDrawer.Drawer import Drawer
"""
您可以通过如下方式自定义算法并通过Drawer对象的call方法来调用;
自定义算法可接受的参数将在下表中说明,此外,自定义算法必须返回一个数值用于表示color_bar的显示范围(返回1则表示-1~1)
"""
@Drawer.algorithmWrapper() # 算法装饰器
def myOwnAlgorithm(mat, extern_arg): # 参数命名要符合下表的要求,mat是下表第9项,extern_arg是下表第15项
print(extern_arg)
return max(abs(max([max(i) for i in mat])), abs(min([min(i) for i in mat])))
drawer = Drawer('dist/2.mtx', False, set_log_times=0, force_update=True)
drawer.call('myOwnAlgorithm', extern_arg=1)
"""
---结果---
[信息] 路径模板: "dist/2_{}.svg"
1
[信息] absVal = 1
"""
```
| 序号 | 合法参数 | 说明 |
| :--: | -------------- | -------------------------------------------------- |
| 1 | `has_aver` | 是否有取平均值选项 => div 是否可用 |
| 2 | `log-times` | 外部设定的取 log 的次数 |
| 3 | `mat-size` | 矩阵行列值较大的属性被分的块数 |
| 4 | `mtx` | 文件的 scipy.sparse.coo\*matrix 对象,未做任何更改 |
| 5 | `coo_shape` | mtx 的尺寸 |
| 6 | `coo_data` | 矩阵的非零元值 |
| 7 | `coo_rows` | 矩阵的非零元素行索引映射到 mat 的行值 |
| 8 | `coo_cols` | 矩阵的非零元素列索引映射到 mat 的列值 |
| 9 | `mat` | 被初始化好的二维画布对象,类型为 numpy.array |
| 10 | `div` | 子矩阵非零元数,只有当 has_aver 为 True 时才会有效 |
| 11 | `row_size` | mat 的行数 |
| 12 | `col_size` | mat 的列数 |
| 13 | `row_block_sz` | 划分的子矩阵的行数 |
| 14 | `col_block_sz` | 划分的子矩阵的列数 |
| 15 | `extern_*` | 额外的参数命名方式,需以"extern_xx=bala"的方式调用 |
### 现代 IDE 下的提示

| PypiClean |
/MCdeck-0.6.3-py3-none-any.whl/mcdeck/script.py | from argparse import ArgumentParser
import hashlib
import http.client
import os.path
import pathlib
import posixpath
import sys
import tempfile
import urllib.request
import zipfile
from PySide6 import QtWidgets, QtCore, QtGui
from lcgtools import LcgException
from lcgtools.graphics import LcgCardPdfGenerator, LcgImage
from lcgtools.util import LcgAppResources
import mcdeck
from mcdeck.marvelcdb import MarvelCDB
import mcdeck.octgn as octgn
from mcdeck.settings import Settings, SettingsDialog
from mcdeck.tts import TTSExportDialog
from mcdeck.util import loadImageFromFileDialog, ErrorDialog, download_image
from mcdeck.util import DeckUndoBuffer, to_posix_path, to_local_path
from mcdeck.util import image_mime_type, parse_mcd_file_section_header
class MCDeck(QtWidgets.QMainWindow):
"""Main app window."""
settingsChanged = QtCore.Signal() # App settings changed
settings = Settings()
conf = None
root = None
deck = None
game = None
_front_on_top = True
_clipboard = None
_export_pdf_action = None
def __init__(self):
super().__init__()
# Set main window title
self.setWindowTitle('MCdeck - custom card deck builder')
# Set up main window layout with a Deck as the single contained widget
deck = Deck()
if MCDeck.root:
raise LcgException('Cannot only instantiate one single MCDeck')
else:
MCDeck.root = self
MCDeck.deck = deck
layout = QtWidgets.QGridLayout()
layout.addWidget(deck, 0, 0)
widget = QtWidgets.QWidget()
widget.setLayout(layout)
self.setCentralWidget(widget)
# Define actions
icon = self.style().standardIcon(QtWidgets.QStyle.SP_FileIcon)
action = QtGui.QAction(icon, '&New', self)
action.setShortcut('Ctrl+N')
action.triggered.connect(deck.newDeck)
action.setStatusTip('Discard current cards and start new deck')
new_action = action
icon = self.style().standardIcon(QtWidgets.QStyle.SP_DialogOpenButton)
action = QtGui.QAction(icon, '&Open ...', self)
action.setShortcut('Ctrl+O')
action.triggered.connect(deck.openDeck)
action.setStatusTip('Open deck from loadable .zip or .mcd')
load_action = action
icon = self.style().standardIcon(QtWidgets.QStyle.SP_DialogSaveButton)
action = QtGui.QAction(icon, '&Save', self)
action.setShortcut('Ctrl+S')
action.triggered.connect(deck.saveDeck)
action.setStatusTip('Save the deck')
self.__save_action = action
action = QtGui.QAction('Save &as ...', self)
action.setShortcut('Ctrl+Shift+S')
action.triggered.connect(deck.saveDeckAs)
action.setStatusTip('Save the deck, selecting a new filename')
self.__save_as_action = action
action = QtGui.QAction('&PDF ...', self)
action.setShortcut('Ctrl+P')
action.triggered.connect(deck.exportPdf)
action.setStatusTip('Export deck as a printable PDF document')
self._export_pdf_action = action
action = QtGui.QAction('&TTS ...', self)
action.setShortcut('Ctrl+T')
action.triggered.connect(deck.exportTts)
action.setStatusTip('Export Tabletop Simulator deck front/back images')
export_tts_action = action
action = QtGui.QAction('&Card set ...', self)
action.setEnabled(False)
action.triggered.connect(deck.exportOctgnCardSet)
action.setStatusTip('Export card set for OCTGN')
self.__export_octgn_card_set_action = action
action = QtGui.QAction('&Deck ...', self)
action.setEnabled(False)
action.triggered.connect(deck.exportOctgnDeck)
action.setStatusTip('Export OCTGN .o8d deck')
self.__export_octgn_deck_action = action
action = QtGui.QAction('&Exit', self)
action.setShortcut('Ctrl+Q')
action.setStatusTip('Exit program')
action.triggered.connect(self.exitAction)
exit_action = action
action = QtGui.QAction('Undo', self)
action.setShortcut('Ctrl+Z')
action.setStatusTip('Undo')
action.triggered.connect(deck.undoAction)
action.setEnabled(False)
self.__undo_action = action
action = QtGui.QAction('Redo', self)
action.setShortcut('Ctrl+Y')
action.setStatusTip('Redo')
action.triggered.connect(deck.redoAction)
action.setEnabled(False)
self.__redo_action = action
action = QtGui.QAction('Cut', self)
action.setShortcut('Ctrl+X')
action.setStatusTip('Cut selected cards (only within app)')
action.triggered.connect(deck.cutCards)
action.setEnabled(False)
self.__cut_action = action
action = QtGui.QAction('Copy', self)
action.setShortcut('Ctrl+C')
action.setStatusTip('Copy selected cards (only within app)')
action.triggered.connect(deck.copyCards)
action.setEnabled(False)
self.__copy_action = action
action = QtGui.QAction('Copy front image', self)
action.setShortcut('Ctrl+Shift+F')
action.setStatusTip('Copy front of selected card')
action.triggered.connect(deck.copyCardFront)
action.setEnabled(False)
self.__copy_front = action
action = QtGui.QAction('Copy back image', self)
action.setShortcut('Ctrl+Shift+B')
action.setStatusTip('Copy back of selected card')
action.triggered.connect(deck.copyCardBack)
action.setEnabled(False)
self.__copy_back = action
action = QtGui.QAction('Paste', self)
action.setShortcut('Ctrl+V')
action.setStatusTip('Paste after current (selected) card(s)')
action.triggered.connect(deck.paste)
action.setEnabled(False)
self.__paste_action = action
action = QtGui.QAction('Paste before', self)
action.setStatusTip('Paste before current (selected) card(s)')
action.triggered.connect(deck.pasteBefore)
action.setEnabled(False)
self.__paste_before_action = action
action = QtGui.QAction('Paste as &player', self)
action.setShortcut('Ctrl+1')
action.setStatusTip('Paste as player type card')
action.triggered.connect(deck.pastePlayer)
action.setEnabled(False)
self.__paste_player_action = action
action = QtGui.QAction('Paste as &encounter', self)
action.setShortcut('Ctrl+2')
action.setStatusTip('Paste as encounter type card')
action.triggered.connect(deck.pasteEncounter)
action.setEnabled(False)
self.__paste_encounter_action = action
action = QtGui.QAction('Paste as v&illain', self)
action.setShortcut('Ctrl+3')
action.setStatusTip('Paste as villain type card')
action.triggered.connect(deck.pasteVillain)
action.setEnabled(False)
self.__paste_villain_action = action
action = QtGui.QAction('&Settings', self)
action.setShortcut('Ctrl+,')
action.setStatusTip('Edit settings')
action.triggered.connect(self.menu_sel_settings)
settings_action = action
action = QtGui.QAction('&Reset settings', self)
action.setStatusTip('Reset settings to default values')
action.triggered.connect(self.menu_res_settings)
reset_action = action
action = QtGui.QAction('Show card &back on top', self)
action.setCheckable(True)
action.setShortcut('Ctrl+B')
action.setStatusTip('Show the back image on top')
action.toggled.connect(deck.back_image_on_top)
self.__back_on_top = action
action = QtGui.QAction('&Reset', self)
action.setShortcut('Ctrl+0')
action.setStatusTip('Reset zoom to 100% zoom level')
action.triggered.connect(deck.zoom_reset)
zoom_reset_action = action
action = QtGui.QAction('Zoom &In', self)
key = QtGui.QKeySequence(QtCore.Qt.CTRL | QtCore.Qt.Key_Plus)
action.setShortcut(key)
action.setStatusTip('Zoom in one zoom level')
action.triggered.connect(deck.zoom_in)
zoom_in_action = action
action = QtGui.QAction('Zoom &out', self)
key = QtGui.QKeySequence(QtCore.Qt.CTRL | QtCore.Qt.Key_Minus)
action.setShortcut(key)
action.setStatusTip('Zoom out one zoom level')
action.triggered.connect(deck.zoom_out)
zoom_out_action = action
action = QtGui.QAction('Select &all', self)
action.setShortcut('Ctrl+A')
action.setStatusTip('Select all cards')
action.triggered.connect(deck.selectAll)
select_all_action = action
action = QtGui.QAction('Select &none', self)
action.setShortcut('Ctrl+Shift+A')
action.setStatusTip('Unselect all cards')
action.triggered.connect(deck.selectNone)
select_none_action = action
action = QtGui.QAction('Set &player type', self)
action.setShortcut('Ctrl+4')
action.setStatusTip('Set card type to player')
action.setEnabled(False)
action.triggered.connect(deck.setPlayerType)
self.__set_player = action
action = QtGui.QAction('Set &encounter type', self)
action.setShortcut('Ctrl+5')
action.setStatusTip('Set card type to encounter')
action.setEnabled(False)
action.triggered.connect(deck.setEncounterType)
self.__set_encounter = action
action = QtGui.QAction('Set &villain type', self)
action.setShortcut('Ctrl+6')
action.setStatusTip('Set card type to villain')
action.setEnabled(False)
action.triggered.connect(deck.setVillainType)
self.__set_villain = action
action = QtGui.QAction('Set &unspecified type', self)
action.setShortcut('Ctrl+7')
action.setStatusTip('Set card type to unspecified')
action.setEnabled(False)
action.triggered.connect(deck.setUnspecifiedType)
self.__set_unspecified = action
action = QtGui.QAction('Load &front image ...', self)
action.setStatusTip('Open image file as new front side')
action.setEnabled(False)
action.triggered.connect(deck.setFrontImage)
self.__set_front_image = action
action = QtGui.QAction('Load &back image ...', self)
action.setStatusTip('Open image file as new back side')
action.setEnabled(False)
action.triggered.connect(deck.setBackImage)
self.__set_back_image = action
action = QtGui.QAction('Use &front as back', self)
action.setStatusTip('Set back side to be the same as the front image')
action.setEnabled(False)
action.triggered.connect(deck.useFrontAsBack)
self.__use_front_as_back = action
action = QtGui.QAction('&Remove back', self)
action.setStatusTip('Remove the back side image (but keep card type)')
action.setEnabled(False)
action.triggered.connect(deck.removeBackImage)
self.__remove_back_image = action
action = QtGui.QAction('Rota&te 180°', self)
action.setShortcut('Ctrl+R')
action.setStatusTip('Rotates the front card(s) 180°')
action.setEnabled(False)
action.triggered.connect(deck.rotateHalfCircle)
self.__rotate_half_circle = action
action = QtGui.QAction('Rotate 90° (&clockwise)', self)
action.setStatusTip('Rotates the front card(s) 90° clockwise')
action.setEnabled(False)
action.triggered.connect(deck.rotateClockwise)
self.__rotate_clockwise = action
action = QtGui.QAction('Rotate 90° (&anticlockwise)', self)
action.setStatusTip('Rotates the front card(s) 90° anticlockwise')
action.setEnabled(False)
action.triggered.connect(deck.rotateAntiClockwise)
self.__rotate_anti_clockwise = action
action = QtGui.QAction('Delete', self)
key = QtGui.QKeySequence(QtCore.Qt.Key_Delete)
action.setShortcut(key)
action.setStatusTip('Deletes selected card(s)')
action.setEnabled(False)
action.triggered.connect(deck.deleteCards)
self.__delete_cards = action
action = QtGui.QAction('&Get back images ...', self)
action.setStatusTip('Install card back images from Hall of Heroes')
action.triggered.connect(self.menu_download_card_backs)
self.__download_card_backs = action
action = QtGui.QAction('Import card ...', self)
action.setShortcut('Ctrl+M')
action.setStatusTip('Import card from marvelcdb.com')
action.triggered.connect(self.menu_mcdb_import_card)
mcdb_import_card = action
action = QtGui.QAction('Import deck ...', self)
action.setShortcut('Shift+Ctrl+M')
action.setStatusTip('Import deck from marvelcdb.com')
action.triggered.connect(self.menu_mcdb_import_deck)
mcdb_import_deck = action
action = QtGui.QAction('Enable', self)
action.setStatusTip('Enable OCTGN metadata for deck')
action.triggered.connect(self.menu_octgn_enable)
self._octgn_enable = action
action = QtGui.QAction('&Edit ...', self)
action.setShortcut('Ctrl+E')
action.setStatusTip('Edit OCTGN metadata')
action.setEnabled(False)
action.triggered.connect(self.menu_octgn_edit)
self._octgn_edit = action
action = QtGui.QAction('&Edit Selected ...', self)
action.setShortcut('Shift+Ctrl+E')
action.setStatusTip('Edit OCTGN metadata for selected card(s)')
action.setEnabled(False)
action.triggered.connect(self.menu_octgn_edit_selected)
self._octgn_edit_selected = action
action = QtGui.QAction('&Delete', self)
action.setStatusTip('Delete OCTGN metadata')
action.setEnabled(False)
action.triggered.connect(self.menu_octgn_delete)
self._octgn_delete = action
action = QtGui.QAction('Imp&ort card(s) ...', self)
action.setShortcut('Shift+Ctrl+O')
action.setStatusTip('Import card(s) from local OCTGN database')
action.triggered.connect(self.menu_octgn_import)
self._octgn_import = action
action = QtGui.QAction('Import from .o8d ...', self)
action.setStatusTip('Import card(s) from local OCTGN database')
action.triggered.connect(self.menu_octgn_import_o8d)
self._octgn_import_o8d = action
action = QtGui.QAction('&Install deck as card set', self)
action.setStatusTip('Install the current deck directly into OCTGN')
action.setEnabled(False)
action.triggered.connect(self.menu_octgn_install)
self._octgn_install = action
action = QtGui.QAction('&Uninstall deck as card set', self)
action.setStatusTip('Uninstalls card set with same ID as current deck '
' from OCTGN')
action.setEnabled(False)
action.triggered.connect(self.menu_octgn_uninstall)
self._octgn_uninstall = action
action = QtGui.QAction('Create virtual installation', self)
action.setStatusTip('Create a virtual OCTGN Data/ directory')
action.triggered.connect(self.menu_octgn_create_virtual_installation)
self._octgn_create_virtual_installation = action
action = QtGui.QAction('Install image packs', self)
action.setStatusTip('Install OCTGN MC image packs')
action.triggered.connect(self.menu_octgn_install_image_packs)
self._octgn_install_image_packs = action
action = QtGui.QAction('Install .zip card set', self)
action.setStatusTip('Install a (set of) .zip format card set(s)')
action.triggered.connect(self.menu_octgn_card_set_installer)
self._octgn_card_set_installer = action
action = QtGui.QAction('Uninstall .zip card set', self)
action.setStatusTip('Uninstalls a (set of) .zip format card set(s)')
action.triggered.connect(self.menu_octgn_card_set_uninstaller)
self._octgn_card_set_uninstaller = action
action = QtGui.QAction('&About', self)
action.setStatusTip('Information about this app')
action.triggered.connect(self.helpAbout)
help_about = action
action = QtGui.QAction('&Resources', self)
action.setStatusTip('Information about relevant resources')
action.triggered.connect(self.helpResources)
help_resources = action
action = QtGui.QAction('&Usage', self)
action.setStatusTip('Information about usage')
action.triggered.connect(self.helpUsage)
help_usage = action
# Menu bar
menu_bar = self.menuBar()
# Former workaround for non-functional OSX menu integration
# if platform.system() == 'Darwin':
# menu_bar.setNativeMenuBar(False)
file_menu = menu_bar.addMenu('&File')
file_menu.addAction(new_action)
file_menu.addSeparator()
file_menu.addAction(load_action)
file_menu.addAction(self.__save_action)
file_menu.addAction(self.__save_as_action)
export_menu = file_menu.addMenu('&Export')
export_menu.addAction(self._export_pdf_action)
export_menu.addAction(export_tts_action)
export_octgn_menu = export_menu.addMenu('&Octgn')
export_octgn_menu.addAction(self.__export_octgn_card_set_action)
export_octgn_menu.addAction(self.__export_octgn_deck_action)
file_menu.addSeparator()
file_menu.addAction(exit_action)
edit_menu = menu_bar.addMenu('&Edit')
edit_menu.addAction(self.__undo_action)
edit_menu.addAction(self.__redo_action)
edit_menu.addSeparator()
edit_menu.addAction(self.__cut_action)
edit_menu.addAction(self.__copy_action)
edit_menu.addAction(self.__copy_front)
edit_menu.addAction(self.__copy_back)
edit_menu.addAction(self.__paste_action)
paste_menu = edit_menu.addMenu('Paste &special')
paste_menu.addAction(self.__paste_before_action)
paste_menu.addAction(self.__paste_player_action)
paste_menu.addAction(self.__paste_encounter_action)
paste_menu.addAction(self.__paste_villain_action)
edit_menu.addSeparator()
edit_menu.addAction(select_all_action)
edit_menu.addAction(select_none_action)
edit_menu.addSeparator()
edit_menu.addAction(settings_action)
edit_menu.addAction(reset_action)
view_menu = menu_bar.addMenu('&View')
view_menu.addAction(self.__back_on_top)
zoom_menu = view_menu.addMenu('&Zoom')
zoom_menu.addAction(zoom_reset_action)
zoom_menu.addAction(zoom_in_action)
zoom_menu.addAction(zoom_out_action)
selection_menu = menu_bar.addMenu('&Selection')
selection_menu.addAction(self.__set_player)
selection_menu.addAction(self.__set_encounter)
selection_menu.addAction(self.__set_villain)
selection_menu.addAction(self.__set_unspecified)
selection_menu.addSeparator()
selection_menu.addAction(self.__set_front_image)
selection_menu.addAction(self.__set_back_image)
selection_menu.addAction(self.__use_front_as_back)
selection_menu.addAction(self.__remove_back_image)
selection_menu.addSeparator()
selection_menu.addAction(self.__rotate_half_circle)
selection_menu.addAction(self.__rotate_clockwise)
selection_menu.addAction(self.__rotate_anti_clockwise)
selection_menu.addSeparator()
selection_menu.addAction(self.__delete_cards)
tools_menu = menu_bar.addMenu('&Tools')
tools_menu.addAction(self.__download_card_backs)
mcdb_menu = tools_menu.addMenu('&MarvelCDB')
mcdb_menu.addAction(mcdb_import_card)
mcdb_menu.addAction(mcdb_import_deck)
octgn_menu = tools_menu.addMenu('&Octgn')
octgn_menu.addAction(self._octgn_enable)
octgn_menu.addAction(self._octgn_edit)
octgn_menu.addAction(self._octgn_edit_selected)
octgn_menu.addAction(self._octgn_delete)
octgn_menu.addSeparator()
octgn_menu.addAction(self._octgn_import)
octgn_menu.addAction(self._octgn_import_o8d)
octgn_menu.addSeparator()
octgn_menu.addAction(self._octgn_install)
octgn_menu.addAction(self._octgn_uninstall)
octgn_menu.addSeparator()
octgn_db_menu = octgn_menu.addMenu('Manage database')
octgn_db_menu.addAction(self._octgn_create_virtual_installation)
octgn_db_menu.addAction(self._octgn_install_image_packs)
octgn_db_menu.addSeparator()
octgn_db_menu.addAction(self._octgn_card_set_installer)
octgn_db_menu.addAction(self._octgn_card_set_uninstaller)
selection_menu = menu_bar.addMenu('&Help')
selection_menu.addAction(help_about)
selection_menu.addAction(help_usage)
selection_menu.addAction(help_resources)
# Add a toolbar
toolbar = QtWidgets.QToolBar('Main toolbar')
toolbar.setIconSize(QtCore.QSize(16,16))
toolbar.addAction(new_action)
toolbar.addAction(load_action)
toolbar.addAction(self.__save_action)
self.addToolBar(toolbar)
# Add a status bar
self.setStatusBar(QtWidgets.QStatusBar(self))
# Set up some signal/slot connections
deck.hasSelection.connect(self.deckHasSelection)
deck.hasClipboard.connect(self.deckHasClipboard)
self.settingsChanged.connect(deck.settingsChanged)
deck._undo.haveUndo.connect(self.__undo_action.setEnabled)
deck._undo.haveRedo.connect(self.__redo_action.setEnabled)
deck.deckChanged.connect(self.deckChanged)
deck.filenameChange.connect(self.updateTitleFilename)
deck.deckHasOctgn.connect(self.enableOctgn)
# Monitor system clipboard, process once to update menu items
MCDeck.clipboard().dataChanged.connect(deck.systemClipboardChanged)
deck.systemClipboardChanged()
# Enable Drag & Drop onto main window
self.setAcceptDrops(True)
def dragEnterEvent(self, event):
mime = event.mimeData()
if (mime.hasUrls() or mime.hasImage() or
'application/x-qt-image' in mime.formats()):
event.accept()
else:
event.ignore()
event.accept()
    def dropEvent(self, event):
        """Handle a drop onto the main window (QWidget override).

        A single dropped local .zip/.mcd file is opened as a deck, a
        single .o8d file is imported as OCTGN cards (after optional
        confirmation to discard the current deck); anything else is
        forwarded to the deck's paste handler.
        """
        mime = event.mimeData()
        # If file is a single .zip or .mcd file, process as an "open file"
        # event rather than adding card(s) to the project
        if mime.hasUrls() and len(mime.urls()) == 1:
            url, = mime.urls()
            if url.isLocalFile():
                path = url.toLocalFile()
                # Extension check is on the last 4 characters, lowercased
                _ext = path[-4:].lower()
                if _ext in ('.zip', '.mcd', '.o8d'):
                    # Confirm discarding a non-empty deck before loading
                    if MCDeck.deck.has_cards():
                        _q = QtWidgets.QMessageBox.question
                        _k = QtWidgets.QMessageBox.Open
                        _k = _k | QtWidgets.QMessageBox.Cancel
                        _msg = ('Deck contains cards. Discard current deck to '
                                'load new data?')
                        btn = _q(self, 'Discard current deck?', _msg, _k)
                        if btn == QtWidgets.QMessageBox.Cancel:
                            return
                    if _ext in ('.zip', '.mcd'):
                        MCDeck.deck._open(path)
                        return
                    else:
                        # .o8d: clear deck (undoable) and import OCTGN cards
                        MCDeck.deck.clear(undo=True)
                        try:
                            # NOTE(review): 'num' is unused after this call
                            num = octgn.load_o8d_cards(path, parent=self)
                        except Exception as e:
                            ErrorDialog(self, '.o8d import error', 'Could not '
                                        f'import .o8d file: {e}').exec()
                            # Roll back to the pre-clear state on failure
                            MCDeck.deck._undo_action(deselect=False, purge=True)
                        else:
                            MCDeck.deck._deck_changed()
                            MCDeck.deck.reset()
                        return
        # For any other situation, handle through the paste method
        MCDeck.deck.paste(droppedMimeData=mime)
@classmethod
def clipboard(cls):
"""Application QClipboard object."""
if cls._clipboard is None:
cls._clipboard = QtGui.QGuiApplication.clipboard()
return cls._clipboard
@QtCore.Slot()
def menu_sel_settings(self):
settings = SettingsDialog(MCDeck.settings)
if settings.exec():
self.settingsChanged.emit()
@QtCore.Slot()
def menu_res_settings(self):
_dfun = QtWidgets.QMessageBox.question
_keys = QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.Cancel
confirm = _dfun(self, 'Confirm reset', 'Do you really want to reset '
'settings to default values?', _keys)
if confirm == QtWidgets.QMessageBox.Yes:
MCDeck.settings.clear()
MCDeck.deck.reset()
@QtCore.Slot()
def menu_download_card_backs(self):
dialog = QtWidgets.QDialog(self)
main_layout = QtWidgets.QVBoxLayout()
_hoh_url = 'https://hallofheroeslcg.com/custom-content/'
_txt = (f'<p>Use card back images from <a href="{_hoh_url}">'
'Hall of Heroes</a> as the default card backs.</p>'
'<p>Selecting an image set will (try to) download player, '
'encounter and villain card back images, and update settings '
'to use them as the new defaults.</p>'
'<p>Note: these images may not be the optimal ones for use with'
' your printer, and depending on your quality and/or color '
'correction requirements, you may be better off getting card '
'back images from other sources.</p>')
msg = QtWidgets.QLabel(_txt)
msg.setOpenExternalLinks(True)
msg.setWordWrap(True)
main_layout.addWidget(msg)
card_selector = QtWidgets.QHBoxLayout()
card_selector.addWidget(QtWidgets.QLabel('Select card set:'))
cardset_cb = QtWidgets.QComboBox()
_tip = ('Card set to download and set as default:\n'
'- Branded, intended for print (source: Hall of Heroes)\n'
'- Branded, intended for TTS (source: Homebrew)\n'
'- Promo (source: Hall of Heroes)\n'
'- Fans (source: Hall of Heroes)')
cardset_cb.setToolTip(_tip)
for option in ('Branded, print (HoH)', 'Branded, TTS (Homebrew)',
'Promo', 'Fans'):
cardset_cb.addItem(option)
card_selector.addWidget(cardset_cb)
main_layout.addLayout(card_selector)
buttons = QtWidgets.QHBoxLayout()
buttons.addStretch(1)
btns = QtWidgets.QDialogButtonBox(QtWidgets.QDialogButtonBox.Ok |
QtWidgets.QDialogButtonBox.Cancel)
btns.rejected.connect(dialog.reject)
btns.accepted.connect(dialog.accept)
buttons.addWidget(btns)
main_layout.addLayout(buttons)
dialog.setLayout(main_layout)
if dialog.exec():
cardset = cardset_cb.currentIndex()
_dict = {0:['marvel-player-back','marvel-encounter-back',
'marvel-villain-back'],
2:['promo-player-back', 'promo-encounter-back',
'promo-villain-back'],
3:['fan-back-player', 'fan-back-encounter',
'fan-back-villain']}
if cardset in _dict:
pre = 'https://hallofheroeshome.files.wordpress.com/2021/02/'
post = '.png'
urls = [pre + s + post for s in _dict[cardset]]
elif cardset == 1:
urls = [('https://cdn.discordapp.com/attachments/64131799'
'9168454685/869297402912321616/trasera_azul.png'),
('https://cdn.discordapp.com/attachments/64131799'
'9168454685/869297401549160469/trasera_naranja.png'),
('https://cdn.discordapp.com/attachments/64131799'
'9168454685/869297402161537024/trasera_lila.png')]
else:
raise RuntimeError('Shold never happen')
try:
# Resolve local file names for images
conf = LcgAppResources(appname='mcdeck', author='Cloudberries')
conf_dir = conf.user_data_dir()
back_dir = os.path.join(conf_dir, 'card_back')
img_paths = []
for url in urls:
_basename = hashlib.sha256(url.encode('utf-8')).hexdigest()
_path = os.path.join(back_dir, _basename)
img_paths.append(f'{_path}.png')
# Download images if they do not already exist
for img_path in img_paths:
if not os.path.isfile(img_path):
cached = False
break
else:
cached = True
# If not cached, retreive images and store locally
if not cached:
images = []
for url in urls:
img = download_image(url)
img.setWidthMm(63.5)
img.setHeightMm(88)
images.append(img)
# Store downloaded images in standard location
pathlib.Path(back_dir).mkdir(parents=True, exist_ok=True)
for img, path in zip(images, img_paths):
img.save(path)
# Update settings
settings = MCDeck.settings
settings.card_back_file_player = img_paths[0]
settings.card_back_file_encounter = img_paths[1]
settings.card_back_file_villain = img_paths[2]
if cardset == 1:
_bleed = 0
else:
_bleed = 2
settings.player_bleed_mm = _bleed
settings.encounter_bleed_mm = _bleed
settings.villain_bleed_mm = _bleed
_i = QtWidgets.QMessageBox.information
_msg = ('Settings have been updated to use the images as the '
'default card backs for player, encounter and '
'villain cards')
if cached:
_msg += ' (using cached images).'
else:
_msg += '.'
_i(self, 'Settings updated', _msg)
self.deck.reset()
except Exception as e:
err = lambda s1, s2: ErrorDialog(self, s1, s2).exec()
err('Operation error', f'Could not update images: {e}')
@QtCore.Slot()
def menu_mcdb_import_card(self):
err = lambda s1, s2: ErrorDialog(self, s1, s2).exec()
# Get card ID(s) or URL(s)
dialog = MarvelCDBCardImportDialog(self)
if not dialog.exec():
return
# Load cards database (with progress bar) if not already loaded
try:
have_db = self._loadMarvelCDB()
except Exception as e:
err('MarvelCDB database load error',
f'Could not load database: {e}')
return
else:
if not have_db:
return
# Parse entered values, generating (hopefully valid) IDs.
s = dialog._le.text().strip()
if not s:
err('No input', 'No ID or URL was entered')
return
s = s.replace(',', ' ')
s_l = s.split(' ')
s_l = [s.strip() for s in s_l if s]
if not s_l:
err('Invalid input', 'Invalid format of input')
return
id_l = []
url_prefix = 'https://marvelcdb.com/card/'
for s in s_l:
if s.startswith(url_prefix):
s = s[len(url_prefix):]
s = s.lower()
if s.endswith('b'):
# If alter-ego card, replace with its opposite hero card
s = s[:-1] + 'a'
id_l.append(s)
# Load cards for the provided IDs
cards = []
placeholder = dialog._create_placeholders_chk.isChecked()
self.__operation_cancelled = False
_qpd = QtWidgets.QProgressDialog
dlg = _qpd('Importing card(s)', 'Cancel', 0, len(cards))
dlg.show()
for code in id_l:
try:
_card = MarvelCDB.card(code)
if _card is None:
err('No such card',
f'No card with code {code} in local MarvelCDB index')
return
card = _card.to_mcdeck_card(placeholder=placeholder)
dlg.setValue(dlg.value() + 1)
QtCore.QCoreApplication.processEvents() # Force Qt update
if self.__operation_cancelled:
err('Operation cancelled', 'Operation cancelled by user.')
return
except Exception as e:
dlg.hide()
err('Card import failed', 'Card import failed for card with '
f'id {code}: {e}')
return
else:
cards.append(card)
dlg.hide()
# Add card(s) to the deck
if not MCDeck.deck._octgn:
self.menu_octgn_enable()
MCDeck.deck._undo.add_undo_level(hide=False)
for card in cards:
self.deck.addCardObject(card)
self.deck.reset()
    @QtCore.Slot()
    def menu_mcdb_import_deck(self):
        """Import a full deck from MarvelCDB by deck ID or decklist URL.

        Loads the local MarvelCDB index if needed, fetches the deck,
        filters hero/non-hero cards per dialog checkboxes, and adds the
        resulting cards to the deck with OCTGN metadata enabled.
        """
        err = lambda s1, s2: ErrorDialog(self, s1, s2).exec()
        # Get deck ID or URL
        dialog = MarvelCDBDeckImportDialog(self)
        if not dialog.exec():
            return
        # Load cards database (with progress bar) if not already loaded
        try:
            have_db = self._loadMarvelCDB()
        except Exception as e:
            err('MarvelCDB database load error',
                f'Could not load database: {e}')
            return
        else:
            if not have_db:
                return
        # Parse entered value as a (hopefully) deck ID
        s = dialog._le.text().strip()
        if not s:
            err('No input', 'No ID or URL was entered')
            return
        url_prefix = 'https://marvelcdb.com/decklist/view/'
        if s.startswith(url_prefix):
            # Strip URL prefix and anything after the deck ID segment
            s = s[len(url_prefix):]
            s = s.split('/')[0]
        # Load the deck
        try:
            deck = MarvelCDB.load_deck(s)
        except Exception as e:
            err('Deck import failed', 'Deck import failed for deck ID '
                f'{s}: {e}')
            return
        # Filter cards depending on whether hero and/or non-hero cards
        # should be imported
        import_hero_cards = dialog._include_hero_cards_chk.isChecked()
        import_other_cards = dialog._include_non_hero_cards_chk.isChecked()
        deck_cards = []
        # deck.cards yields (card, number_of_copies) pairs
        for card, num in deck.cards:
            if card.belongs_to_hero_set():
                if import_hero_cards:
                    deck_cards.append((card, num))
            else:
                if import_other_cards:
                    deck_cards.append((card, num))
        if not deck_cards:
            err('Nothing to import', 'No cards to import (after applying '
                'settings on whether to import hero/non-hero cards)')
            return
        # Load all cards from the deck
        cards = []
        placeholder = dialog._create_placeholders_chk.isChecked()
        num_cards = sum(num for card, num in deck_cards)
        self.__operation_cancelled = False
        _qpd = QtWidgets.QProgressDialog
        dlg = _qpd('Importing card(s)', 'Cancel', 0, num_cards)
        dlg.show()
        for card, num in deck_cards:
            try:
                result = card.to_mcdeck_card(copies=num,
                                             placeholder=placeholder)
                dlg.setValue(dlg.value() + num)
                QtCore.QCoreApplication.processEvents() # Force Qt update
                if self.__operation_cancelled:
                    err('Operation cancelled', 'Operation cancelled by user.')
                    return
            except Exception as e:
                dlg.hide()
                err('Card import failed', 'Card import failed for card with '
                    f'id {card.code}: {e}')
                return
            else:
                # With copies > 1, to_mcdeck_card returns an iterable of
                # cards rather than a single card
                if num == 1:
                    cards.append(result)
                else:
                    for c in result:
                        cards.append(c)
        dlg.hide()
        # Add card(s) to the deck
        if not MCDeck.deck._octgn:
            self.menu_octgn_enable()
        MCDeck.deck._undo.add_undo_level(hide=False)
        for card in cards:
            self.deck.addCardObject(card)
        self.deck.reset()
@QtCore.Slot()
def menu_octgn_enable(self):
if not MCDeck.deck._octgn:
MCDeck.deck._octgn = octgn.OctgnCardSetData(name='')
for i, card in enumerate(MCDeck.deck._card_list_copy):
card._octgn = octgn.OctgnCardData(name='')
self.enableOctgn(True)
@QtCore.Slot()
def menu_octgn_edit(self):
MCDeck.deck._undo.add_undo_level(hide=False)
title = 'Edit OCTGN metadata (entire deck)'
if octgn.OctgnDataDialog(self, MCDeck.deck, title=title).exec():
MCDeck.deck._deck_changed()
else:
MCDeck.deck._undo_action(deselect=False, purge=True)
@QtCore.Slot()
def menu_octgn_edit_selected(self):
cards = MCDeck.deck.selected_cards()
if cards:
MCDeck.deck._undo.add_undo_level(hide=False)
t = f'Edit OCTGN metadata ({len(cards)} selected cards)'
if octgn.OctgnDataDialog(self, MCDeck.deck, cards, title=t).exec():
MCDeck.deck._deck_changed()
else:
MCDeck.deck._undo_action(deselect=False, purge=True)
@QtCore.Slot()
def menu_octgn_delete(self):
if MCDeck.deck._octgn:
_dfun = QtWidgets.QMessageBox.question
_keys = QtWidgets.QMessageBox.Ok | QtWidgets.QMessageBox.Cancel
_msg = ('This operation removes all current Octgn metadata '
'with no undo possible. Proceed with removal?')
k = _dfun(self, 'Confirm Octgn data removal', _msg, _keys)
if k == QtWidgets.QMessageBox.Ok:
MCDeck.deck._octgn = None
MCDeck.deck._undo.clear()
self.enableOctgn(False)
@QtCore.Slot()
def menu_octgn_import(self):
MCDeck.deck._undo.add_undo_level(hide=False)
try:
dialog = octgn.OctgnCardImportDialog(self)
except Exception as e:
err = lambda s1, s2: ErrorDialog(self, s1, s2).exec()
err('Octgn import error', f'Could not initiate card import: {e}')
else:
dialog.addedCards.connect(self._octgn_import_added_cards)
dialog.exec()
if dialog._imported_cards:
MCDeck.deck._deck_changed()
MCDeck.deck.reset()
else:
MCDeck.deck._undo_action(deselect=False, purge=True)
@QtCore.Slot()
def menu_octgn_import_o8d(self):
err = lambda s1, s2: ErrorDialog(self, s1, s2).exec()
if self.deck._octgn is None:
_dfun = QtWidgets.QMessageBox.question
_keys = QtWidgets.QMessageBox.Ok | QtWidgets.QMessageBox.Cancel
k = _dfun(self, 'Enable OCTGN metadata', 'Successful .o8d import '
'requires enabling OCTGN metadata. Proceed?', _keys)
if k == QtWidgets.QMessageBox.Cancel:
return
_dlg = QtWidgets.QFileDialog.getOpenFileName
_flt = 'OCTGN deck (*.o8d)'
try:
data_path = octgn.OctgnCardSetData.get_octgn_data_path(val=True)
except Exception as e:
err('Invalid data path', f'No OCTGN data path: {e}')
_dir = os.path.join(data_path, 'GameDatabase', octgn.mc_game_id,
'FanMade')
if not os.path.isdir(_dir):
_dir = data_path
path, cat = _dlg(self, 'Open MCD index or archive containing '
'an MCD index', filter=_flt, dir=_dir)
if not path:
return
MCDeck.deck._undo.add_undo_level(hide=False)
try:
num = octgn.load_o8d_cards(path, data_path=data_path, parent=self)
except Exception as e:
err('.o8d import error', f'Could not import: {e}')
MCDeck.deck._undo_action(deselect=False, purge=True)
raise(e)
else:
MCDeck.deck._deck_changed()
MCDeck.deck.reset()
    @QtCore.Slot()
    def _octgn_import_added_cards(self):
        # Slot for OctgnCardImportDialog.addedCards: refresh the deck view
        # so cards imported while the dialog is open become visible.
        MCDeck.deck.reset()
@QtCore.Slot()
def menu_octgn_install(self):
err = lambda s1, s2: ErrorDialog(self, s1, s2).exec()
try:
_f = octgn.OctgnCardSetData.install_octgn_card_set
success = _f(self, MCDeck.deck, MCDeck.settings)
except Exception as e:
err('OCTGN install error', f'Error: {e}')
else:
if success:
_i = QtWidgets.QMessageBox.information
_name = MCDeck.deck._octgn.name
_id = MCDeck.deck._octgn.set_id
_msg = (f'Card set "{_name}" with GUID {_id} was '
'successfully installed.')
_i(self, 'Successful installation', _msg)
else:
err('Installation failed', 'Installation did not complete')
@QtCore.Slot()
def menu_octgn_uninstall(self):
err = lambda s1, s2: ErrorDialog(self, s1, s2).exec()
try:
_f = octgn.OctgnCardSetData.uninstall_octgn_card_set
success = _f(self, MCDeck.deck)
except Exception as e:
err('OCTGN uninstall error', f'Error: {e}')
else:
if success:
_i = QtWidgets.QMessageBox.information
_id = MCDeck.deck._octgn.set_id
_msg = (f'Card set with GUID {_id} was '
'successfully uninstalled.')
_i(self, 'Successful uninstall', _msg)
else:
err('Uninstall failed', 'Uninstall did not complete')
@QtCore.Slot()
def menu_octgn_card_set_installer(self):
err = lambda s1, s2: ErrorDialog(self, s1, s2).exec()
try:
data_path = octgn.OctgnCardSetData.get_octgn_data_path(val=True)
except Exception as e:
err('Invalid OCTGN data path', f'No OCTGN data path: {e}')
_q = QtWidgets.QMessageBox.question
_k = QtWidgets.QMessageBox.Ok
_k = _k | QtWidgets.QMessageBox.Cancel
_msg = ('The card set installer will install a set of .zip files in '
'the format generated by '
'File -> Export -> Octgn -> Card Set.\n\n'
'This is intended primarily as a way to conveniently '
'reinstall sets of custom cards after an OCTGN card set '
'update (which wipes custom card sets); just keep all those '
'.zip files in some folder, and reinstall them in one single '
'operation.\n\n'
'It is also a convenient way to install a new .zip packaged '
'card set.\n\n'
'WARNING: installing a card set will wipe any previous card '
'set installed under the same card set GUID.\n\n'
'Proceed with card set installation?')
btn = _q(self, 'Confirm use of card set installer', _msg, _k)
if btn == QtWidgets.QMessageBox.Cancel:
return
_dlg = QtWidgets.QFileDialog.getOpenFileNames
_flt = 'Card set (*.zip)'
_dir = self.settings.octgn_card_sets_path
if not _dir or not os.path.isdir(_dir):
_dir = None
paths, cat = _dlg(self, 'Select card set(s) to install',
filter=_flt, dir=_dir)
if not paths:
return
installed, skipped = octgn.install_card_sets(data_path, paths)
if installed:
# Reload the OCTGN card database
octgn.OctgnCardSetData.load_all_octgn_sets(data_path=data_path,
force=True)
_i = QtWidgets.QMessageBox.information
_msg = ''
if installed:
_msg += 'The following card sets were installed:\n'
for _f in installed:
_msg += f'* {_f}\n'
_msg += '\n'
if skipped:
_msg += 'The following card sets could not be installed:\n'
for _f, _m in skipped:
_msg += f'* {_f} ({_m})\n'
_msg += '\n'
if installed:
_msg += 'The OCTGN card database has been reloaded.'
_i(self, 'Card set installation result', _msg)
@QtCore.Slot()
def menu_octgn_card_set_uninstaller(self):
err = lambda s1, s2: ErrorDialog(self, s1, s2).exec()
try:
data_path = octgn.OctgnCardSetData.get_octgn_data_path(val=True)
except Exception as e:
err('Invalid OCTGN data path', f'No OCTGN data path: {e}')
_q = QtWidgets.QMessageBox.question
_k = QtWidgets.QMessageBox.Ok
_k = _k | QtWidgets.QMessageBox.Cancel
_msg = ('The card set uninstaller will inspect a set of .zip files in '
'the format generated by '
'File -> Export -> Octgn -> Card Set and uninstall the '
'corresponding files from a local OCTGN database.\n\n'
'Proceed with selecting card sets for uninstalling?')
btn = _q(self, 'Confirm use of card set uninstaller', _msg, _k)
if btn == QtWidgets.QMessageBox.Cancel:
return
_dlg = QtWidgets.QFileDialog.getOpenFileNames
_flt = 'Card set (*.zip)'
_dir = self.settings.octgn_card_sets_path
if not _dir or not os.path.isdir(_dir):
_dir = None
paths, cat = _dlg(self, 'Select card set(s) to uninstall',
filter=_flt, dir=_dir)
if not paths:
return
uninstalled, skipped = octgn.uninstall_card_sets(data_path, paths)
if uninstalled:
# Reload the OCTGN card database
octgn.OctgnCardSetData.load_all_octgn_sets(data_path=data_path,
force=True)
_i = QtWidgets.QMessageBox.information
_msg = ''
if uninstalled:
_msg += 'The following card sets were removed:\n'
for _f in uninstalled:
_msg += f'* {_f}\n'
_msg += '\n'
if skipped:
_msg += 'The following card sets could not be removed:\n'
for _f, _m in skipped:
_msg += f'* {_f} ({_m})\n'
_msg += '\n'
if uninstalled:
_msg += 'The OCTGN card database has been reloaded.'
_i(self, 'Card set installation result', _msg)
    @QtCore.Slot()
    def menu_octgn_create_virtual_installation(self):
        """Create a virtual OCTGN Data/ directory in a chosen location.

        Explains the operation, asks for a parent directory, clones the
        MC game database via git into a new Data/ subdirectory, and
        optionally points the OCTGN data path setting at it.
        """
        # NOTE(review): QMessageBox(parent, title, text) matches no
        # documented Qt overload — confirm this constructor call works as
        # intended with the PySide binding in use.
        info = QtWidgets.QMessageBox(self, 'Confirm operation', '')
        text = '''<p>This operation sets up a virtual OCTGN <tt>Data/</tt>
        directory. Note that in order for this operation to work, the command
        line tool <a href="https://git-scm.com/">git</a> <b>must be
        installed</b> on the system.</p>
        <p>An installation of <a href="https://www.octgn.net/">OCTGN</a> has
        a user directory in which game data is installed, and a subdirectory
        <tt>Data/</tt> in which all Marvel Champions related content exists.
        As OCTGN is Windows-only, this content is not accessible on other
        platforms.</p>
        <p>What this operation does, is to set up a user selected
        directory with the same structure as an OCTGN <tt>Data/</tt>
        directory, including key sub-directories. It then uses <tt>git</tt>
        to download the latest version of game database data from
        <tt>https://github.com/Ouroboros009/OCTGN-Marvel-Champions.git</tt>.
        </p>
        <p>Installation of image packs needs to be performed in a separate
        operation (available from Tools -> Octgn in the menu).</p>
        <p>The next step is to select a parent directory for the virtual
        OCTGN installation. A subdirectory <tt>Data/</tt> will be created
        inside that directory. <b>Proceed with selecting parent directory of
        virtual installation?</b></p>
        '''
        info.setInformativeText(text)
        _btns = QtWidgets.QMessageBox.Ok | QtWidgets.QMessageBox.Cancel
        info.setStandardButtons(_btns)
        info.setDefaultButton(QtWidgets.QMessageBox.Ok)
        result = info.exec()
        if result & QtWidgets.QMessageBox.Cancel:
            return
        _title = 'Choose directory in which to create Data/ structure'
        path = QtWidgets.QFileDialog.getExistingDirectory(self, _title)
        if not path:
            return
        # Simple two-step progress dialog shown while cloning
        _qpd = QtWidgets.QProgressDialog
        dlg = _qpd('Downloading MC game database from github', 'Cancel', 0, 2)
        dlg.show()
        dlg.setValue(1)
        QtCore.QCoreApplication.processEvents() # Force Qt update
        try:
            data_path = os.path.join(path, 'Data')
            octgn.create_virtual_data_path(data_path)
        except Exception as e:
            dlg.hide()
            err = lambda s1, s2: ErrorDialog(self, s1, s2).exec()
            err('Operation failed', f'Could not install: {e}')
            return
        else:
            dlg.hide()
            _dfun = QtWidgets.QMessageBox.question
            _keys = QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No
            _msg = ('Installation was successful, data was installed in '
                    f'{data_path}\n\n'
                    'Do you wish to update the OCTGN data path in settings to '
                    'use the newly created virtual installation?')
            k = _dfun(self, 'Choose whether to update settings', _msg, _keys)
            if k == QtWidgets.QMessageBox.Yes:
                MCDeck.settings.octgn_path = data_path
                # Reload card database from the new location
                octgn.OctgnCardSetData.load_all_octgn_sets(data_path=data_path,
                                                           force=True)
    @QtCore.Slot()
    def menu_octgn_install_image_packs(self):
        """Install OCTGN MC .o8c image pack(s) into the Data/ directory.

        Asks the user to select .o8c files, installs each one, reloads
        the card database if anything was installed, and reports the
        per-file outcome.
        """
        try:
            data_path = octgn.OctgnCardSetData.get_octgn_data_path(val=True)
        except Exception as e:
            err = lambda s1, s2: ErrorDialog(self, s1, s2).exec()
            err('No OCTGN Data/ directory',
                f'Could not find a valid Data/ directory: {e}')
            return
        # NOTE(review): QMessageBox(parent, title, text) matches no
        # documented Qt overload — confirm with the PySide binding in use.
        info = QtWidgets.QMessageBox(self, 'Confirm operation', '')
        text = '''<p>This operation installs Marvel Champions OCTGN .o8c image
        packs into the OCTGN <tt>Data/</TT> directory. See the OCTGN
        <a href="https://twistedsistem.wixsite.com/octgnmarvelchampions/">
        MC module site</a> for information on how to download image packs.</p>
        <p>Proceed with selecting image packs to install?</p>
        '''
        info.setInformativeText(text)
        _btns = QtWidgets.QMessageBox.Ok | QtWidgets.QMessageBox.Cancel
        info.setStandardButtons(_btns)
        info.setDefaultButton(QtWidgets.QMessageBox.Ok)
        result = info.exec()
        if result & QtWidgets.QMessageBox.Cancel:
            return
        _dlg = QtWidgets.QFileDialog.getOpenFileNames
        _flt = 'Card set (*.o8c)'
        paths, cat = _dlg(self, 'Select image pack(s) to install',
                          filter=_flt)
        if not paths:
            return
        # Install each pack, collecting successes and failures separately
        installed = []
        failed = []
        for o8c_path in paths:
            try:
                octgn.install_image_pack(data_path, o8c_path)
            except Exception as e:
                failed.append((o8c_path, str(e)))
            else:
                installed.append(o8c_path)
        if installed:
            # Reload the OCTGN card database after successful installs
            octgn.OctgnCardSetData.load_all_octgn_sets(data_path=data_path,
                                                       force=True)
        _i = QtWidgets.QMessageBox.information
        _msg = ''
        if installed:
            _msg += 'The following image packs were installed:\n'
            for _f in installed:
                _msg += f'* {_f}\n'
            _msg += '\n'
        if failed:
            _msg += 'The following image packs failed to install:\n'
            for _f, _m in failed:
                _msg += f'* {_f} ({_m})\n'
            _msg += '\n'
        if installed:
            _msg += 'The OCTGN card database has been reloaded.'
        _i(self, 'Image pack installation result', _msg)
@QtCore.Slot()
def deckHasSelection(self, status):
"""Update to whether deck has a current selection of cards."""
for w in (self.__cut_action, self.__copy_action, self.__set_player,
self.__set_encounter, self.__set_villain,
self.__set_unspecified, self.__set_front_image,
self.__set_back_image, self.__use_front_as_back,
self.__remove_back_image, self.__rotate_half_circle,
self.__rotate_clockwise, self.__rotate_anti_clockwise,
self.__delete_cards):
w.setEnabled(status)
_enable_octgn_edit_sel = bool(MCDeck.deck._octgn and status)
self._octgn_edit_selected.setEnabled(_enable_octgn_edit_sel)
selected_cards = MCDeck.deck.selected_cards()
if len(selected_cards) == 1:
self.__copy_front.setEnabled(True)
card, = selected_cards
self.__copy_back.setEnabled(card.back_img is not None)
else:
self.__copy_front.setEnabled(False)
self.__copy_back.setEnabled(False)
@QtCore.Slot()
def deckHasClipboard(self, status):
"""Update to whether deck has cards in the clipboard."""
for w in (self.__paste_action, self.__paste_before_action,
self.__paste_player_action, self.__paste_encounter_action,
self.__paste_villain_action):
w.setEnabled(status)
@QtCore.Slot()
def exitAction(self):
if MCDeck.deck._unsaved:
if self.deck.has_cards() or self.deck._undo.has_undo_information():
_dfun = QtWidgets.QMessageBox.question
_keys = QtWidgets.QMessageBox.Ok | QtWidgets.QMessageBox.Cancel
k = _dfun(self, 'Confirm exit', 'Exit without saving?', _keys)
if k == QtWidgets.QMessageBox.Cancel:
return
self.close()
    @QtCore.Slot()
    def helpAbout(self):
        """Show a help->about dialog box."""
        # NOTE(review): QMessageBox(parent, title, text) matches no
        # documented Qt overload — confirm with the PySide binding in use.
        about = QtWidgets.QMessageBox(self, 'About MCdeck', '')
        text = '''<p><b>MCdeck - © Cloudberries, 2022</b></p>
        <p><a href="https://pypi.org/project/mcdeck/">MCdeck</a> is a custom
        card deck builder app for
        <a href="https://www.fantasyflightgames.com/en/products/marvel-champions-the-card-game/">
        Marvel Champions: The Card Game</a>. Decks are constructed by adding
        card images, and can then be exported to supported export formats.</p>
        <p>Note that MCdeck is entirely fan made, and is in no way associated
        with or endorsed by owners of Marvel Champions intellectual property. It
        is intended entirely for using with custom user generated content.</p>
        <p>MCdeck is released under the
        <a href="https://www.gnu.org/licenses/gpl-3.0-standalone.html">
        GNU General Public License v3.0</a> or later. License details are
        included with the source code.</p>
        '''
        # The HTML body is set as informative text; the main text is empty
        about.setInformativeText(text)
        about.setStandardButtons(QtWidgets.QMessageBox.Ok)
        about.setDefaultButton(QtWidgets.QMessageBox.Ok)
        about.exec()
    @QtCore.Slot()
    def helpUsage(self):
        """Show a help->usage dialog box."""
        # NOTE(review): QMessageBox(parent, title, text) matches no
        # documented Qt overload — confirm with the PySide binding in use.
        about = QtWidgets.QMessageBox(self, 'Usage', '')
        text = '''<p>MCdeck is hopefully more or less self explanatory. Most
        options are explained by tool tips, and the app is not rocket science;
        you can combine cards into decks, open and save decks, export to
        printable PDFs, and export card sets for Tabletop Simulator or OCTGN.
        </p>
        <p>Many operations act on a <em>card selection</em>. A single card
        can be selected by left-clicking on it. If the ctrl (meta) key is
        held while clicking, the card's selection status is toggled. If the
        shift key is held, then the selection is extended as a range to
        include the clicked card.</p>
        <p>Decks can be saved to a *.zip file, which will include a card index
        on the top level (in a file "mcdeck.mcd") as well as card images in
        various sub-directories. Such a card deck .zip file can be opened from
        MCdeck.</p>
        <p>MCdeck can also open a .mcd file directly from the local drive. That
        card index will then be used to reference card images on the local
        drive, rather than inside a .zip file. If you e.g. unzip a .zip file
        generated by MCdeck, you can open the unpacked mcdeck.mcd file and it
        will load the same (unpacked) content.</p>
        <p>The app supports pasting image files and images from the system
        clipboard, as well as dragging image files on the app. If a
        .zip, .mcd or .o8d file is dragged onto the app, MCdeck will try to
        open that file as a deck.</p>
        '''
        # The HTML body is set as informative text; the main text is empty
        about.setInformativeText(text)
        about.setStandardButtons(QtWidgets.QMessageBox.Ok)
        about.setDefaultButton(QtWidgets.QMessageBox.Ok)
        about.exec()
@QtCore.Slot()
def helpResources(self):
    """Show a help->resources dialog box.

    Bugfix: removed a stray closing </a> tag after "Tabletop Simulator"
    in the rendered rich text (there was no matching opening tag).
    """
    # NOTE(review): positional args (self, 'Resources', '') do not match
    # the documented QMessageBox(parent) overload — confirm with the
    # PySide version in use
    about = QtWidgets.QMessageBox(self, 'Resources', '')
    text = '''<p>This tool aims to assist with using custom cards together
with <a href="https://www.fantasyflightgames.com/en/products/marvel-champions-the-card-game/">
Marvel Champions: The Card Game</a> (MC); printing cards for use with
the physical game as well as exporting card sets for use with
<a href="https://store.steampowered.com/app/286160/Tabletop_Simulator/">
Tabletop Simulator</a> or <a href="https://www.octgn.net/">OCTGN</a>.</p>
<p>The tool is intended to be a <em>supplement</em> to MC. You
will need a physical copy of the game in order to combine with custom
cards for physical play. As a user of MCdeck, you are responsible for
how you use it, including any legal restrictions related to copyrights.
There is also a <em>moral</em> obligation to ensure that fan made
custom products act as a <em>supplement</em> to the related commercial
product, in a way that benefits both the customers and the owner of the
product. Make sure you use this tool responsibly in a way that also
supports the business of MC copyright holders.</p>
<p>A good starting resources for custom content is
<a href="https://hallofheroeslcg.com/custom-content/">Hall of Heroes</a>
and the MC Homebrew
<a href="https://discordapp.com/invite/fWrvrNh">discord</a>, which is
a thriving community for custom MC content. </p>
<p>For Tabletop Simulator (TTS), the mod
<a href="https://steamcommunity.com/sharedfiles/filedetails/?id=2514286571">Hitch's Table</a>
has a great implementation of MC. TTS deck images exported from MCdeck
can be imported directly into a TTS game.</p>
<p>MCdeck can interact with a local OCTGN installation with the MC
<a href="https://twistedsistem.wixsite.com/octgnmarvelchampions">
mod</a> installed. Some custom content is readily available from
Ouroboros' Google Drive folder with pre-packaged
<a href="https://drive.google.com/drive/u/1/folders/1ruQRsptiuxECyzocnQ5dirXAQmexX8tu">
heroes and scenarios</a>. MCdeck can also interact with OCTGN content
on systems that do not have OCTGN installed (including non-Windows
platforms). Select Tools -> Octgn -> Create virtual installation for
more information.</p>
<p>Your best bet for getting some level of product support is to
go to the channel #cloudberries in the Homebrew
<a href="https://discordapp.com/invite/fWrvrNh">discord</a>.
Please keep expectations regarding support on the low side; this app
is a marginal side project in the very busy life of its author.</p>
<p>MCdeck is available from the
<a href="https://pypi.org/project/mcdeck/">Python Package Index</a>,
with source on <a href="https://github.com/lcgtools/MCdeck">github</a>.
</p>
'''
    about.setInformativeText(text)
    about.setStandardButtons(QtWidgets.QMessageBox.Ok)
    about.setDefaultButton(QtWidgets.QMessageBox.Ok)
    about.exec()
@QtCore.Slot()
def deckChanged(self, changed):
    """Deck modification state changed; sync the save menu actions.

    Save-as is always possible once a deck exists; plain save is only
    enabled while there are unsaved changes.
    """
    self.__save_as_action.setEnabled(True)
    self.__save_action.setEnabled(changed)
@QtCore.Slot()
def updateTitleFilename(self, name):
    """File name changed; update window title."""
    # Fall back to the generic app title when no file name is set
    title = f'MCdeck: {name}' if name else 'MCdeck - custom card deck builder'
    self.setWindowTitle(title)
@QtCore.Slot()
def enableOctgn(self, enable):
    """Toggle availability of OCTGN related menu actions.

    :param enable: if True enable OCTGN actions, otherwise disable
    """
    for w in (self._octgn_edit, self._octgn_delete, self._octgn_install,
              self._octgn_uninstall, self.__export_octgn_card_set_action,
              self.__export_octgn_deck_action):
        w.setEnabled(enable)
    # The "enable OCTGN" action itself is only offered while disabled
    self._octgn_enable.setEnabled(not enable)
    # Editing selected card(s) requires both OCTGN mode and deck metadata
    _enable_octgn_edit_sel = bool(MCDeck.deck._octgn and enable)
    self._octgn_edit_selected.setEnabled(_enable_octgn_edit_sel)
@QtCore.Slot()
def cancelOperation(self):
    # Flag polled by long-running operations to abort early
    self.__operation_cancelled = True
def _loadMarvelCDB(self):
    """Loads MarvelCDB card database if not already loaded.

    :return: True if the cards index is (now) loaded, False if the user
        declined to load it
    """
    if MarvelCDB._cards:
        # Index already loaded; nothing to do
        return True
    choice_dlg = LoadMarvelCDBDialog(self)
    if not choice_dlg.exec():
        return False
    progress = QtWidgets.QProgressDialog('Loading MarvelCDB cards index ...',
                                         'Cancel', 0, 20)
    progress.show()
    try:
        MarvelCDB.load_cards(all=choice_dlg._all, progress=progress)
    finally:
        progress.hide()
    # Disable PDF generation after downloading cards index
    self._export_pdf_action.setEnabled(False)
    return True
class Deck(QtWidgets.QScrollArea):
    """View for a deck of cards.

    Holds the ordered list of Card widgets, a local clipboard, an undo
    buffer and (optionally) OCTGN card set metadata, and emits signals
    so menu/action state can track the deck.
    """
    hasSelection = QtCore.Signal(bool)    # Has card(s) selected
    hasClipboard = QtCore.Signal(bool)    # Has cards in clipboard
    deckChanged = QtCore.Signal(bool)     # Deck is changed since initial/save
    filenameChange = QtCore.Signal(str)   # Project filename changed
    deckHasOctgn = QtCore.Signal(bool)    # True if deck has octgn metadata
def __init__(self):
    """Initialize an empty deck view with no associated save file."""
    super().__init__()
    self.__cards = []                # Card widgets in deck order
    self.__card_width = MCDeck.settings.card_view_width_px
    self.__card_scaled_width = None  # After zoom
    self.__card_scaled_height = None # After zoom
    self.__zoom_lvl = 0              # Current zoom level (0 = 100%)
    self.__zoom_per_lvl = 0.075      # Relative scale change per zoom level
    self._update_widget_card_size(reset=False)
    self._undo = DeckUndoBuffer(self)
    self._unsaved = True             # True if current deck state is "unsaved"
    self._save_file = None           # Name of file of current project
    self.filenameChange.emit('')
    self.__clipboard = []            # Cards which have been cut or copied
    self._octgn = None               # OCTGN card set data for the deck (if set)
    self.__view = QtWidgets.QWidget()
    self.setWidget(self.__view)
def addCard(self, front, back=None, bbleed=0, ctype=0, pos=-1, show=True):
    """Add a card to the card list.

    :param front: image of front side
    :type front: :class:`QtGui.QImage`
    :param back: image of back side (or None if no image)
    :type back: :class:`QtGui.QImage`
    :param bbleed: amount of bleed on back image
    :param ctype: card type
    :type ctype: int
    :param pos: position to insert (end if -1)
    :param show: if True call show() on widget before returning
    :return: generated card object
    :rtype: :class:`Card`

    """
    card = Card(front, back, bbleed, ctype, self.__view)
    card.setCardWidth(self.__card_scaled_width)
    # Inserting at len() is equivalent to appending at the end
    insert_at = pos if pos >= 0 else len(self.__cards)
    self.__cards.insert(insert_at, card)
    if self._octgn and card._octgn is None:
        # Deck carries OCTGN metadata; give the new card an empty record
        card._octgn = octgn.OctgnCardData(name='')
    for signal, handler in ((card.cardSelected, self.cardSingleSelected),
                            (card.cardCtrlSelected, self.cardCtrlSelected),
                            (card.cardShiftSelected, self.cardShiftSelected)):
        signal.connect(handler)
    if show:
        card.show()
    self._deck_changed()
    return card
def addCardObject(self, card, pos=-1, show=True):
    """Add a card object to the card list.

    :param card: card object
    :type card: :class:`Card`
    :param pos: position to insert (end if -1)
    :param show: if True call show() on widget before returning

    """
    card.setParent(self.__view)
    card.setCardWidth(self.__card_scaled_width)
    # Inserting at len() is equivalent to appending at the end
    self.__cards.insert(pos if pos >= 0 else len(self.__cards), card)
    for signal, handler in ((card.cardSelected, self.cardSingleSelected),
                            (card.cardCtrlSelected, self.cardCtrlSelected),
                            (card.cardShiftSelected, self.cardShiftSelected)):
        signal.connect(handler)
    card.setVisible(show)
    self._deck_changed()
def reset(self):
    """Resets deck view (re-layouts all cards and repaints)."""
    self._update_size(self.width(), self.height())
    for card in self.__cards:
        card.reset()
    self.repaint()
def clear(self, undo=True):
    """Clears the deck.

    :param undo: if True enable undo, otherwise clear undo buffer

    """
    # Either snapshot the deck for undo, or drop the undo history
    (self._undo.add_undo_level if undo else self._undo.clear)()
    self.__cards = []
    self._deck_changed()
    self.reset()
def has_cards(self):
    """True if deck has cards, otherwise False."""
    return len(self.__cards) > 0
def has_selected(self):
    """True if deck has one or more selected cards."""
    return any(card.selected for card in self.__cards)
def num_selected(self):
    """The number of selected cards."""
    count = 0
    for card in self.__cards:
        if card.selected:
            count += 1
    return count
def selected_cards(self):
    """Returns a list of selected cards (in deck order)."""
    result = []
    for card in self.__cards:
        if card.selected:
            result.append(card)
    return result
def show_cards(self):
    """Calls show() on all cards currently in the deck."""
    for card in self.__cards:
        card.show()
def hide_cards(self):
    """Calls hide() on all cards currently in the deck."""
    for card in self.__cards:
        card.hide()
def resizeEvent(self, event):
    # Re-layout cards for the new viewport dimensions
    # NOTE(review): super().resizeEvent() is not called — confirm the
    # scroll area does not need the default handling
    new_size = event.size()
    self._update_size(new_size.width(), new_size.height())
def mousePressEvent(self, event):
    # Left-click on empty deck area clears the selection, unless shift
    # is held (shift is reserved for range selection on cards)
    if event.buttons() == QtCore.Qt.LeftButton:
        key_mods = QtGui.QGuiApplication.keyboardModifiers()
        shift = key_mods & QtCore.Qt.ShiftModifier
        if not shift:
            # Clicking in deck area outside cards, deselect all cards
            for card in self.__cards:
                card.select(False)
            self.hasSelection.emit(False)
def wheelEvent(self, event):
    """Ctrl+wheel zooms the deck view; other wheel events scroll."""
    if event.modifiers() != QtCore.Qt.ControlModifier:
        super().wheelEvent(event)
        return
    # Capture Ctrl+Wheel and use for zoom
    if event.angleDelta().y() > 0:
        self.zoom_in()
    else:
        self.zoom_out()
@QtCore.Slot()
def newDeck(self):
    """Start a new empty deck, confirming discard of unsaved changes."""
    if self._unsaved:
        _dfun = QtWidgets.QMessageBox.question
        _keys = QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.Cancel
        k = _dfun(self, 'Confirm new deck', 'Current deck has unsaved '
                  'changes. Do you really wish to start a new deck?', _keys)
        if k == QtWidgets.QMessageBox.Cancel:
            return
    # Reset deck state: no cards, no save file, empty undo history
    self.hide_cards()
    self.__cards = []
    self._unsaved = True
    self._save_file = None
    self.filenameChange.emit('')
    self.deckChanged.emit(True)
    self._undo.clear()
    self.reset()
@QtCore.Slot()
def openDeck(self):
    """Open a deck from a .mcd index or a .zip archive holding one.

    If the current deck holds cards the user must first confirm that
    it will be discarded. (Removed an unused local `err` lambda.)
    """
    if self.__cards:
        _q = QtWidgets.QMessageBox.question
        btn = _q(self, 'Discard current deck?', 'Deck contains cards. '
                 'Open file and discard current deck?',
                 QtWidgets.QMessageBox.Open | QtWidgets.QMessageBox.Cancel)
        if btn == QtWidgets.QMessageBox.Cancel:
            return
    _dlg = QtWidgets.QFileDialog.getOpenFileName
    _flt = 'Zip archive (*.zip);;MCD index (*.mcd)'
    path, cat = _dlg(self, 'Open MCD index or archive containing '
                     'an MCD index', filter=_flt)
    if path:
        self._open(path)
@QtCore.Slot()
def saveDeck(self):
    """Save the deck to its current file (or delegate to save-as).

    When overwriting an existing file the deck is first written to a
    temporary file and then moved over the target, so a failed save
    cannot destroy the previous file.

    Fixes: the temp file is now created in the target's own directory
    (a temp file in the system tmp dir can live on another filesystem,
    where os.rename fails with EXDEV), and the old remove+rename pair
    is replaced by atomic os.replace (removing first left a window
    with no saved file at all).
    """
    err = lambda s1, s2: ErrorDialog(self, s1, s2).exec()
    if self._save_file is None:
        self.saveDeckAs()
        return
    if not os.path.exists(self._save_file):
        self._save(self._save_file)
        return
    # Target exists; confirm overwrite with the user
    _dfun = QtWidgets.QMessageBox.question
    keys = QtWidgets.QMessageBox.Save | QtWidgets.QMessageBox.Cancel
    k = _dfun(self, 'Confirm save', f'The file {self._save_file} '
              'already exists. Do you wish to overwrite?', keys)
    if k == QtWidgets.QMessageBox.Cancel:
        return
    tfile = tempfile.NamedTemporaryFile(
        suffix='.zip', delete=False,
        dir=os.path.dirname(os.path.abspath(self._save_file)))
    tfile.close()
    try:
        self._save(tfile.name)
    except Exception:
        os.remove(tfile.name)
        err('Save error', f'Could not save to {self._save_file}')
    else:
        # Atomic on POSIX; also overwrites existing target on Windows
        os.replace(tfile.name, self._save_file)
@QtCore.Slot()
def saveDeckAs(self):
    """Prompt for a deck file name and save the deck to it."""
    # Start the dialog in the directory of the current save file (if any)
    start_dir = os.path.dirname(self._save_file) if self._save_file else ''
    path, _f = QtWidgets.QFileDialog.getSaveFileName(
        self, 'Select deck filename', dir=start_dir,
        filter='Zip files (*.zip)')
    if not path:
        return
    self._save(path)
    self._save_file = path
    self.filenameChange.emit(path)
@QtCore.Slot()
def exportPdf(self):
    """Export the deck as a printable PDF document.

    Page/card geometry is taken from the app settings; each card front
    (and optional back) is rendered via LcgCardPdfGenerator.

    Fix: renamed the local `filter` (shadowed the builtin).
    """
    if not self.__cards:
        msg_box = QtWidgets.QMessageBox(self)
        msg_box.setWindowTitle('No cards')
        msg_box.setText('There are no cards to export.')
        msg_box.setStandardButtons(QtWidgets.QMessageBox.Cancel)
        msg_box.setDefaultButton(QtWidgets.QMessageBox.Cancel)
        msg_box.exec()
        return
    # Set up a PDF generator
    _get = QtWidgets.QFileDialog.getSaveFileName
    fname, _filt = _get(self, 'Select file name for generated PDF file',
                        filter='PDF files (*.pdf);;All files (*.*)')
    if fname:
        if os.path.exists(fname):
            os.remove(fname)
        _s = MCDeck.settings
        Gen = LcgCardPdfGenerator
        gen = Gen(outfile=fname, pagesize=_s.pagesize, dpi=_s.page_dpi,
                  c_width=_s.card_width_mm, c_height=_s.card_height_mm,
                  bleed=_s.card_bleed_mm, margin=_s.page_margin_mm,
                  spacing=_s.card_min_spacing_mm,
                  fold=_s.card_fold_distance_mm, folded=(not _s.twosided))
        gen.setTwosidedSubset(odd=True, even=True)
        gen.setTwosidedEvenPageOffset(0, 0)
        gen.setFeedDir(_s.feed_dir)
        # Draw cards onto generator and render PDF
        for card in self.__cards:
            front = gen.loadCard(card.front_img)
            if card.back_img:
                back = gen.loadCard(card.back_img, bleed=card.back_bleed)
            else:
                back = None
            gen.drawCard(front, back)
        gen.finish()
@QtCore.Slot()
def exportTts(self):
    """Export as images for importing into Tabletop Simulator."""
    # The dialog performs the actual export of the current card list
    TTSExportDialog(self, MCDeck.settings, self.__cards).exec()
@QtCore.Slot()
def exportOctgnCardSet(self):
    """Export deck as Octgn card set (.zip archive).

    Fixes: corrected error message grammar ("validate set" -> "valid
    set") and the broken closing tag "</li><ul>" -> "</li></ul>" at the
    end of the success dialog's rich text.
    """
    err = lambda s1, s2: ErrorDialog(self, s1, s2).exec()
    if not self._octgn:
        # Action should only be reachable with OCTGN metadata present
        raise RuntimeError('Should never happen')
    if not self.__cards:
        _msg = 'The deck has no cards to export'
        err('Nothing to export', _msg)
        return
    if not octgn.OctgnCardSetData.validate_legal_deck(self):
        _msg = 'The deck does not have a valid set of OCTGN data'
        err('Cannot export Octgn data', _msg)
        return
    # Get a .zip filename for saving
    _get = QtWidgets.QFileDialog.getSaveFileName
    d = MCDeck.settings.octgn_card_sets_path
    if d is None or not os.path.isdir(d):
        d = os.path.dirname(self._save_file) if self._save_file else None
    path, _f = _get(self, 'Select .zip filename for export', dir=d,
                    filter='Zip files (*.zip)')
    if not path:
        return
    try:
        _exp = octgn.OctgnCardSetData.export_octgn_card_set
        with zipfile.ZipFile(path, 'w') as zf:
            _exp(self, zf, MCDeck.settings)
    except Exception as e:
        err('Octgn export error', f'Unable to export: {e}')
    else:
        # NOTE(review): positional args (self, ...) do not match the
        # documented QMessageBox(parent) overload — confirm with the
        # PySide version in use
        info = QtWidgets.QMessageBox(self, 'Successful export', '')
        text = f'''<p>An OCTGN card set with GUID
<tt>{self._octgn.set_id}</tt> and the name "{self._octgn.name}" was
exported as a .zip file.</p>
<p>The .zip file can be installed into OCTGN by using the OCTGN
card set installation option in the Tools OCTGN menu.</p>
<p>The card set can be installed manually into OCTGN by unpacking
the .zip file into the OCTGN installation's <tt>Data/</tt>
directory. This directory normally has the path
<tt>~/AppData/Local/Programs/OCTGN/Data/</tt>.</p>
<p>Installed custom cards can be used with
<a href="https://twistedsistem.wixsite.com/octgnmarvelchampions/">MC: TCG
in OCTGN</a>. Decks can be made with the OCTGN deck editor. In
order to be able to use a generated .o8d deck, it needs to
be copied into the <tt>Data/</tt> subdirectory
<tt>GameDatabase/055c536f-adba-4bc2-acbf-9aefb9756046/FanMade/</tt>.
</p>
<p>Deck(s) created with the deck editor can be added to the .zip
file by creating the .zip file directory
<tt>GameDatabase/055c536f-adba-4bc2-acbf-9aefb9756046/FanMade/</tt>
and adding the .o8d file(s) to that directory.</p>
<p>To uninstall the card set, use the OCTGN card set uninstall tool
available from the Tools menu, or remove the following
<tt>Data/</tt> subdirectories:
</p>
<ul><li>
<tt>GameDatabase/055c536f-adba-4bc2-acbf-9aefb9756046/Sets/{self._octgn.set_id}/</tt>
</li><li>
<tt>ImageDatabase/055c536f-adba-4bc2-acbf-9aefb9756046/Sets/{self._octgn.set_id}/</tt>.
</li></ul>
'''
        info.setInformativeText(text)
        info.setStandardButtons(QtWidgets.QMessageBox.Ok)
        info.setDefaultButton(QtWidgets.QMessageBox.Ok)
        info.exec()
@QtCore.Slot()
def exportOctgnDeck(self):
    """Export deck as an Octgn .o8d deck."""
    # Delegated entirely to the octgn module (deck view doubles as parent)
    octgn.OctgnCardSetData.export_o8d_deck(self, self)
@QtCore.Slot()
def cardSingleSelected(self, widget):
    """Handler for card single-selection."""
    # Exactly the clicked card ends up selected; all others deselected
    for card in self.__cards:
        is_target = card is widget
        card.select(is_target)
    self.hasSelection.emit(True)
@QtCore.Slot()
def cardCtrlSelected(self, widget):
    """Handler for card ctrl-selection (toggles the clicked card)."""
    for card in self.__cards:
        if card is widget:
            card.select(not card.selected)
            break
    self.hasSelection.emit(any(c.selected for c in self.__cards))
@QtCore.Slot()
def cardShiftSelected(self, widget):
    """Handler for card shift-selection (range selection)."""
    w_idx = self.__cards.index(widget)
    sel = [(i, c) for i, c in enumerate(self.__cards) if c.selected]
    if not sel:
        # Nothing selected yet; shift-click acts as a plain select
        widget.select(True)
    else:
        min_sel = min(i for i, c in sel)
        max_sel = max(i for i, c in sel)
        if min_sel <= w_idx < max_sel:
            # Click strictly inside current range shrinks the range to
            # [min_sel, w_idx]; NOTE(review): the asymmetry (min kept,
            # max moved) looks intentional — confirm desired UX
            max_sel = w_idx
        else:
            # Click outside extends the range to include the card
            min_sel = min(min_sel, w_idx)
            max_sel = max(max_sel, w_idx)
        for i, card in enumerate(self.__cards):
            card.select(min_sel <= i <= max_sel)
    self.hasSelection.emit(True)
@QtCore.Slot()
def cutCards(self):
    """Cut selected cards into the local clipboard."""
    # Partition deck into the cut set and the remainder
    cut_cards = [c for c in self.__cards if c.selected]
    cards_left = [c for c in self.__cards if not c.selected]
    for card in cut_cards:
        card.hide()
    if cut_cards:
        # Local clipboard replaces any system clipboard content
        MCDeck.clipboard().clear()
        self.__clipboard = cut_cards
        self.hasSelection.emit(False)
        self.hasClipboard.emit(True)
        self._undo.add_undo_level()
        self.__cards = cards_left
        self.show_cards()
        self._deck_changed()
        self.reset()
@QtCore.Slot()
def copyCards(self):
    """Copy selected cards into the local clipboard."""
    copy_cards = [card.copy() for card in self.__cards if card.selected]
    if copy_cards:
        # Local clipboard replaces any system clipboard content
        MCDeck.clipboard().clear()
        self.__clipboard = copy_cards
        self.hasClipboard.emit(True)
@QtCore.Slot()
def copyCardFront(self):
    """Copy front image of the single selected card to the clipboard."""
    # Unpacking raises ValueError unless exactly one card is selected
    [card] = self.selected_cards()
    MCDeck.clipboard().setImage(card.front_img)
@QtCore.Slot()
def copyCardBack(self):
    """Copy back image of the single selected card to the clipboard."""
    # Unpacking raises ValueError unless exactly one card is selected
    [card] = self.selected_cards()
    MCDeck.clipboard().setImage(card.back_img)
@QtCore.Slot()
def paste(self, droppedMimeData=None, after=True, ctype=None, back=None):
    """Paste data (also used for drag & drop).

    :param droppedMimeData: MIME data from a drop event (None to use
        the local card clipboard or the system clipboard)
    :param after: if True insert after the selection, otherwise before
    :param ctype: card type for pasted cards (None to ask via dialog)
    :param back: back side image for pasted cards (or None)

    Fixes: (1) the 'clockwise' aspect-rotation setting raised
    RuntimeError because the 'anticlockwise' test was a separate "if"
    instead of "elif"; (2) the remote-URL branch overwrote the `ctype`
    parameter with the HTTP Content-Type header (now a separate local
    `content_type`); (3) "UTL" typo in the unsupported-URL message.
    """
    # Resolve start position 'pos' for pasting cards into current deck
    sel_idx = [i for i, c in enumerate(self.__cards) if c.selected]
    if after:
        if sel_idx:
            pos = max(sel_idx) + 1
        else:
            # Past the end; list.insert clamps, so this appends
            pos = len(self.__cards) + 1
    else:
        if sel_idx:
            pos = min(sel_idx)
        else:
            pos = 0
    if self.__clipboard and not droppedMimeData:
        # Pasting from local application copied/cut card list buffer
        for i, card in enumerate(self.__clipboard):
            self._undo.add_undo_level()
            self.addCardObject(card.copy(), pos=(pos + i))
        self.show_cards()
    else:
        # Pasting from MIME data
        if droppedMimeData:
            mime = droppedMimeData
        else:
            mime = MCDeck.clipboard().mimeData()
        front_images = []
        if mime.hasUrls():
            # Resolve URL(s)
            for url in mime.urls():
                if url.isLocalFile():
                    # Add image from local file
                    path = url.toLocalFile()
                    if not os.path.exists(path):
                        front_images.append(None)
                    elif os.path.isfile(path):
                        # Try to add single file
                        img = QtGui.QImage()
                        if path and img.load(path):
                            front_images.append(img)
                        else:
                            front_images.append(None)
                    elif os.path.isdir(path):
                        # Add all image files inside directory
                        entries = os.listdir(path)
                        for e in entries:
                            # Ignore hidden files
                            if e.startswith('.'):
                                continue
                            _p = os.path.join(path, e)
                            if os.path.isfile(_p):
                                img = QtGui.QImage()
                                if _p and img.load(_p):
                                    front_images.append(img)
                                else:
                                    front_images.append(None)
                    else:
                        front_images.append(None)
                else:
                    # Retrieve image from remote URL; keep the header in
                    # its own local so the ctype *parameter* is untouched
                    response = urllib.request.urlopen(url.url())
                    if isinstance(response, http.client.HTTPResponse):
                        content_type = response.getheader('Content-Type', '')
                        mime_types = content_type.split(';')
                        mime_types = [s.strip() for s in mime_types]
                        mime_match = image_mime_type(mime_types)
                        if mime_match:
                            img_data = response.read()
                            img = QtGui.QImage()
                            if img.loadFromData(img_data):
                                front_images.append(img)
                                continue
                        front_images.append(None)
                    else:
                        print('Unsupported URL type:', url.url())
                        front_images.append(None)
        elif mime.hasImage():
            mime_types = set(mime.formats())
            _st = QtGui.QImageReader.supportedMimeTypes()
            supp_types = set([mt.toStdString() for mt in _st])
            overlap = mime_types & supp_types
            if overlap:
                # Pick a random format
                mime_type = overlap.pop()
                img = QtGui.QImage()
                data = mime.data(mime_type)
                if img.loadFromData(data, mime_type):
                    front_images.append(img)
                else:
                    front_images.append(None)
            else:
                front_images.append(None)
        elif 'application/x-qt-image' in mime.formats():
            mime_types = set(mime.formats())
            img = QtGui.QImage()
            data = mime.data('application/x-qt-image')
            if img.loadFromData(data, 'application/x-qt-image'):
                front_images.append(img)
            else:
                front_images.append(None)
        else:
            raise RuntimeError('Should never happen')
        # Handle situation that one or more images did not load
        if sum(1 for img in front_images if img) == 0:
            # No valid images
            msg_box = QtWidgets.QMessageBox(self)
            msg_box.setWindowTitle('No images')
            msg_box.setText('No images could be added (wrong type(s) or '
                            'failed to load).')
            msg_box.setStandardButtons(QtWidgets.QMessageBox.Cancel)
            msg_box.setDefaultButton(QtWidgets.QMessageBox.Cancel)
            msg_box.exec()
            return
        elif sum(1 for img in front_images if img is None) > 0:
            # One or more invalid images
            QMB = QtWidgets.QMessageBox
            _q = QtWidgets.QMessageBox.question
            val = _q(self, 'Invalid image(s)', 'Some images are invalid '
                     '(not images or failed to load). Proceed by '
                     'adding the valid images, ignoring the invalid ones?',
                     buttons=QMB.Ok | QMB.Abort,
                     defaultButton=QMB.Abort)
            if val != QMB.Ok:
                return
            front_images = [img for img in front_images if img]
        # Handle automatic aspect transformation of cards
        _s = MCDeck.settings
        aspect_rotation = _s.aspect_rotation
        if aspect_rotation != 'none':
            if aspect_rotation == 'clockwise':
                clockwise = True
            elif aspect_rotation == 'anticlockwise':
                clockwise = False
            else:
                raise RuntimeError('Should never happen')
            portrait = (_s.card_height_mm >= _s.card_width_mm)
            for i, img in enumerate(front_images):
                if not isinstance(img, LcgImage):
                    img = LcgImage(img)
                c_portrait = (img.heightMm() >= img.widthMM())
                if portrait ^ c_portrait:
                    # Wrong aspect, rotate
                    if clockwise:
                        front_images[i] = img.rotateClockwise()
                    else:
                        front_images[i] = img.rotateAntiClockwise()
        _added_undo = False
        if ctype is None:
            # Show dialog to ask for what type of card back to use
            dlg = CardTypeDialog(self)
            if dlg.exec():
                self._undo.add_undo_level()
                _added_undo = True
                res_type, res_data = dlg.result
                if res_type == 3:
                    # Card fronts are the same as card backs
                    ctype = Card.type_unspecified
                    for i, img in enumerate(front_images):
                        self.addCard(img, img, 0, ctype, pos + i)
                else:
                    if res_type == 1:
                        back = None
                        ctype = res_data
                    elif res_type == 2:
                        back = res_data
                        ctype = Card.type_unspecified
                    elif res_type == 4:
                        back = None
                        ctype = Card.type_unspecified
                    else:
                        raise RuntimeError('Should never happen')
                    for i, img in enumerate(front_images):
                        self.addCard(img, back, 0, ctype, pos + i)
        else:
            # Use card type and card back image from method arguments
            self._undo.add_undo_level()
            _added_undo = True
            for i, img in enumerate(front_images):
                self.addCard(img, back, 0, ctype, pos + i)
        if _added_undo:
            self.show_cards()
            self.reset()
@QtCore.Slot()
def pasteBefore(self):
    """Paste before (currently selected) card(s)."""
    # Convenience wrapper around paste() with after=False
    self.paste(after=False)
@QtCore.Slot()
def pastePlayer(self):
    """Paste as player type card."""
    # Fixed ctype skips the card-type dialog in paste()
    self.paste(ctype=Card.type_player)
@QtCore.Slot()
def pasteEncounter(self):
    """Paste as encounter type card."""
    # Fixed ctype skips the card-type dialog in paste()
    self.paste(ctype=Card.type_encounter)
@QtCore.Slot()
def pasteVillain(self):
    """Paste as villain type card."""
    # Fixed ctype skips the card-type dialog in paste()
    self.paste(ctype=Card.type_villain)
@QtCore.Slot()
def settingsChanged(self):
    """App settings changed; apply the (possibly new) card view width."""
    card_width = MCDeck.settings.card_view_width_px
    self._update_widget_card_size(card_width, reset=False)
    self.reset()
@QtCore.Slot()
def systemClipboardChanged(self):
    """System clipboard changed; update paste availability."""
    mime = MCDeck.clipboard().mimeData()
    if mime and mime.formats():
        # Clipboard has (changed) data, invalidate any local clipboard
        self.__clipboard = []
        if mime.hasUrls():
            self.hasClipboard.emit(True)
        elif mime.hasImage():
            mime_type = image_mime_type(mime)
            if mime_type:
                self.hasClipboard.emit(True)
            elif 'application/x-qt-image' in mime.formats():
                # For now, unable to handle this MIME type, see
                # https://bugreports.qt.io/browse/QTBUG-93632
                # NOTE(review): __clipboard was just cleared, so the
                # guard below is always true here — confirm intent
                if not self.__clipboard:
                    self.hasClipboard.emit(False)
            else:
                # Unsupported image format
                if not self.__clipboard:
                    self.hasClipboard.emit(False)
        else:
            # Unsupported MIME format
            if not self.__clipboard:
                self.hasClipboard.emit(False)
    else:
        # Empty system clipboard; local clipboard (if any) still pasteable
        if not self.__clipboard:
            self.hasClipboard.emit(False)
@QtCore.Slot()
def selectAll(self):
    """Select all cards in the deck."""
    for card in self.__cards:
        card.select(True)
    self.hasSelection.emit(True)
@QtCore.Slot()
def selectNone(self):
    """Deselect all cards in the deck."""
    for card in self.__cards:
        card.select(False)
    self.hasSelection.emit(False)
@QtCore.Slot()
def setPlayerType(self):
    """Set card type to player for selected cards.

    Bugfix: has_selected is a method; the previous bare reference was
    always truthy, so an undo level was added even with no selection.
    """
    if self.has_selected():
        self._undo.add_undo_level()
        for i, card in enumerate(self.__cards):
            if card.selected:
                # Replace with a modified copy so the undo snapshot
                # keeps the original card object unchanged
                card = self._copy_card(card)
                card.ctype = Card.type_player
                card.select(True)
                self.__cards[i] = card
        self._deck_changed()
        self.show_cards()
@QtCore.Slot()
def setEncounterType(self):
    """Set card type to encounter for selected cards.

    Bugfix: has_selected is a method; the previous bare reference was
    always truthy, so an undo level was added even with no selection.
    """
    if self.has_selected():
        self._undo.add_undo_level()
        for i, card in enumerate(self.__cards):
            if card.selected:
                # Replace with a modified copy so the undo snapshot
                # keeps the original card object unchanged
                card = self._copy_card(card)
                card.ctype = Card.type_encounter
                card.select(True)
                self.__cards[i] = card
        self._deck_changed()
        self.show_cards()
@QtCore.Slot()
def setVillainType(self):
    """Set card type to villain for selected cards.

    Bugfix: has_selected is a method; the previous bare reference was
    always truthy, so an undo level was added even with no selection.
    """
    if self.has_selected():
        self._undo.add_undo_level()
        for i, card in enumerate(self.__cards):
            if card.selected:
                # Replace with a modified copy so the undo snapshot
                # keeps the original card object unchanged
                card = self._copy_card(card)
                card.ctype = Card.type_villain
                card.select(True)
                self.__cards[i] = card
        self._deck_changed()
        self.show_cards()
@QtCore.Slot()
def setUnspecifiedType(self):
    """Set card type to unspecified for selected cards.

    Bugfix: has_selected is a method; the previous bare reference was
    always truthy, so an undo level was added even with no selection.
    """
    if self.has_selected():
        self._undo.add_undo_level()
        for i, card in enumerate(self.__cards):
            if card.selected:
                # Replace with a modified copy so the undo snapshot
                # keeps the original card object unchanged
                card = self._copy_card(card)
                card.ctype = Card.type_unspecified
                card.select(True)
                self.__cards[i] = card
        self._deck_changed()
        self.show_cards()
@QtCore.Slot()
def useFrontAsBack(self):
    """Use card front image as the back side image also.

    Bugfix: has_selected is a method; the previous bare reference was
    always truthy, so an undo level was added even with no selection.
    """
    if self.has_selected():
        self._undo.add_undo_level()
        for i, card in enumerate(self.__cards):
            if card.selected:
                # Replace with a modified copy so the undo snapshot
                # keeps the original card object unchanged
                card = self._copy_card(card)
                card.set_back_image(card.front_img)
                card.select(True)
                self.__cards[i] = card
        self._deck_changed()
        self.show_cards()
@QtCore.Slot()
def setFrontImage(self):
    """Open an image file as the card front for the selected card(s).

    Fixes: (1) the 'clockwise' aspect-rotation setting raised
    RuntimeError because the 'anticlockwise' test was a separate "if"
    instead of "elif"; (2) the file dialog title wrongly said "back".
    """
    if self.has_selected():
        _fun = loadImageFromFileDialog
        img = _fun(self, 'Open card front image file')
        if img:
            # Handle aspect transformation
            img = LcgImage(img)
            _s = MCDeck.settings
            aspect_rotation = _s.aspect_rotation
            if aspect_rotation != 'none':
                if aspect_rotation == 'clockwise':
                    clockwise = True
                elif aspect_rotation == 'anticlockwise':
                    clockwise = False
                else:
                    raise RuntimeError('Should never happen')
                portrait = (_s.card_height_mm >= _s.card_width_mm)
                c_portrait = (img.heightMm() >= img.widthMM())
                if portrait ^ c_portrait:
                    # Wrong aspect, rotate
                    if clockwise:
                        img = img.rotateClockwise()
                    else:
                        img = img.rotateAntiClockwise()
            self._undo.add_undo_level()
            for i, card in enumerate(self.__cards):
                if card.selected:
                    # Modified copy keeps the undo snapshot intact
                    card = self._copy_card(card)
                    card.set_front_image(img)
                    card.select(True)
                    self.__cards[i] = card
            self._deck_changed()
            self.show_cards()
@QtCore.Slot()
def setBackImage(self):
    """Open an image file as the card back for the selected card(s).

    Bugfix: the 'clockwise' aspect-rotation setting raised RuntimeError
    because the 'anticlockwise' test was a separate "if" instead of
    "elif" (the final "else" then fired for 'clockwise').
    """
    if self.has_selected():
        _fun = loadImageFromFileDialog
        img = _fun(self, 'Open card back image file')
        if img:
            # Handle aspect transformation
            img = LcgImage(img)
            _s = MCDeck.settings
            aspect_rotation = _s.aspect_rotation
            if aspect_rotation != 'none':
                if aspect_rotation == 'clockwise':
                    clockwise = True
                elif aspect_rotation == 'anticlockwise':
                    clockwise = False
                else:
                    raise RuntimeError('Should never happen')
                portrait = (_s.card_height_mm >= _s.card_width_mm)
                c_portrait = (img.heightMm() >= img.widthMM())
                if portrait ^ c_portrait:
                    # Wrong aspect, rotate
                    if clockwise:
                        img = img.rotateClockwise()
                    else:
                        img = img.rotateAntiClockwise()
            self._undo.add_undo_level()
            for i, card in enumerate(self.__cards):
                if card.selected:
                    # Modified copy keeps the undo snapshot intact
                    card = self._copy_card(card)
                    card.set_back_image(img)
                    card.select(True)
                    self.__cards[i] = card
            self._deck_changed()
            self.show_cards()
@QtCore.Slot()
def removeBackImage(self):
    """Remove the back image set on the selected cards.

    Bugfix: has_selected is a method; the previous bare reference was
    always truthy, so the operation ran even with no selection.
    """
    if not self.has_selected():
        return
    # Check if any selected card has alt side OCTGN data
    _has_octgn_alt = False
    for card in self.__cards:
        if card.selected and card._octgn and card._octgn.alt_data:
            _has_octgn_alt = True
            break
    if _has_octgn_alt:
        _dfun = QtWidgets.QMessageBox.question
        _msg = ('One or more selected card(s) has OCTGN alt side '
                'metadata. Removing the back image will also remove '
                'that metadata. Proceed with removing back image(s)?')
        _k = QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.Cancel
        confirm = _dfun(self, 'Confirm removal', _msg, _k)
        if confirm == QtWidgets.QMessageBox.Cancel:
            return
    # Remove back images (and any Octgn alt data)
    self._undo.add_undo_level()
    for i, card in enumerate(self.__cards):
        if card.selected:
            # Modified copy keeps the undo snapshot intact
            card = self._copy_card(card)
            card.set_back_image(None)
            if card._octgn:
                card._octgn._alt_data = None
            card.select(True)
            self.__cards[i] = card
    self._deck_changed()
    self.show_cards()
@QtCore.Slot()
def rotateHalfCircle(self):
    """Rotate front card(s) 180 degrees.

    Bugfix: has_selected is a method; the previous bare reference was
    always truthy, so an undo level was added even with no selection.
    """
    if self.has_selected():
        self._undo.add_undo_level()
        for i, card in enumerate(self.__cards):
            if card.selected:
                # Modified copy keeps the undo snapshot intact
                card = self._copy_card(card)
                img = LcgImage(card.front_img).rotateHalfCircle()
                card.set_front_image(img)
                card.select(True)
                self.__cards[i] = card
        self._deck_changed()
        self.show_cards()
@QtCore.Slot()
def rotateClockwise(self):
    """Rotate front card(s) 90 degrees clockwise.

    Bugfix: has_selected is a method; the previous bare reference was
    always truthy, so an undo level was added even with no selection.
    """
    if self.has_selected():
        self._undo.add_undo_level()
        for i, card in enumerate(self.__cards):
            if card.selected:
                # Modified copy keeps the undo snapshot intact
                card = self._copy_card(card)
                img = LcgImage(card.front_img).rotateClockwise()
                card.set_front_image(img)
                card.select(True)
                self.__cards[i] = card
        self._deck_changed()
        self.show_cards()
@QtCore.Slot()
def rotateAntiClockwise(self):
    """Rotate front card(s) 90 degrees anticlockwise.

    Bugfix: has_selected is a method; the previous bare reference was
    always truthy, so an undo level was added even with no selection.
    """
    if self.has_selected():
        self._undo.add_undo_level()
        for i, card in enumerate(self.__cards):
            if card.selected:
                # Modified copy keeps the undo snapshot intact
                card = self._copy_card(card)
                img = LcgImage(card.front_img).rotateAntiClockwise()
                card.set_front_image(img)
                card.select(True)
                self.__cards[i] = card
        self._deck_changed()
        self.show_cards()
@QtCore.Slot()
def deleteCards(self):
    """Delete selected cards.

    Bugfix: has_selected is a method; the previous bare reference was
    always truthy, so an undo level was added even with no selection.
    """
    if self.has_selected():
        self._undo.add_undo_level()
        # Keep only the cards that are not selected
        self.__cards = [card for card in self.__cards
                        if not card.selected]
        self.show_cards()
        self._deck_changed()
        self.reset()
@QtCore.Slot()
def back_image_on_top(self, status):
    """Set status whether to show back image on top."""
    # reset is True exactly when the flag's value actually changes
    reset = ((not MCDeck._front_on_top) ^ status)
    MCDeck._front_on_top = not status
    if reset:
        self.reset()
@QtCore.Slot()
def zoom_reset(self):
    """Reset to 100% zoom."""
    self.__zoom_lvl = 0
    self._update_widget_card_size()
@QtCore.Slot()
def zoom_in(self):
    """Zoom in one zoom level."""
    self.__zoom_lvl += 1
    self._update_widget_card_size()
@QtCore.Slot()
def zoom_out(self):
    """Zoom out one zoom level."""
    self.__zoom_lvl -= 1
    self._update_widget_card_size()
@QtCore.Slot()
def cancelOperation(self):
    # Flag polled by long-running operations (e.g. _save) to abort early
    self.__operation_cancelled = True
@QtCore.Slot()
def undoAction(self):
    """Undo the most recent deck operation."""
    self._undo_action()
@QtCore.Slot()
def redoAction(self):
    """Redo the most recently undone deck operation."""
    self.hide_cards()
    self.__cards = self._undo.redo()
    # Deselect everything; selection state is not part of undo history
    for card in self.__cards:
        card.select(False)
    self._deck_changed()
    self.reset()
@property
def _card_list_copy(self):
    """A (shallow) copy of the current list of cards."""
    return list(self.__cards)
def _undo_action(self, deselect=True, purge=False):
    """Restore the previous card list from the undo buffer.

    :param deselect: if True deselect all cards after the undo
    :param purge: passed through to the undo buffer's undo()
    """
    self.hide_cards()
    self.__cards = self._undo.undo(purge=purge)
    if deselect:
        for card in self.__cards:
            card.select(False)
    self._deck_changed()
    self.reset()
def _update_widget_card_size(self, width=None, reset=True):
    """Updates card widget size to the specified width (in pixels).

    :param width: new card widget width (in pixels), current if None
    :param reset: if True call :meth:`reset` if width was changed

    Actual width is scaled in accordance with current zoom level.
    """
    if width is None:
        width = self.__card_width
    self.__card_width = width
    # Compound zoom: each level scales width by (1 +/- zoom_per_lvl)
    if self.__zoom_lvl == 0:
        scaled = width
    elif self.__zoom_lvl > 0:
        scaled = int(width*(1 + self.__zoom_per_lvl)**self.__zoom_lvl)
    elif self.__zoom_lvl < 0:
        scaled = int(width*(1 - self.__zoom_per_lvl)**(-self.__zoom_lvl))
    scaled = max(scaled, 8)  # Ensure we never go below 8 pixels width
    # Update card width and height in deck view
    self.__card_scaled_width = scaled
    _s_c_height = MCDeck.settings.card_height_mm
    _s_c_width = MCDeck.settings.card_width_mm
    # Height preserves the physical card aspect ratio from settings
    self.__card_scaled_height = int(scaled*(_s_c_height/_s_c_width))
    # Update card width (and height) of card widgets
    for card in self.__cards:
        card.setCardWidth(scaled)
    if reset:
        self.reset()
def _save(self, filename):
    """Save the deck to *filename* as a .zip archive.

    The archive contains a top-level ``mcdeck.mcd`` index, card images
    under ``player/``, ``encounter/``, ``villain/`` and ``single/``
    subdirectories, and (if the deck has OCTGN metadata) an ``octgn.txt``
    file. On any failure the partially written file is removed and an
    error dialog is shown; on success the unsaved flag is cleared.
    """
    err = lambda s1, s2: ErrorDialog(self, s1, s2).exec()
    self.__operation_cancelled = False
    # Progress dialog; its Cancel button sets __operation_cancelled
    dlg = QtWidgets.QProgressDialog('Saving card(s)', 'Cancel',
                                    0, len(self.__cards))
    dlg.canceled.connect(self.cancelOperation)
    def dlg_add():
        # Advance the progress dialog one step and honour cancellation.
        # NOTE(review): the LcgException raised here is caught by the broad
        # except below, so a cancelled save also shows the 'Unable to save
        # file' dialog after the 'Operation cancelled' one — confirm intended
        dlg.setValue(dlg.value() + 1)
        QtCore.QCoreApplication.processEvents()  # Force Qt update
        if self.__operation_cancelled:
            err('Operation cancelled', 'Operation cancelled by user.')
            raise LcgException('Operation was cancelled')
    # Generate OCTGN save data (if any)
    if self._octgn:
        set_info = self._octgn
        card_data_l = []
        for card in self.__cards:
            c_data = card._octgn
            # Alt (back side) OCTGN data requires an explicit back image
            if c_data.alt_data and not card.specified_back_img:
                _msg = 'Card(s) with no back img have alt card OCTGN data'
                err('Metadata problem', _msg)
                return
            card_data_l.append(c_data)
        octgn_file_s = set_info.to_str(card_data_l)
    else:
        octgn_file_s = None
    try:
        with zipfile.ZipFile(filename, 'w') as zf:
            # mcd accumulates the text of the mcdeck.mcd index file
            mcd = ('# MCdeck definition of a custom cards MC:TCG deck.\n'
                   '# See https://pypi.org/project/mcdeck/ for info.\n')
            mode = None
            # Per-type counters used to number the stored image files
            n_p, n_e, n_v, n_s = 0, 0, 0, 0
            for card in self.__cards:
                _mode = None
                _next = None
                if card.ctype == Card.type_player:
                    _mode = 'player'
                    n_p += 1
                    _next = n_p
                elif card.ctype == Card.type_encounter:
                    _mode = 'encounter'
                    n_e += 1
                    _next = n_e
                elif card.ctype == Card.type_villain:
                    _mode = 'villain'
                    n_v += 1
                    _next = n_v
                if _mode and not card.specified_back_img:
                    # Store player, encounter or villain card
                    if _mode != mode:
                        # New section header in the index when type changes
                        mcd += f'\n{_mode}:\n'
                        mode = _mode
                    img = LcgImage(card.front_img)
                    data = img.saveToBytes(format='PNG')
                    path = os.path.join(mode, f'img_{_next:05}.png')
                    zf.writestr(path, data)
                    mcd += f'  {to_posix_path(path)}\n'
                    dlg_add()
                else:
                    # Single card (untyped, or typed with an explicit back)
                    mode = None
                    n_s += 1
                    if card.back_img and card.back_bleed > 0:
                        mcd += f'\nsingle [back_bleed={card.back_bleed}]:\n'
                    else:
                        mcd += '\nsingle:\n'
                    img = LcgImage(card.front_img)
                    data = img.saveToBytes(format='PNG')
                    # _A/_B suffixes distinguish front/back when both exist
                    if card.back_img:
                        path = os.path.join('single', f'img_{n_s:05}_A.png')
                    else:
                        path = os.path.join('single', f'img_{n_s:05}.png')
                    zf.writestr(path, data)
                    mcd += f'  {to_posix_path(path)}\n'
                    if card.back_img:
                        img = LcgImage(card.back_img)
                        data = img.saveToBytes(format='PNG')
                        path = os.path.join('single', f'img_{n_s:05}_B.png')
                        zf.writestr(path, data)
                        mcd += f'  {to_posix_path(path)}\n'
                    dlg_add()
            # Write the card definition file to the top level of the zipfile
            zf.writestr('mcdeck.mcd', mcd)
            # If the deck has OCTGN metadata, save it
            if octgn_file_s:
                zf.writestr('octgn.txt', octgn_file_s)
    except Exception as e:
        # Remove the partially written archive before reporting failure
        try:
            os.remove(filename)
        except FileNotFoundError:
            pass
        err('Save error', f'Unable to save file: {e}')
    else:
        self._unsaved = False
        self.deckChanged.emit(False)
def _open(self, filename):
    """Opens file (must be a .zip or .mcd file).

    :param filename: path of the deck file to open
    :return: True if successful, otherwise False

    A .zip deck must contain a top-level ``mcdeck.mcd`` index with card
    images stored inside the archive; a plain ``.mcd`` index resolves
    image paths relative to its own directory. An optional ``octgn.txt``
    (inside the zip, or next to the .mcd file) supplies OCTGN metadata.
    """
    err = lambda s1, s2: ErrorDialog(self, s1, s2).exec()
    if not os.path.exists(filename):
        # Bugfix: include the actual filename in the message (it was the
        # literal placeholder string '(unknown)')
        _msg = f'{filename} does not exist'
        err('No such file', _msg)
        return False
    elif filename.lower().endswith('.zip'):
        # NOTE(review): zf is never closed on the many exit paths below;
        # consider restructuring around a with-statement — TODO
        zf = zipfile.ZipFile(filename, 'r')
        for s in zf.namelist():
            if s.lower() == 'mcdeck.mcd':
                mcd = zf.read(s).decode('utf-8')
                break
        else:
            _msg = ('Zip file does not include required card index file '
                    'mcdeck.mcd in the top dir')
            err('Missing mcdeck.mcd', _msg)
            return False
    elif filename.lower().endswith('.mcd'):
        zf = None
        # Bugfix: close the index file after reading (was a bare open())
        with open(filename, 'r') as _f:
            mcd = _f.read()
        mcd_dir = os.path.dirname(os.path.realpath(filename))
    else:
        _msg = 'File must be .zip or .mcd'
        err('Invalid file', _msg)
        return False
    # If OCTGN metadata file present, decode for later
    octgn_data = None
    try:
        if zf:
            for s in zf.namelist():
                if s.lower() == 'octgn.txt':
                    _s = zf.read(s).decode('utf-8')
                    octgn_data = octgn.OctgnCardSetData.from_str(_s)
                    break
        else:
            _dir = os.path.dirname(filename)
            octgn_file = os.path.join(_dir, 'octgn.txt')
            if os.path.isfile(octgn_file):
                with open(octgn_file, 'r') as f:
                    _s = f.read()
                octgn_data = octgn.OctgnCardSetData.from_str(_s)
    except Exception as e:
        _msg = ('Metadata file "octgn.txt" present but could '
                f'not parse its contents: {e}')
        err('Metadata error (OCTGN)', _msg)
        return False
    # Clear current deck
    self.clear()
    QtCore.QCoreApplication.processEvents()  # Force Qt display update
    # (Try to) load deck
    _url_download_approved = False
    self.__operation_cancelled = False
    dlg = QtWidgets.QProgressDialog('Parsing mcdeck.mcd', 'Cancel', 0, 100)
    dlg.canceled.connect(self.cancelOperation)
    try:
        mode = None        # current section ('player'/'encounter'/'villain')
        mode_sub = None    # image source override ('url'/'gdrive'/None)
        mcd_lines = list(enumerate(mcd.splitlines()))
        dlg.setMaximum(len(mcd_lines))
        dlg.show()
        while mcd_lines:
            num, line = mcd_lines.pop(0)
            dlg.setValue(num)
            QtCore.QCoreApplication.processEvents()  # Force Qt update
            if self.__operation_cancelled:
                err('Operation cancelled', 'Operation cancelled by user.')
                raise LcgException('Operation was cancelled')
            if line.startswith('#'):
                # Comment line
                continue
            if not line.strip():
                # Blank line terminates the current section
                mode = None
                continue
            _mode_set_here = False
            if line and line[:1].strip():
                # First character is not whitespace -> section title line
                try:
                    l, s, p = parse_mcd_file_section_header(line)
                except ValueError as e:
                    err('MCD file error',
                        f'Format error line {num + 1}: {e}')
                    raise LcgException('Invalid MCD index file')
                if l.lower() in ('player', 'encounter', 'villain'):
                    # Player, encounter or villain section
                    if mode:
                        err('MCD file error',
                            f'Missing linespace before line {num + 1}')
                        raise LcgException('Invalid MCD index file')
                    # Re-parse with approved arguments list
                    _p = parse_mcd_file_section_header
                    try:
                        m_str, _s, pairs = _p(line, [], ['source'])
                    except ValueError as e:
                        err('MCD file error',
                            f'Format error line {num + 1}: {e}')
                        raise LcgException('Invalid MCD index file')
                    if 'source' in pairs:
                        _val = pairs['source']
                        if _val in ('url', 'gdrive'):
                            if not _url_download_approved:
                                _dfun = QtWidgets.QMessageBox.question
                                _msg = ('Card index contains URL(s). '
                                        'Download the remote image(s)?')
                                _k = QtWidgets.QMessageBox.Yes
                                _k = _k | QtWidgets.QMessageBox.Cancel
                                confirm = _dfun(self, 'Confirm download',
                                                _msg, _k)
                                if confirm == QtWidgets.QMessageBox.Cancel:
                                    MCDeck.deck.clear()
                                    # Bugfix: return False (not None), per
                                    # the documented return value
                                    return False
                                else:
                                    _url_download_approved = True
                            mode_sub = _val
                        else:
                            err('MCD file error',
                                f'Invalid source argument line {num + 1}')
                            raise LcgException('Invalid MCD index file')
                    else:
                        mode_sub = None
                    # NOTE(review): mode keeps the header's original case
                    # while the ctype_d lookups below use lowercase keys;
                    # presumably the parser lowercases — verify
                    mode = m_str
                    _mode_set_here = True
            if _mode_set_here:
                continue
            if mode:
                # Path to card (dir) inside an active mode
                line = line.strip()
                if mode_sub is None:
                    if zf:
                        # Read card(s) from zip file
                        path = to_posix_path(line).strip(posixpath.sep)
                        for p in zf.namelist():
                            _path = to_posix_path(p).strip(posixpath.sep)
                            if path == _path:
                                break
                        else:
                            err('MCD file error',
                                f'No such path in zip file, line {num + 1}')
                            raise LcgException('Invalid MCD index file')
                        paths = []
                        if zf.getinfo(p).is_dir():
                            # Directory entry: take all files below it
                            for s in zf.namelist():
                                if s.startswith(p) and not zf.getinfo(s).is_dir():
                                    paths.append(s)
                            if not paths:
                                err('MCD file error',
                                    f'Directory contains no files, line {num + 1}')
                        else:
                            paths.append(p)
                        for p in paths:
                            img_data = zf.read(p)
                            img = QtGui.QImage()
                            if not img.loadFromData(img_data):
                                err('Image load error',
                                    f'Could not open image {p} in zip file')
                                raise LcgException('Image load error')
                            ctype_d = {'player': Card.type_player,
                                       'encounter': Card.type_encounter,
                                       'villain': Card.type_villain}
                            self.addCard(img, ctype=ctype_d[mode])
                    else:
                        # Read card(s) from local file system
                        path = os.path.join(mcd_dir, to_local_path(line))
                        if not os.path.exists(path):
                            err('No such file', f'{path} does not exist')
                            # Bugfix: abort here; previously execution fell
                            # through and open() below crashed out of the
                            # LcgException handler with FileNotFoundError
                            raise LcgException('Invalid MCD index file')
                        paths = []
                        if os.path.isdir(path):
                            # Traverse subdir, all files
                            for root, dir, files in os.walk(path):
                                for f in files:
                                    # Add file unless it is hidden
                                    if not f.startswith('.'):
                                        paths.append(os.path.join(root, f))
                            if not paths:
                                err('MCD file error',
                                    'Directory contains no files, '
                                    f'line {num + 1}')
                        else:
                            paths.append(path)
                        for p in paths:
                            img_data = open(p, 'rb').read()
                            img = QtGui.QImage()
                            if not img.loadFromData(img_data):
                                err('Image load error',
                                    f'Could not open image {p}')
                                raise LcgException('Image load error')
                            ctype_d = {'player': Card.type_player,
                                       'encounter': Card.type_encounter,
                                       'villain': Card.type_villain}
                            self.addCard(img, ctype=ctype_d[mode])
                else:
                    # Load from specified source
                    if mode_sub == 'url':
                        img_url = line
                    elif mode_sub == 'gdrive':
                        img_url = ('https://drive.google.com/uc?'
                                   f'export=download&id={line}')
                    else:
                        raise RuntimeError('Should never happen')
                    try:
                        img = download_image(img_url)
                    except Exception:
                        err('Image load error',
                            f'Could not open image {img_url}')
                        raise LcgException('Image load error')
                    ctype_d = {'player': Card.type_player,
                               'encounter': Card.type_encounter,
                               'villain': Card.type_villain}
                    self.addCard(img, ctype=ctype_d[mode])
            elif line and line[:1].strip():
                # First character is not whitespace -> section
                try:
                    l, s, p = parse_mcd_file_section_header(line)
                except ValueError as e:
                    err('MCD file error',
                        f'Format error line {num + 1}: {e}')
                    raise LcgException('Invalid MCD index file')
                if l != 'single':
                    # player, encounter and villain sections parsed
                    # earlier; if not single here, no possible alternatives
                    err('MCD file error',
                        f'Expected "single" section line {num + 1}')
                    raise LcgException('Invalid MCD index file')
                _p = parse_mcd_file_section_header
                try:
                    l, _s, pairs = _p(line, [], ['back_bleed', 'source'])
                except ValueError as e:
                    err('MCD file error',
                        f'Format error line {num + 1}: {e}')
                    raise LcgException('Invalid MCD index file')
                if 'back_bleed' in pairs:
                    # Bugfix: read from the validated re-parse result
                    # 'pairs' (previously read the unvalidated first-parse
                    # dict 'p')
                    back_bleed = float(pairs['back_bleed'])
                    if back_bleed < 0:
                        err('MCD file error',
                            f'Invalid back_bleed arg line {num + 1}')
                        raise LcgException('Invalid MCD index file')
                else:
                    back_bleed = 0
                if 'source' in pairs:
                    _val = pairs['source']
                    if _val in ('url', 'gdrive'):
                        mode_sub = _val
                        if not _url_download_approved:
                            _dfun = QtWidgets.QMessageBox.question
                            _msg = ('Card index contains URL(s). '
                                    'Download the remote image(s)?')
                            _k = QtWidgets.QMessageBox.Yes
                            _k = _k | QtWidgets.QMessageBox.Cancel
                            confirm = _dfun(self, 'Confirm download',
                                            _msg, _k)
                            if confirm == QtWidgets.QMessageBox.Cancel:
                                MCDeck.deck.clear()
                                # Bugfix: return False (not None), per the
                                # documented return value
                                return False
                            else:
                                _url_download_approved = True
                    else:
                        err('MCD file error',
                            f'Invalid source argument line {num + 1}')
                        raise LcgException('Invalid MCD index file')
                else:
                    mode_sub = None
                # Read single card data (1 = front only, 2 = front + back)
                single_args = []
                while mcd_lines:
                    num, line = mcd_lines.pop(0)
                    if not line.strip():
                        break
                    if not line[0].isspace():
                        err('MCD file error',
                            f'Expected indent on line {num + 1}')
                        raise LcgException('Invalid MCD index file')
                    single_args.append(line.strip())
                if not 1 <= len(single_args) <= 2:
                    err('MCD file error',
                        'Single card should have 1 or 2 args, line '
                        f'{num + 1}')
                    raise LcgException('Invalid MCD index file')
                single_images = []
                for arg in single_args:
                    if mode_sub is None:
                        if zf:
                            # Read card(s) from zip file
                            path = to_posix_path(arg).strip(posixpath.sep)
                            for p in zf.namelist():
                                _path = to_posix_path(p).strip(posixpath.sep)
                                if path == _path:
                                    break
                            else:
                                err('MCD file error',
                                    'No such file in zip file, line '
                                    f'{num + 1}')
                                raise LcgException('Invalid MCD index file')
                            if zf.getinfo(p).is_dir():
                                err('MCD file error',
                                    f'Entry is a directory, line {num + 1}')
                                # Bugfix: abort instead of attempting to
                                # read a directory entry as image data
                                raise LcgException('Invalid MCD index file')
                            img_data = zf.read(p)
                            img = QtGui.QImage()
                            if not img.loadFromData(img_data):
                                err('Image load error',
                                    f'Could not open image {p} in zip file')
                                raise LcgException('Image load error')
                            single_images.append(img)
                        else:
                            # Read card(s) from file system
                            path = os.path.join(mcd_dir, to_local_path(arg))
                            if not os.path.exists(path):
                                err('MCD file error',
                                    f'No such file {path}, line {num + 1}')
                                raise LcgException('Invalid MCD index file')
                            if os.path.isdir(path):
                                err('MCD file error',
                                    f'Entry is a directory, line {num + 1}')
                                # Bugfix: abort; open() below would raise an
                                # uncaught IsADirectoryError otherwise
                                raise LcgException('Invalid MCD index file')
                            img_data = open(path, 'rb').read()
                            img = QtGui.QImage()
                            if not img.loadFromData(img_data):
                                err('Image load error',
                                    f'Could not open image {path}')
                                raise LcgException('Image load error')
                            single_images.append(img)
                    else:
                        # Load from specified source
                        if mode_sub == 'url':
                            img_url = arg
                        elif mode_sub == 'gdrive':
                            img_url = ('https://drive.google.com/uc?'
                                       f'export=download&id={arg}')
                        else:
                            raise RuntimeError('Should never happen')
                        try:
                            img = download_image(img_url)
                        except Exception:
                            err('Image load error',
                                f'Could not open image from {img_url}')
                            raise LcgException('Image load error')
                        single_images.append(img)
                # Add single card
                if len(single_images) == 1:
                    front_img, = single_images
                    back_img = None
                else:
                    front_img, back_img = single_images
                self.addCard(front_img, back_img, back_bleed)
            else:
                # Bugfix: report 1-based line number like every other
                # message in this method (was the 0-based 'num')
                err('MCD file error', f'Syntax error line {num + 1}')
                raise LcgException('Invalid MCD index file')
        # If OCTGN metadata is present, add metadata to cards
        if octgn_data:
            card_set_data, card_data_list = octgn_data
            if len(self.__cards) != len(card_data_list):
                raise LcgException('Number of cards does not match number '
                                   'of cards with OCTGN metadata')
            self._octgn = card_set_data
            for card, data in zip(self.__cards, card_data_list):
                if data.alt_data and not card.specified_back_img:
                    _msg = ('There is/are card(s) with alternate card '
                            'OCTGN metadata without a card back side')
                    raise LcgException(_msg)
                card._octgn = data
            self.deckHasOctgn.emit(True)
        else:
            self.deckHasOctgn.emit(False)
        self.reset()
    except LcgException as e:
        # Could not load deck, clear the partially loaded deck
        for card in self.__cards:
            card.hide()
        self.__cards = []
        self._octgn = None
        self.reset()
        err = lambda s1, s2: ErrorDialog(self, s1, s2).exec()
        err('Error loading deck', f'Could not load deck: {e}')
        return False
    else:
        self._unsaved = False
        if filename.lower().endswith('.zip'):
            self._save_file = filename
            self.filenameChange.emit(filename)
        else:
            self._save_file = None
            self.filenameChange.emit('')
        self.deckChanged.emit(False)
        return True
def _deck_changed(self):
    """Process that a change was made to the deck.

    Marks the deck as having unsaved changes and notifies listeners
    (e.g. the main window's title/menu state) via deckChanged.
    """
    self._unsaved = True
    self.deckChanged.emit(True)
def _update_size(self, width, height):
    """Resize the internal card view and lay cards out in a grid.

    :param width: available viewport width in pixels
    :param height: available viewport height in pixels
    """
    card_w = self.__card_scaled_width
    card_h = self.__card_scaled_height
    # Number of columns that fit horizontally (always at least one)
    cols = max(int(width / card_w), 1)
    # Number of rows needed for all cards (always at least one)
    full_rows, remainder = divmod(len(self.__cards), cols)
    rows = max(full_rows + (1 if remainder else 0), 1)
    # The view never shrinks below the viewport dimensions
    x_span = max(card_w, width)
    y_span = max(rows * card_h, height)
    self.__view.resize(x_span, y_span)
    # Position each card widget at its grid cell
    for index, card in enumerate(self.__cards):
        row, col = divmod(index, cols)
        card.move(QtCore.QPoint(col * card_w, row * card_h))
def _copy_card(self, card):
    """Copies a card and connects the result to appropriate deck slots.

    :param card: the card to copy
    :type card: :class:`Card`
    :return: copied card
    :rtype: :class:`Card`

    The card should be a card in the deck.
    """
    if card not in self.__cards:
        raise ValueError('Card not in deck')
    duplicate = card.copy()
    # Wire the copy's selection signals to the deck's selection handlers
    for signal, slot in ((duplicate.cardSelected, self.cardSingleSelected),
                         (duplicate.cardCtrlSelected, self.cardCtrlSelected),
                         (duplicate.cardShiftSelected,
                          self.cardShiftSelected)):
        signal.connect(slot)
    return duplicate
class Card(QtWidgets.QWidget):
    """View for one single card.

    :param front: card front side
    :type front: :class:`PySide6.QtGui.QImage`
    :param back: card back side (None if no image set)
    :type back: :class:`PySide6.QtGui.QImage`
    :param bbleed: amount of bleed on back image
    :param ctype: card type
    :type ctype: int
    :param parent: parent widget
    :type parent: :class:`QtWidgets.QWidget`

    The `ctype` argument must be either `ctype.type_unspecified`,
    `ctype.type_player`, `ctype.type_encounter` or `ctype.type_villain`.

    *args* and *kwargs* are passed to :class:`QtWidgets.QWidget` constructor.
    """
    # Bugfix: the class docstring above was previously placed after the
    # enum constants, making it a no-op statement instead of Card.__doc__

    # Enum values for resolving card types
    type_unspecified = 0
    type_player = 1
    type_encounter = 2
    type_villain = 3

    cardSelected = QtCore.Signal(QtWidgets.QWidget)      # Single card select
    cardCtrlSelected = QtCore.Signal(QtWidgets.QWidget)  # Card ctrl-select
    cardShiftSelected = QtCore.Signal(QtWidgets.QWidget)  # Card shift-select

    def __init__(self, front, back=None, bbleed=0, ctype=0, parent=None):
        super().__init__(parent)
        self.__front = front
        self.__back = back
        self.__back_bleed = bbleed
        if ctype not in (Card.type_unspecified, Card.type_player,
                         Card.type_encounter, Card.type_villain):
            raise ValueError('Illegal card type value')
        self.__type = ctype
        # Cached scaled render images; None forces a rescale in
        # _update_size()
        self.__scaled_front_img = None
        self.__scaled_back_img = None
        self.__back_offset = 0
        self.__margin = 0
        self.__cropped_back = None
        self._octgn = None  # OCTGN card data for the card (if set)
        self._octgn_back = None  # OCTGN card data for the card back (if set)
        self._imported = False  # If True the card was originally imported
        self._selected = False
        # Palette for background color when selected
        pal = QtGui.QPalette()
        pal.setColor(QtGui.QPalette.Window, '#cde8ff')
        self.setPalette(pal)
        self.reset()

    def setCardWidth(self, width):
        """Calculates widget height and sets widget size."""
        height = int(self._calcWidgetAspectHeight(width))
        self.setFixedSize(width, height)

    def reset(self):
        """Resets card rendering from card config information."""
        # Invalidate cached back-side renders so they are regenerated
        self.__cropped_back = None
        self.__scaled_back_img = None
        self._update_size(self.width(), self.height())
        # Selected cards get a filled (highlight) background
        self.setAutoFillBackground(self._selected)
        self.repaint()

    def paintEvent(self, event):
        """Paint front and back images, respecting the stacking order."""
        # Internal function for drawing front or back image
        def _draw_img(p, img, x, y):
            rounding_mm = MCDeck.settings.corner_rounding_mm
            if rounding_mm == 0:
                p.drawImage(QtCore.QPoint(x, y), img)
            else:
                # Rounded corners: paint the image as a brush inside a
                # rounded rectangle
                brush = QtGui.QBrush(img)
                p.setBrush(brush)
                p.setBrushOrigin(x, y)
                w_px, h_px = img.width(), img.height()
                r_x_px = int((rounding_mm/img.widthMm())*w_px)
                r_y_px = int((rounding_mm/img.heightMm())*h_px)
                p.drawRoundedRect(x, y, w_px, h_px, r_x_px, r_y_px)
        painter = QtGui.QPainter(self)
        front_img, back_img = self.__scaled_front_img, self.__scaled_back_img
        if MCDeck._front_on_top:
            # Back first, front drawn on top of it
            if back_img:
                _draw_img(painter, back_img, self.__back_x, self.__back_y)
            if front_img:
                _draw_img(painter, front_img, self.__front_x, self.__front_y)
        else:
            # Bugfix: the front image was never drawn in this branch; draw
            # front first so the back ends up on top
            if front_img:
                _draw_img(painter, front_img, self.__front_x, self.__front_y)
            if back_img:
                _draw_img(painter, back_img, self.__back_x, self.__back_y)
        painter.end()

    def resizeEvent(self, event):
        size = event.size()
        self._update_size(size.width(), size.height())

    def mousePressEvent(self, event):
        # Emit the selection signal matching the active keyboard modifier
        if event.buttons() == QtCore.Qt.LeftButton:
            key_mods = QtGui.QGuiApplication.keyboardModifiers()
            shift = key_mods & QtCore.Qt.ShiftModifier
            ctrl = key_mods & QtCore.Qt.ControlModifier
            if shift:
                self.cardShiftSelected.emit(self)
            elif ctrl:
                self.cardCtrlSelected.emit(self)
            else:
                self.cardSelected.emit(self)

    def copy(self):
        """Generate a copy of this card."""
        card = Card(self.__front, self.__back, self.__back_bleed, self.__type,
                    self.parentWidget())
        if self._octgn:
            card._octgn = self._octgn.copy()
        # NOTE(review): _octgn_back and _imported are not copied here —
        # confirm whether that is intended
        card.setCardWidth(self.width())
        card.move(self.pos())
        return card

    def select(self, state):
        """Set new card selection state.

        :param state: new state
        :type state: bool
        """
        changed = state ^ self._selected
        self._selected = state
        if changed:
            self.reset()

    def set_front_image(self, image):
        """Sets a new front image for the card.

        :param image: image to set as front image (if None, remove it)
        :type image: :class:`QtGui.QImage`
        """
        if (not isinstance(image, QtGui.QImage) or image.isNull()):
            raise ValueError('Must be a valid image')
        self.__front = image
        self.__scaled_front_img = None
        self.reset()

    def set_back_image(self, image, bleed=0):
        """Sets a back image for the card.

        :param image: image to set as back image (if None, remove it)
        :type image: :class:`QtGui.QImage`
        :param bleed: bleed included on image, in mm
        """
        if image is None:
            self.__back = None
        else:
            if (not isinstance(image, QtGui.QImage) or image.isNull() or
                bleed < 0):
                raise ValueError('Must be a valid image with bleed >= 0')
            self.__back = image
            self.__back_bleed = bleed
        self.reset()

    @property
    def selected(self):
        """True if card is currently selected."""
        return self._selected

    @property
    def front_img(self):
        """Card front side image."""
        return self.__front

    @property
    def back_img(self):
        """Card back side image (either set on card, or derived from type).

        If no image was set for the back side and the card has a type for
        which a back side has been specified in settings, that image is
        returned.
        """
        if self.__back:
            return self.__back
        elif self.__type == Card.type_player:
            return MCDeck.settings.player_back_image()
        elif self.__type == Card.type_encounter:
            return MCDeck.settings.encounter_back_image()
        elif self.__type == Card.type_villain:
            return MCDeck.settings.villain_back_image()
        else:
            return None

    @property
    def specified_back_img(self):
        """Back side image set on card (ignoring card backs from card type)."""
        return self.__back

    @property
    def back_bleed(self):
        """Amount of bleed on :attr:`back_img` (mm)."""
        if self.__back:
            return self.__back_bleed
        elif self.__type == Card.type_player:
            return MCDeck.settings.player_bleed_mm
        elif self.__type == Card.type_encounter:
            return MCDeck.settings.encounter_bleed_mm
        elif self.__type == Card.type_villain:
            return MCDeck.settings.villain_bleed_mm
        else:
            return 0

    @property
    def specified_back_bleed(self):
        """Amount of bleed on attr:`specified_back_img` (mm)."""
        return self.__back_bleed

    @property
    def ctype(self):
        """Card type.

        Card type is either `Card.type_unspecified`, `Card.type_player`,
        `Card.type_encounter` or `Card.type_villain`.
        """
        return self.__type

    @ctype.setter
    def ctype(self, value):
        if value not in (Card.type_unspecified, Card.type_player,
                         Card.type_encounter, Card.type_villain):
            raise ValueError('Illegal card type value')
        self.__type = value
        # Back side may now come from a different default; drop caches
        self.__scaled_back_img = None
        self.__cropped_back = None
        self.reset()

    def _update_size(self, width, height):
        """Recompute layout offsets and (re)scale cached render images."""
        _s = MCDeck.settings
        back_rel_offset = _s.card_back_rel_offset
        card_rel_margin = _s.card_back_rel_spacing
        # Solve for the card width that fits the widget including the
        # back-image offset and the surrounding margin
        card_width = width/(1 + back_rel_offset)
        card_width /= (1 + 2*card_rel_margin)
        self.__back_offset = card_width * back_rel_offset
        self.__margin = (card_width + self.__back_offset)*card_rel_margin
        _s_c_height = _s.card_height_mm
        _s_c_width = _s.card_width_mm
        card_height = card_width*(_s_c_height/_s_c_width)
        self.__front_x = int(self.__margin)
        self.__front_y = self.__front_x
        self.__back_x = int(self.__margin + self.__back_offset)
        self.__back_y = self.__back_x
        card_width = int(card_width)
        card_height = int(self._calcWidgetAspectHeight(card_width))
        # Card front: rescale only when the cached image size is stale
        if (self.__scaled_front_img is None or
            self.__scaled_front_img.width() != card_width or
            self.__scaled_front_img.height() != card_height):
            size = QtCore.QSize(card_width, card_height)
            mode = QtCore.Qt.SmoothTransformation
            _img = self.__front.scaled(size, mode=mode)
            self.__scaled_front_img = LcgImage(_img)
            self.__scaled_front_img.setWidthMm(_s.card_width_mm)
            self.__scaled_front_img.setHeightMm(_s.card_height_mm)
        # Card back: crop bleed once, then rescale when stale
        if self.back_img:
            if self.__cropped_back is None:
                if self.back_bleed == 0:
                    self.__cropped_back = self.back_img
                else:
                    img = LcgImage(self.back_img).cropBleed(self.back_bleed)
                    self.__cropped_back = img
            back = self.__cropped_back
            if (self.__scaled_back_img is None or
                self.__scaled_back_img.width() != card_width or
                self.__scaled_back_img.height() != card_height):
                size = QtCore.QSize(card_width, card_height)
                mode = QtCore.Qt.SmoothTransformation
                _img = back.scaled(size, mode=mode)
                self.__scaled_back_img = LcgImage(_img)
                self.__scaled_back_img.setWidthMm(_s.card_width_mm)
                self.__scaled_back_img.setHeightMm(_s.card_height_mm)

    def _calcWidgetAspectHeight(self, width):
        """Calculate widget height for correct card aspect for given width.

        :param width: target card width
        :type width: float or int
        """
        back_rel_offset = MCDeck.settings.card_back_rel_offset
        card_rel_margin = MCDeck.settings.card_back_rel_spacing
        card_width = width/(1 + back_rel_offset)
        card_width /= (1 + 2*card_rel_margin)
        back_offset = card_width * back_rel_offset
        margin = (card_width + back_offset)*card_rel_margin
        card_height = card_width*(MCDeck.settings.card_height_mm /
                                  MCDeck.settings.card_width_mm)
        height = card_height + back_offset
        height += 2*margin
        return height
class CardTypeDialog(QtWidgets.QDialog):
    """Dialog for selecting card type.

    Presents one button per default card back (player/encounter/villain,
    with a preview icon when the settings provide a back image) plus
    buttons for selecting a back image file, using the front as the back,
    or having no back at all. The chosen option is exposed as
    :attr:`result` in the form (res_type, res_data).
    """

    # Class-level icon cache: the (source image, bleed) used to build each
    # preview icon, and the built QIcon, indexed player/encounter/villain
    _back_sources = [(None, 0)]*3
    _back_lazy = [None]*3

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.__result = None
        main_layout = QtWidgets.QVBoxLayout()
        btn_width, btn_height = 93, 132
        btns = []
        layout = QtWidgets.QHBoxLayout()
        _s = MCDeck.settings
        d = ((_s.player_back_image(), _s.player_bleed_mm, 'Player',
              'Use default back card for player cards'),
             (_s.encounter_back_image(), _s.encounter_bleed_mm, 'Encounter',
              'Use default back card for encounter cards'),
             (_s.villain_back_image(), _s.villain_bleed_mm, 'Villain',
              'Use default back card for villain cards'))
        for i, entry in enumerate(zip(d, CardTypeDialog._back_sources,
                                      CardTypeDialog._back_lazy)):
            dval, back, lazy = entry
            img, bleed, text, tip = dval
            back_img, back_bleed = back
            btn = QtWidgets.QPushButton()
            if img:
                if back_img is img and back_bleed == bleed:
                    # Lazy-copy icon if possible to avoid expensive rescale
                    icon = lazy
                else:
                    # Bugfix: remember the *source* image for the cache
                    # check above (the cache previously stored the
                    # cropped/scaled image, so it could never match)
                    src_img = img
                    if bleed > 0:
                        img = LcgImage(img).cropBleed(bleed)
                    img = img.scaled(btn_width, btn_height,
                                     mode=QtCore.Qt.SmoothTransformation)
                    pix = QtGui.QPixmap.fromImage(img)
                    icon = QtGui.QIcon(pix)
                    CardTypeDialog._back_sources[i] = (src_img, bleed)
                    CardTypeDialog._back_lazy[i] = icon
                btn.setIcon(icon)
                # Bugfix: 'pix' is unbound when the cached icon is reused;
                # the scaled pixmap is always btn_width x btn_height, so
                # use that size directly
                btn.setIconSize(QtCore.QSize(btn_width, btn_height))
                btn.setToolTip(tip)
            else:
                btn.setText(text)
            layout.addWidget(btn)
            btns.append(btn)
        btn = QtWidgets.QPushButton()
        btn.setFixedSize(btn_width, btn_height)
        btn.setText('Select\nfile')
        btn.setToolTip('Select card back image')
        layout.addWidget(btn)
        btns.append(btn)
        btn = QtWidgets.QPushButton()
        btn.setFixedSize(btn_width, btn_height)
        btn.setText('Same\nas\nfront')
        btn.setToolTip('Use card front(s) as the card back(s)')
        layout.addWidget(btn)
        btns.append(btn)
        btn = QtWidgets.QPushButton()
        btn.setFixedSize(btn_width, btn_height)
        btn.setText('No\ncard\nback')
        btn.setToolTip('No back side image')
        layout.addWidget(btn)
        btns.append(btn)
        main_layout.addLayout(layout)
        main_layout.setAlignment(layout, QtCore.Qt.AlignHCenter)
        btns[0].clicked.connect(self.clickedPlayer)
        btns[1].clicked.connect(self.clickedEncounter)
        btns[2].clicked.connect(self.clickedVillain)
        btns[3].clicked.connect(self.clickedSelectBackImage)
        btns[4].clicked.connect(self.clickedSameAsFront)
        btns[5].clicked.connect(self.clickedNoBack)
        # Pushbuttons
        btn_box = QtWidgets.QDialogButtonBox(QtWidgets.QDialogButtonBox.Cancel)
        btn_box.rejected.connect(self.reject)
        main_layout.addWidget(btn_box)
        main_layout.setAlignment(btn_box, QtCore.Qt.AlignHCenter)
        self.setLayout(main_layout)
        self.setWindowTitle('Select card back')

    @property
    def result(self):
        """Result of accept operation in the form of (res_type, res_data)."""
        return self.__result

    @QtCore.Slot()
    def clickedPlayer(self):
        self.__result = (1, Card.type_player)
        self.accept()

    @QtCore.Slot()
    def clickedEncounter(self):
        self.__result = (1, Card.type_encounter)
        self.accept()

    @QtCore.Slot()
    def clickedVillain(self):
        self.__result = (1, Card.type_villain)
        self.accept()

    @QtCore.Slot()
    def clickedSelectBackImage(self):
        # Open dialog to select back side image
        _fun = loadImageFromFileDialog
        img = _fun(self, 'Open card back image')
        if img:
            self.__result = (2, img)
            self.accept()

    @QtCore.Slot()
    def clickedSameAsFront(self):
        self.__result = (3, None)
        self.accept()

    @QtCore.Slot()
    def clickedNoBack(self):
        self.__result = (4, None)
        self.accept()
class MarvelCDBCardImportDialog(QtWidgets.QDialog):
    """Dialog for Tools -> MarvelCDB -> Import Card ...

    Collects MarvelCDB card ID(s)/URL(s) in a line edit (self._le) and a
    "create placeholder" option (self._create_placeholders_chk); the caller
    reads both widgets after the dialog is accepted.
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._ids = []
        self.setWindowTitle('Import card(s) from MarvelCDB')
        l = QtWidgets.QVBoxLayout()
        _lbl = QtWidgets.QLabel
        # Intro label with clickable link to MarvelCDB
        _tl = _lbl('Enter <a href="https://marvelcdb.com/">MarvelCDB</a> card '
                   'ID(s) or URL(s) separated by spaces or commas.')
        _tl.setTextFormat(QtCore.Qt.RichText)
        _tl.setOpenExternalLinks(True)
        l.addWidget(_tl)
        # Input row for the card ID(s)/URL(s)
        box = QtWidgets.QGroupBox()
        box_l = QtWidgets.QHBoxLayout()
        box_l.addWidget(QtWidgets.QLabel('ID(s) or URL(s):'))
        self._le = QtWidgets.QLineEdit()
        box_l.addWidget(self._le)
        box.setLayout(box_l)
        l.addWidget(box)
        # Placeholder-generation option (default: enabled)
        _l = QtWidgets.QHBoxLayout()
        self._create_placeholders_chk = QtWidgets.QCheckBox()
        self._create_placeholders_chk.setChecked(True)
        _tip = ('If checked, then a placeholder image is generated if the '
                'card has no image in MarvelCDB.')
        self._create_placeholders_chk.setToolTip(_tip)
        _l.addWidget(self._create_placeholders_chk)
        _l.addWidget(_lbl('Create placeholder if no image in MarvelCDB'))
        _l.addStretch(1)
        l.addLayout(_l)
        # Inform the user when the import will switch on OCTGN metadata
        if not MCDeck.deck._octgn:
            l.addWidget(_lbl('Note: importing MarvelCDB card(s) '
                             'automatically enables OCTGN metadata'))
        # Import/Cancel button row
        l2 = QtWidgets.QHBoxLayout()
        l2.addStretch(1)
        btn_import = QtWidgets.QPushButton('Import')
        btn_import.clicked.connect(self.accept)
        btn_cancel = QtWidgets.QPushButton('Cancel')
        btn_cancel.clicked.connect(self.reject)
        l2.addWidget(btn_import)
        l2.addWidget(btn_cancel)
        l.addLayout(l2)
        self.setLayout(l)
class MarvelCDBDeckImportDialog(QtWidgets.QDialog):
    """Dialog for Tools -> MarvelCDB -> Import Deck ...

    Collects a MarvelCDB deck ID/URL (self._le) plus three options: include
    hero cards, include non-hero cards, and create placeholder images; the
    caller reads the widgets after the dialog is accepted.
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._ids = []
        self.setWindowTitle('Import deck from MarvelCDB')
        l = QtWidgets.QVBoxLayout()
        _lbl = QtWidgets.QLabel
        # Intro label with clickable link to MarvelCDB
        _tl = _lbl('Enter <a href="https://marvelcdb.com/">MarvelCDB</a> deck '
                   'ID or URL.')
        _tl.setTextFormat(QtCore.Qt.RichText)
        _tl.setOpenExternalLinks(True)
        l.addWidget(_tl)
        # Input row for the deck ID/URL
        box = QtWidgets.QGroupBox()
        box_l = QtWidgets.QHBoxLayout()
        box_l.addWidget(QtWidgets.QLabel('Deck ID or URL:'))
        self._le = QtWidgets.QLineEdit()
        box_l.addWidget(self._le)
        box.setLayout(box_l)
        l.addWidget(box)
        # Option: include hero cards (default: enabled)
        _l = QtWidgets.QHBoxLayout()
        self._include_hero_cards_chk = QtWidgets.QCheckBox()
        self._include_hero_cards_chk.setChecked(True)
        _tip = ('If unchecked, hero cards are excluded from the import. This '
                'is useful for combining non-hero cards from MarvelCDB with '
                'a custom hero set.')
        self._include_hero_cards_chk.setToolTip(_tip)
        _l.addWidget(self._include_hero_cards_chk)
        _l.addWidget(_lbl('Include hero cards when importing'))
        _l.addStretch(1)
        l.addLayout(_l)
        # Option: include non-hero cards (default: enabled)
        _l = QtWidgets.QHBoxLayout()
        self._include_non_hero_cards_chk = QtWidgets.QCheckBox()
        self._include_non_hero_cards_chk.setChecked(True)
        _tip = ('If unchecked, non-hero cards are excluded from the import. '
                'This is useful for getting only a set of hero cards to '
                'combine with custom aspect cards.')
        self._include_non_hero_cards_chk.setToolTip(_tip)
        _l.addWidget(self._include_non_hero_cards_chk)
        _l.addWidget(_lbl('Include non-hero cards when importing'))
        _l.addStretch(1)
        l.addLayout(_l)
        # Option: generate placeholders for cards without images
        _l = QtWidgets.QHBoxLayout()
        self._create_placeholders_chk = QtWidgets.QCheckBox()
        self._create_placeholders_chk.setChecked(True)
        _tip = ('If checked, then a placeholder image is generated if the '
                'card has no image in MarvelCDB.')
        self._create_placeholders_chk.setToolTip(_tip)
        _l.addWidget(self._create_placeholders_chk)
        _l.addWidget(_lbl('Create placeholder if no image in MarvelCDB'))
        _l.addStretch(1)
        l.addLayout(_l)
        # Inform the user when the import will switch on OCTGN metadata
        if not MCDeck.deck._octgn:
            l.addWidget(_lbl('Note: importing a MarvelCDB deck '
                             'automatically enables OCTGN metadata'))
        # Import/Cancel button row
        l2 = QtWidgets.QHBoxLayout()
        l2.addStretch(1)
        btn_import = QtWidgets.QPushButton('Import')
        btn_import.clicked.connect(self.accept)
        btn_cancel = QtWidgets.QPushButton('Cancel')
        btn_cancel.clicked.connect(self.reject)
        l2.addWidget(btn_import)
        l2.addWidget(btn_cancel)
        l.addLayout(l2)
        self.setLayout(l)
class LoadMarvelCDBDialog(QtWidgets.QDialog):
    """Dialog for first time initialization of MarvelCDB cards index.

    Offers downloading either only the player cards (fast) or all cards
    (slow). After accept(), the boolean attribute ``_all`` tells the caller
    which set was chosen.
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.setWindowTitle('Download MarvelCDB cards index')
        self.setMaximumWidth(600)
        _l = QtWidgets.QVBoxLayout()
        # Bugfix: added the missing space after '</b>' (the text rendered
        # as 'disableduntil')
        _txt = '''<p>Accessing <a href="https://marvelcdb.com/">MarvelCDB</a>
        cards requires downloading a card index. Setting up access to all cards
        is slower and more taxing on the MarvelCDB server, so <b>consider
        downloading player cards</b> unless you also need encounters and
        villains.</p>

        <p>After constructing the card index, <b>PDF generation will be
        disabled</b> until the app is closed (a gentle reminder that official
        game cards should not be printed).</p>

        <p>Choose which card set to download:</p>
        '''
        _lbl = QtWidgets.QLabel(_txt)
        _lbl.setTextFormat(QtCore.Qt.RichText)
        _lbl.setOpenExternalLinks(True)
        _lbl.setWordWrap(True)
        _l.addWidget(_lbl)
        # Button row: player cards (default) / all cards / cancel
        _l2 = QtWidgets.QHBoxLayout()
        _l2.addStretch()
        self._fast_btn = QtWidgets.QPushButton('Player cards')
        self._fast_btn.clicked.connect(self.fast_btn)
        _l2.addWidget(self._fast_btn)
        self._slow_btn = QtWidgets.QPushButton('All cards')
        self._slow_btn.clicked.connect(self.slow_btn)
        _l2.addWidget(self._slow_btn)
        self._cancel_btn = QtWidgets.QPushButton('Cancel')
        self._cancel_btn.clicked.connect(self.reject)
        _l2.addWidget(self._cancel_btn)
        self._fast_btn.setDefault(True)
        _l.addLayout(_l2)
        self.setLayout(_l)

    @QtCore.Slot()
    def slow_btn(self):
        """Accept the dialog with the 'all cards' choice."""
        self._all = True
        self.accept()

    @QtCore.Slot()
    def fast_btn(self):
        """Accept the dialog with the 'player cards only' choice."""
        self._all = False
        self.accept()
def main():
    """Entry point: build the Qt application, parse the command line and
    show the main MCdeck window."""
    qt_app = QtWidgets.QApplication([sys.argv[0]])
    qt_app.setApplicationName('MCdeck')
    qt_app.setApplicationVersion(mcdeck.__version__)

    # Command line handling (everything after the program name).
    arg_parser = ArgumentParser(
        description='MCdeck - Export custom cards for Marvel Champions: The Card Game')
    arg_parser.add_argument('deck', metavar='deck_file', nargs='?', type=str,
                            help='source deck to load (.zip or .mcd)')
    arg_parser.add_argument('--version', action='version',
                            version=f'%(prog)s {mcdeck.__version__}')
    cli_args = arg_parser.parse_args(sys.argv[1:])

    # Show the main window first, then (optionally) load the requested deck.
    view = MCDeck()
    view.resize(800, 600)
    view.show()
    if cli_args.deck:
        view.deck._open(cli_args.deck)
        view.deck._undo.clear()
    sys.exit(qt_app.exec())


if __name__ == '__main__':
    main()
/DMT_core-2.0.0-py3-none-any.whl/DMT/core/sim_con.py | import copy
import logging
import time
import subprocess
import itertools
from joblib import Parallel, delayed
from reprint import output
from pathlib import Path, PurePosixPath, PureWindowsPath
import multiprocessing
from DMT.core import Singleton, print_progress_bar
from DMT.config import DATA_CONFIG
from DMT.exceptions import SimulationUnsuccessful, SimulationFail
# import them always -> can become very annoying otherways (if default is False but one dut is remote)
from tempfile import NamedTemporaryFile
from zipfile import ZipFile
try:
import paramiko
from scp import SCPClient, SCPException
except ImportError:
pass
def upload_progress(filename, size, sent):
    """Progress callback handed to the paramiko SCP client.

    Called repeatedly during an upload with the file name, its total size in
    bytes and the number of bytes sent so far; renders this state as a
    console progress bar.
    """
    print_progress_bar(
        sent,
        size,
        prefix="Uploading Simulations",
        suffix=filename,
        length=50,
    )
class SimCon(object, metaclass=Singleton):
"""Simulation controller class. SINGLETON design pattern.
Parameters
----------
n_core : int
Number of cores that shall be used for simulations.
t_max : float
Timeout for simulations. If a simulation runs longer than t_max in seconds, it is killed.
Attributes
----------
n_core : int
Number of cores that shall be used for simulations.
t_max : float
Timeout for simulations. If a simulation runs longer than t_max in seconds, it is killed.
sim_list : [{'dut': :class:`~DMT.core.dut_view.DutView`, 'sweep': :class:`~DMT.core.sweep.Sweep`}]
A list of dicts containing the queued simulations. Each dict holds a 'dut' key value pair and a 'sweep' key value pair.
ssh_client
Client to execute SSH commands on a remote server.
scp_client
Client to transfer files to a remote server via SCP.
"""
def __init__(self, n_core=None, t_max=30):
if n_core is None:
# Use all available threads by default (for best performance)
self.n_core = multiprocessing.cpu_count()
else:
self.n_core = n_core
self.t_max = t_max
self.sim_list = []
### ssh stuff
self.ssh_client = None
self.scp_client = None
def clear_sim_list(self):
"""Remove everything from the sim_list"""
self.sim_list = []
def append_simulation(self, dut=None, sweep=None):
"""Adds DutViews together with Sweeps to the list of simulations sim_list.
This methods adds each dut with a copy of each sweep to the simulation list.
Parameters
----------
dut : :class:`~DMT.core.dut_view.DutView` or [:class:`~DMT.core.dut_view.DutView`]
Objected of a subclass of DutView. This object describes the device to be simulated and specifies the backend.
sweep : :class:`~DMT.core.sweep.Sweep` or [:class:`~DMT.core.sweep.Sweep`]
Definition of the sweep to be performed on the DUT according to the Sweep class.
"""
if not isinstance(dut, list):
dut = [dut]
if isinstance(sweep, list):
sweep = [copy.deepcopy(sweep_a) for sweep_a in sweep]
else:
sweep = [copy.deepcopy(sweep)]
self.sim_list += [
{"dut": dut_a, "sweep": sweep_a} for dut_a, sweep_a in itertools.product(dut, sweep)
]
    def run_and_read(self, force=False, remove_simulations=False, parallel_read=False):
        """Run all queued simulations and load the results into the Duts' databases.

        Already-available results (in ``dut.data``, in the database, or left
        over from an earlier simulation run) are reused unless ``force`` is
        set.  Afterwards the queue ``self.sim_list`` is emptied.

        Parameters
        ----------
        force : bool, optional
            If True, the simulations will be run and saved back. If False, the simulations will only be run if that has not already been done before. This is ensured using the hash system. By default False.
        remove_simulations : bool, optional
            If True, the simulation results will be deleted after read in, by default False. Activate to save disk space.
        parallel_read : bool, optional
            If True, the simulation results are read in using joblib parallel, by default False. Is False because some simulators have issues with this...

        Returns
        -------
        boolean
            True, if no simulation failed. This means it is also true if no simulation was run at all.
        boolean
            True, if any simulation was started. False if all simulations were read from hard disk.
        """
        # reduce number of jobs if we only read a very low number of simulations
        n_jobs = self.n_core if len(self.sim_list) > self.n_core else len(self.sim_list)
        if n_jobs == 0:  # sim list is empty
            return True, False  # all sims were successful, but no simulations were run
        elif not parallel_read:
            n_jobs = 1
        run_sims = False
        if force:
            logging.info("Simulations forced!")
            sims_to_simulate = self.sim_list
            run_sims = True
        # The joblib pool is reused for both the existence check and the
        # read-in of results below.
        with Parallel(n_jobs=n_jobs, verbose=10) as parallel:
            if not force:
                # check which simulations are already loaded into dut.data or saved as a database file
                n_tot = 0
                for sim in self.sim_list:
                    dut = sim["dut"]
                    sweep = sim["sweep"]
                    dut_name = dut.name + str(dut.get_hash())
                    sim_name = sweep.name + "_" + sweep.get_hash()
                    if dut.check_existence_sweep(sweep):
                        print(
                            f"Simulation of DuT {dut_name} with sweep {sim_name} loaded from database.",
                        )
                        logging.info(
                            "Simulation of DuT %s with sweep %s loaded from database.",
                            dut_name,
                            sim_name,
                        )
                        sim["sweep_exists"] = True
                    else:
                        n_tot += 1
                        sim["sweep_exists"] = False
                # remove all simulations which already exist in the database
                self.sim_list = [sim for sim in self.sim_list if not sim["sweep_exists"]]
                sims_to_simulate = []
                if n_tot > 0:
                    # check which simulations are already run in the past but not imported
                    if parallel_read:
                        print("Checking which simulations need to be run in parallel:")
                        sims_checked = parallel(
                            delayed(_check_simulation_needed)(i_sim, n_tot, **sim)
                            for i_sim, sim in enumerate(self.sim_list)
                        )
                    else:
                        print("Checking which simulations need to be run:")
                        # parallel not working with VAE modelcard currently since get_circuit is monkey patched
                        sims_checked = [
                            _check_simulation_needed(i_sim, n_tot, **sim)
                            for i_sim, sim in enumerate(self.sim_list)
                        ]
                    print_progress_bar(n_tot, n_tot, prefix="Finish", length=50)
                    print("\n")  # new line after the progress bar
                    # add the data to the duts and filter the simulations to do
                    for sim_to_do, sim_checked in zip(
                        self.sim_list, sims_checked
                    ):  # as we are keeping the order, we can copy the data over
                        if sim_checked is None:
                            sims_to_simulate.append(sim_to_do)
                        else:
                            sim_to_do["dut"].data.update(sim_checked)
                run_sims = bool(sims_to_simulate)  # will be False if list is empty
            # remote simulations ? -> an SSH/SCP connection is needed first
            if any([sim for sim in sims_to_simulate if sim["dut"].simulate_on_server]):
                self.create_ssh_client()
            # start the simulations using the simulation control.
            process_finished = self.run_simulations(sims_to_simulate)
            if process_finished:
                if parallel_read:
                    print("Reading in the results in parallel:")
                    sims_read = parallel(
                        delayed(_read_process_results)(
                            process["success"], process["dut"], process["sweep"]
                        )
                        for process in process_finished
                    )
                else:
                    print("Reading in the results:")
                    # parallel not working with VAE modelcard currently since get_circuit is monkey patched
                    sims_read = [
                        _read_process_results(process["success"], process["dut"], process["sweep"])
                        for process in process_finished
                    ]
                all_sim_success = all(sim["success"] for sim in sims_read)
                # read data
                for sim in sims_read:
                    # find dut in self.sim_list (matched via the dut hash, since
                    # joblib workers may have operated on copies)
                    dut = next(
                        sim_to_do["dut"]
                        for sim_to_do in self.sim_list
                        if sim_to_do["dut"].get_hash() == sim["dut_hash"]
                    )
                    dut.data.update(sim["data"])
            else:
                all_sim_success = True  # no simulations run -> all successful
        if self.ssh_client is not None:
            self.close_ssh_client()
        if remove_simulations:
            # if storage saving is on, the read simulations can be deleted:
            for sim in self.sim_list:
                sim["dut"].delete_sim_results(sim["sweep"], ignore_errors=True)
        # reset the queue
        self.sim_list = []
        return (
            all_sim_success,
            run_sims,
        )  # the list is empty if no simulations were necessary, empty list -> False
    def create_ssh_client(self):
        """Creates the clients to communicate with the server.

        Opens an SSH connection (key-based auth, host keys auto-accepted) and
        an SCP channel on top of it, normalizes the configured remote
        simulation path to the server's path flavor, and creates the remote
        target directory tree.
        """
        self.ssh_client = paramiko.SSHClient()
        self.ssh_client.load_system_host_keys()
        # Accept unknown host keys automatically (convenient, but no MITM
        # protection for first connections).
        self.ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        self.ssh_client.connect(
            DATA_CONFIG["server"]["adress"],
            username=DATA_CONFIG["server"]["ssh_user"],
            key_filename=str(Path(DATA_CONFIG["server"]["ssh_key"]).expanduser()),
            # NOTE(review): presumably a workaround for rsa-sha2 negotiation
            # failures with some servers -- confirm before removing.
            disabled_algorithms=dict(pubkeys=["rsa-sha2-512", "rsa-sha2-256"]),
        )
        self.scp_client = SCPClient(
            self.ssh_client.get_transport(), socket_timeout=self.t_max, progress=upload_progress
        )
        # ensure the correct path flavor for the remote machine:
        if DATA_CONFIG["server"]["unix"]:
            DATA_CONFIG["server"]["simulation_path"] = PurePosixPath(
                DATA_CONFIG["server"]["simulation_path"]
            )
        else:
            DATA_CONFIG["server"]["simulation_path"] = PureWindowsPath(
                DATA_CONFIG["server"]["simulation_path"]
            )
        # make sure that the target folder exists (create parents first)
        for folder in reversed(DATA_CONFIG["server"]["simulation_path"].parents):
            self.ssh_client.exec_command("mkdir -p " + str(folder))
        self.ssh_client.exec_command("mkdir -p " + str(DATA_CONFIG["server"]["simulation_path"]))
def close_ssh_client(self):
"""Closes the ssh connection again."""
self.ssh_client.close()
self.ssh_client = None
self.scp_client = None
    def copy_zip_to_server(self, sims_to_zip):
        """Copies the simulation data to the server. Before doing this, old simulation data is deleted.

        All simulation input folders (plus shared VA code folders, if the
        DUTs do not copy their VA files locally) are packed into one
        temporary zip, uploaded via SCP, unzipped on the server and the
        remote zip is removed again.

        Parameters
        ----------
        sims_to_zip : list[dict]
            A list of dictionaries with 2 keys:
            dut : DutView
            sweep : Sweep
        """
        def add_to_zip(folder, rel_to):
            # Recursively add `folder` (path relative to `rel_to`) to the
            # zip archive bound as `zip_ref` in the enclosing scope.
            # add folder (not really needed, if there is any file in the folder, but we don't know this....)
            zip_ref.write(folder, arcname=folder.relative_to(rel_to))
            # add all data inside folder
            for child in folder.iterdir():
                if child.is_file():
                    zip_ref.write(child, arcname=child.relative_to(rel_to))
                else:
                    add_to_zip(child, rel_to)
        assert self.ssh_client is not None
        assert self.scp_client is not None
        sim_path_on_server = DATA_CONFIG["server"]["simulation_path"]
        commands = []
        # delete possible old directories:
        for sim_to_zip in sims_to_zip:
            sim_folder = sim_to_zip["dut"].get_sim_folder(sim_to_zip["sweep"])
            dut_folder = sim_folder.parts[-2]
            sweep_folder = sim_folder.parts[-1]
            commands.append("rm -rf " + str(sim_path_on_server / dut_folder / sweep_folder))
        # Reading stdout blocks until each command finished, see:
        # https://stackoverflow.com/questions/34181078/execute-command-and-wait-for-it-to-finish-with-python-paramiko?noredirect=1&lq=1
        for command in commands:
            _stdin, stdout, _stderr = self.ssh_client.exec_command(command)
            stdout.channel.set_combine_stderr(True)
            _output = stdout.readlines()
        with NamedTemporaryFile() as path_zip:  # dut.get_sim_folder(sweep).relative_to(dut.sim_dir)
            with ZipFile(path_zip, "w") as zip_ref:
                for sim_to_zip in sims_to_zip:
                    add_to_zip(
                        sim_to_zip["dut"].get_sim_folder(sim_to_zip["sweep"]),
                        sim_to_zip["dut"].sim_dir,
                    )
                    # add "central" VA-Files -.-
                    if not sim_to_zip["dut"]._copy_va_files:
                        va_files_dir = sim_to_zip["dut"].sim_dir / "VA_codes"
                        for vafile in sim_to_zip["dut"]._list_va_file_contents:
                            dir_code = va_files_dir / vafile.get_tree_hash()
                            add_to_zip(
                                dir_code,
                                sim_to_zip["dut"].sim_dir,
                            )
            # transfer and save name (must happen inside the with-block,
            # NamedTemporaryFile removes the file on close)
            self.scp_client.put(path_zip.name, remote_path=str(sim_path_on_server))
            name_zip_file = Path(path_zip.name).name
        # unzip
        # here we should wait for finish -> reading stdout blocks until done
        _stdin, stdout, _stderr = self.ssh_client.exec_command(
            "unzip -o -d " + str(sim_path_on_server) + " " + str(sim_path_on_server / name_zip_file)
        )
        stdout.channel.set_combine_stderr(True)
        output = stdout.readlines()
        # delete temp zip on the server
        self.ssh_client.exec_command("rm -f " + str(sim_path_on_server / name_zip_file))
    def copy_from_server(self, dut, sweep, zip_result=True):
        """Collects the simulation data from the server.

        Transfer failures are re-raised as ``FileNotFoundError`` so that the
        polling loop in :meth:`run_simulations` can simply retry later.

        Parameters
        ----------
        dut : DutView
        sweep : Sweep
        zip_result : bool, optional
            If True, the result is zipped before transfer, the zip is copied and then unzipped locally.
        """
        sim_folder = dut.get_sim_folder(sweep)
        root = sim_folder.parent
        sweep_folder = sim_folder.parts[-1]
        dut_folder = sim_folder.parts[-2]
        if zip_result:
            # delete possible old zip:
            self.ssh_client.exec_command(
                "rm -f {0:s}.zip".format(
                    str(DATA_CONFIG["server"]["simulation_path"] / dut_folder / sweep_folder)
                )
            )  # remove to be sure
            # create new zip and copy it via scp
            channel_zip = self.ssh_client.get_transport().open_session(timeout=self.t_max)
            channel_zip.exec_command(
                "cd {0:s} && zip -r {1:s}.zip ./{1:s}".format(
                    str(DATA_CONFIG["server"]["simulation_path"] / dut_folder), sweep_folder
                )
            )
            # poll until the remote zip command terminated
            while not channel_zip.exit_status_ready():
                time.sleep(0.5)
            try:
                self.scp_client.get(
                    str(DATA_CONFIG["server"]["simulation_path"] / dut_folder / sweep_folder)
                    + ".zip",
                    local_path=str(root),
                )
            except (SCPException, paramiko.SSHException, TimeoutError) as err:
                raise FileNotFoundError() from err
            # unpack locally and remove the transferred archive
            path_zip = sim_folder.with_suffix(".zip")
            with ZipFile(path_zip, "r") as zip_ref:
                zip_ref.extractall(root)
            path_zip.unlink()
        else:
            try:
                self.scp_client.get(
                    str(DATA_CONFIG["server"]["simulation_path"] / dut_folder / sweep_folder),
                    local_path=str(root),
                    recursive=True,
                )
            except (SCPException, paramiko.SSHException) as err:
                # reraise it in order to allow run_and_read to go on and try again in 2 seconds
                raise FileNotFoundError() from err
    def copy_log_from_server(self, dut, sweep):
        """Collects the simulation log file from the server.

        Only ``sim.log`` is fetched, which is cheaper than transferring the
        whole result folder just to check whether the run finished.

        Parameters
        ----------
        dut : DutView
        sweep : Sweep

        Raises
        ------
        FileNotFoundError
            If the log cannot be fetched (yet); callers treat this as
            "simulation still running" and retry.
        """
        sim_folder = dut.get_sim_folder(sweep)
        root = sim_folder.parent
        sweep_folder = sim_folder.parts[-1]
        dut_folder = sim_folder.parts[-2]
        try:
            self.scp_client.get(
                str(
                    DATA_CONFIG["server"]["simulation_path"] / dut_folder / sweep_folder / "sim.log"
                ),
                local_path=str(root / sweep_folder),
                recursive=True,
            )
        except (SCPException, paramiko.SSHException) as err:
            # reraise it in order to allow run_and_read to go on and try again in 2 seconds
            raise FileNotFoundError() from err
    def run_simulations(self, sim_list):
        """Runs all given simulations in parallel.

        Duplicate (dut, sweep) pairs are dropped first.  Jobs are then
        started up to ``self.n_core`` at a time; local jobs run as
        subprocesses, remote jobs via SSH (optionally through the PBS
        scheduler).  A live status table is rendered with ``reprint``.

        Parameters
        ----------
        sim_list : [{}]
            List of dictionaries, each dictionary has a 'dut': :class:`~DMT.core.DutView` and 'sweep': :class:`~DMT.core.Sweep` key value pair.

        Returns
        -------
        success : list[process]
            List of finished processes
        """
        if len(sim_list) == 0:
            return []
        # test if same simulation is added twice.
        set_dut_hashes = set([sim_i["dut"].get_hash() for sim_i in sim_list])
        list_to_delete = []
        for dut_hash in set_dut_hashes:
            list_sweep_hashes = []
            for i_sim, sim_a in enumerate(sim_list):
                if sim_a["dut"].get_hash() == dut_hash:
                    if sim_a["sweep"].get_hash() in list_sweep_hashes:
                        list_to_delete.append(i_sim)
                    else:
                        list_sweep_hashes.append(sim_a["sweep"].get_hash())
        # delete from the back so the remaining indices stay valid
        for to_delete in sorted(list_to_delete, reverse=True):
            del sim_list[to_delete]
        # start simulations
        process_running = []
        process_finished = []
        finished = False
        n = 0
        n_total = len(sim_list)
        # prepare simulations
        print_progress_bar(0, len(sim_list), prefix="Preparing Simulations", length=50)
        sims_to_zip = []
        # if True: use pbs job scheduler
        pbs = DATA_CONFIG["server"]["use_pbs"] and DATA_CONFIG["backend_remote"]
        if DATA_CONFIG["progress_minimal"]:
            len_output = 2
        else:
            len_output = self.n_core + 7
        for i_sim, sim in enumerate(sim_list):
            sweep = sim["sweep"]
            dut = sim["dut"]
            print_progress_bar(i_sim, len(sim_list), prefix="Preparing Simulations", length=50)
            dut.prepare_simulation(sweep)
            if dut.simulate_on_server:
                sims_to_zip.append({"dut": dut, "sweep": sweep})
        print_progress_bar(len(sim_list), len(sim_list), prefix="Preparing Simulations", length=50)
        print("\n")  # new line after the progress bar
        if sims_to_zip:
            print("Uploading simulation input files and folders to server...")
            self.copy_zip_to_server(sims_to_zip)
            print("finish uploading.")
        # do not print download status (would clash with the live table)
        if self.scp_client is not None:
            self.scp_client._progress = False
        with output(output_type="list", initial_len=len_output, interval=0) as output_list:
            while not finished:
                # run infinite processes parallel on the server
                # if (len([process for process in process_running if not process['backend_remote']]) < self.n_core) and (len(sim_list) > 0 ):
                if (len([process for process in process_running]) < self.n_core) and (
                    len(sim_list) > 0
                ):
                    # take the next element from the self.sim_list and start it
                    sim = sim_list[0]
                    sim_list = sim_list[1:]
                    # start the simulation on this core
                    sweep = sim["sweep"]
                    dut = sim["dut"]
                    if (
                        not hasattr(dut, "t_max") or dut.t_max is None
                    ):  # make sure t_max is set in every simulated dut
                        dut.t_max = self.t_max
                    if dut.simulate_on_server:
                        pid = self.run_simulation_remote(dut, sweep, pbs=pbs)
                        process = 0  # no local process handle for remote jobs
                    else:
                        process = self.run_simulation_local(dut, sweep)
                        pid = process.pid
                    if pid == 0:
                        continue  # failed to start simulation, just wait and try again
                    if hasattr(dut, "zip_result"):
                        zip_result = dut.zip_result
                    else:
                        zip_result = True  # per default True, it is better because scp struggles with many files...
                    n += 1
                    t0 = time.time()
                    process_running.append(
                        {
                            "n": n,
                            "t0": t0,
                            "dt": t0,
                            "dut": dut,
                            "sweep": sweep,
                            "process": process,
                            "pid": pid,
                            "success": True,
                            "backend_remote": dut.simulate_on_server,
                            "last_poll": 0,
                            "zip_result": zip_result,
                        }
                    )
                # check for finished processes. DO THIS BEFORE TIMEOUT CHECKING.
                for p in process_running:
                    process = p["process"]
                    if p["backend_remote"]:
                        p["last_poll"] += 1
                        if (
                            p["last_poll"] % 5 == 0
                        ):  # poll the server only every 5th loop pass to limit SSH traffic
                            if pbs:
                                # use qstat
                                _stdin, stdout, _stderr = self.ssh_client.exec_command(
                                    ("qstat_script " + str(p["pid"]))
                                )
                                out = str(stdout.read())
                                if (
                                    "Unknown Job" in out or out == "b''"
                                ):  # if job finished, these strings are returned
                                    try:
                                        self.copy_from_server(
                                            p["dut"], p["sweep"], zip_result=p["zip_result"]
                                        )
                                        process_finished.append(p)
                                        try:
                                            p["dut"].validate_simulation_successful(p["sweep"])
                                        except (
                                            SimulationFail,
                                            SimulationUnsuccessful,
                                            FileNotFoundError,
                                        ):
                                            p["success"] = False
                                    except (SimulationUnsuccessful, FileNotFoundError):
                                        pass  # just try again
                            else:  # copy everything and check => slow
                                try:
                                    self.copy_log_from_server(p["dut"], p["sweep"])
                                    p["dut"].validate_simulation_successful(p["sweep"])
                                    self.copy_from_server(
                                        p["dut"], p["sweep"], zip_result=p["zip_result"]
                                    )
                                    process_finished.append(p)
                                except (SimulationUnsuccessful, FileNotFoundError):
                                    pass
                                except SimulationFail:
                                    p["success"] = False
                                    process_finished.append(p)
                    else:
                        # local job: rely on the subprocess return code
                        returncode = process.poll()
                        if returncode is not None:
                            if (
                                returncode != 0
                                and returncode != 134
                                and returncode != 139
                                and returncode != 1
                            ):  # 134 sometimes happens but still ads works...
                                p["success"] = False
                            process_finished.append(p)
                # check for timeouts
                t = time.time()
                for p in process_running:
                    p["dt"] = t - p["t0"]
                    if (p["dt"] > p["dut"].t_max) and (
                        p["dt"] > self.t_max
                    ):  # both t_max have to be smaller than the simulation time
                        if not p["backend_remote"]:
                            p["process"].kill()
                        # TODO: kill with pbs
                        p["success"] = False
                        process_finished.append(p)
                # remove finished processes from running processes
                for p in process_finished:
                    if p in process_running:
                        process_running.remove(p)
                # update status that is displayed on the console
                len_progress = 20  # number of #
                progress = int(
                    len(process_finished)
                    / (len(sim_list) + len(process_running) + len(process_finished))
                    * len_progress
                )
                output_list[0] = "DMT is now simulating! "
                output_list[1] = (
                    "finished: "
                    + str(len(process_finished))
                    + " of "
                    + str(n_total)
                    + ":["
                    + "#" * progress
                    + " " * (len_progress - progress)
                    + "]"
                )
                if not DATA_CONFIG["progress_minimal"]:
                    output_list[2] = "-------------------------------"
                    output_list[3] = "| sim_n | pid | dt |"
                    output_list[4] = "-------------------------------"
                    for i in range(self.n_core):
                        try:
                            p = process_running[i]
                            str_ = "|{:^7d}|{:^12d}|{:^8.1f}|".format(p["n"], p["pid"], p["dt"])
                        except (KeyError, IndexError):
                            str_ = "|{:^7s}|{:^12s}|{:^8.1f}|".format("x", "x", 0)
                        output_list[i + 5] = str_
                    output_list[-2] = "-------------------------------"
                    output_list[-1] = " "
                # are we finished?
                if len(process_running) == 0 and len(sim_list) == 0:
                    finished = True
                elif len(process_running) == self.n_core or len(sim_list) == 0:
                    time.sleep(0.1)
        # print download status again now that the live table is gone
        if self.scp_client is not None:
            self.scp_client._progress = True
        return process_finished
def run_simulation_local(self, dut, sweep):
"""Starts the simulation
Parameters
----------
dut : DutView
sweep : Sweep
"""
sim_folder = dut.get_sim_folder(sweep)
logging.info(
"Started the simulation for the dut %s of the sweep %s!", dut.get_hash(), sweep.name
)
logging.debug("The simulation folder of this simulation is %s", sim_folder)
log_file = open(sim_folder / "sim.log", "w")
log_file.write(f"The simulation command is\n{dut.get_start_sim_command()}\n\n")
return subprocess.Popen(
dut.get_start_sim_command().split(),
shell=False,
cwd=sim_folder,
stderr=subprocess.STDOUT,
stdout=log_file,
)
    def run_simulation_remote(self, dut, sweep, pbs=False):
        """Starts the remote simulation

        Without PBS the simulator is launched directly in the background via
        SSH; with PBS a job is submitted through the configured qsub command
        and the returned job id is parsed from its output.

        Parameters
        ----------
        dut : DutView
        sweep : Sweep
        pbs : Boolean
            If True, submit through the PBS job scheduler instead of
            starting the simulator directly.

        Returns
        -------
        pid : int
            0 if failed, -1 if running via ssh directly and id of job for PBS simulation.
        """
        sim_folder = dut.get_sim_folder(sweep)
        sweep_folder = sim_folder.parts[-1]
        dut_folder = sim_folder.parts[-2]
        logging.info(
            "Started the remote simulation for the dut %s of the sweep %s!",
            dut.get_hash(),
            sweep.name,
        )
        logging.debug("The simulation folder of this simulation is %s", sim_folder)
        # start a subprocess with the ssh command
        if not pbs:
            # Run the simulator in the background ("&") with output into
            # sim.log inside the remote sweep folder.
            _stdin, _stdout, _stderr = self.ssh_client.exec_command(
                (
                    "cd "
                    + str(DATA_CONFIG["server"]["simulation_path"] / dut_folder / sweep_folder)
                    + ";"
                    + dut.get_start_sim_command()
                    + " > sim.log &"
                )
            )
            ## useful for debugging:
            # for line in iter(_stdout.readline, ""):
            #     print(line, end="")
            # for line in iter(_stderr.readline, ""):
            #     print(line, end="")
            return -1
        else:
            _stdin, stdout, _stderr = self.ssh_client.exec_command(
                (
                    "cd "
                    + str(DATA_CONFIG["server"]["simulation_path"] / dut_folder / sweep_folder)
                    + ";"
                    + DATA_CONFIG["server"]["command_qsub"]
                )
            )
            output = stdout.read()
            _error = _stderr.read()
            # Extract the numeric job id from qsub's output (everything
            # before the first "." that is a digit).
            id_ = "".join([n for n in str(output).split(".")[0] if n.isdigit()])
            try:
                return int(id_)
            except ValueError:
                # submission failed -> no digits found in the output
                return 0
def _check_simulation_needed(i_sim, n_tot, dut=None, sweep=None, sweep_exists=None):
    """Function to check if the simulation is needed or already present in the database.

    Parameters
    ----------
    i_sim : int
        Index of this simulation in the queue (for the progress bar).
    n_tot : int
        Total number of queued simulations (for the progress bar).
    dut : DMT.core.DutView
    sweep : DMT.core.Sweep
    sweep_exists : bool, optional
        Unused here; accepted because the queue dictionaries carry this key
        and are expanded with ``**sim``.

    Returns
    -------
    {key: DMT.core.Dataframe}
        In case the data is read from database or previous simulation.
    None
        In case the simulation must be done.
    """
    dut_name = dut.name + str(dut.get_hash())
    sim_name = sweep.name + "_" + sweep.get_hash()
    print_progress_bar(i_sim, n_tot, prefix="Progress", length=50)
    try:
        # was it simulated already successfully ?
        dut.validate_simulation_successful(sweep)
        print(
            f"\n Simulation of DuT {dut_name} with sweep {sim_name} already done and results are valid, only data needs to be read.",
        )
        logging.info(
            "Simulation of DuT %s with sweep %s already done and results are valid, only data needs to be read.",
            dut_name,
            sim_name,
        )
        logging.debug("The simulation folder of this simulation was %s", dut.get_sim_folder(sweep))
        dut.add_data(sweep)
    except SimulationFail:
        # A recorded failure is reported but NOT re-simulated.
        print(
            f"\n Simulation of DuT {dut_name} with sweep {sim_name} already done and failed.",
        )
    except Exception:
        # Any other problem (SimulationUnsuccessful, FileNotFoundError,
        # IndexError, corrupt result files, ...) means the results are not
        # usable: wipe them and schedule a fresh simulation.  Fix: catch
        # ``Exception`` instead of a bare ``except`` so KeyboardInterrupt
        # and SystemExit still propagate.
        dut.delete_sim_results(sweep, ignore_errors=True)  # remove for safety
        logging.info("Simulation of DuT %s with sweep %s needed.", dut_name, sim_name)
        return None
    return dut.data
def _read_process_results(success, dut, sweep):
    """Read the results of one finished simulation process into the dut.

    Parameters
    ----------
    success : bool
        Whether the simulation process reported success.
    dut : DMT.core.DutView
    sweep : DMT.core.Sweep

    Returns
    -------
    dict
        {'success': success, 'dut_hash': dut.get_hash(), 'data': dut.data}
    """
    dut_name = dut.name + str(dut.get_hash())
    sim_name = sweep.name + "_" + sweep.get_hash()
    sim_folder = dut.get_sim_folder(sweep)
    print("Read: dut: {:s}, sweep: {:s}".format(dut_name, sim_name))

    def _report_failure():
        # Print a red failure notice together with the simulator log.
        # (Deduplicated: this text was previously repeated verbatim in the
        # else-branch and in the exception handler below.)
        color_red = "\033[91m"
        color_end = "\033[0m"
        print(
            "{0:s}Simulation of DuT {1:s} with sweep {2:s} failed.{3:s}".format(
                color_red, dut_name, sim_name, color_end
            )
        )
        print("{0:s}Simulation folder: {1:s} {2:s}".format(color_red, str(sim_folder), color_end))
        print((sim_folder / "sim.log").read_text())
        logging.info("Simulation of DuT %s with sweep %s failed.", dut_name, sim_name)

    # inform data_manager about the finished simulations
    try:
        if success:
            dut.add_data(sweep)
            logging.info("Simulation of DuT %s with sweep %s successful.", dut_name, sim_name)
        else:
            _report_failure()
    except (SimulationUnsuccessful, FileNotFoundError, KeyError):
        _report_failure()
    return {"success": success, "dut_hash": dut.get_hash(), "data": dut.data}
/Markdown-3.4.4-py3-none-any.whl/markdown/extensions/__init__.py | from ..util import parseBoolValue
class Extension:
    """ Base class for extensions to subclass. """
    # Default configuration -- to be overridden by a subclass
    # Must be of the following format:
    # {
    #   'key': ['value', 'description']
    # }
    # Note that `Extension.setConfig` will raise a `KeyError`
    # if a default is not set here.
    config = {}
    def __init__(self, **kwargs):
        """ Initiate Extension and set up configs from the keyword arguments. """
        self.setConfigs(kwargs)
    def getConfig(self, key, default=''):
        """ Return the config value for the given key, or `default` if unknown. """
        if key in self.config:
            return self.config[key][0]
        else:
            return default
    def getConfigs(self):
        """ Return all config settings as a `{key: value}` dict. """
        return {key: self.getConfig(key) for key in self.config.keys()}
    def getConfigInfo(self):
        """ Return all `config` descriptions as a list of `(key, description)` tuples. """
        return [(key, self.config[key][1]) for key in self.config.keys()]
    def setConfig(self, key, value):
        """ Set a `config` setting for `key` with the given `value`.

        If the default value for `key` is a bool (or `None`), `value` is
        parsed into a bool via `parseBoolValue`.  Raises `KeyError` if `key`
        has no default in `config`.
        """
        if isinstance(self.config[key][0], bool):
            value = parseBoolValue(value)
        if self.config[key][0] is None:
            value = parseBoolValue(value, preserve_none=True)
        self.config[key][0] = value
    def setConfigs(self, items):
        """ Set multiple `config` settings given a dict or list of tuples. """
        if hasattr(items, 'items'):
            # it's a dict
            items = items.items()
        for key, value in items:
            self.setConfig(key, value)
    def extendMarkdown(self, md):
        """
        Add the various processors and patterns to the Markdown Instance.

        This method must be overridden by every extension.

        Keyword arguments:

        * md: The Markdown instance.

        """
        # Fix: the two adjacent string literals previously concatenated to
        # '..."extendMarkdown"method.' -- a space was missing.
        raise NotImplementedError(
            'Extension "%s.%s" must define an "extendMarkdown" '
            'method.' % (self.__class__.__module__, self.__class__.__name__)
        )
/MySQL-python-vincent-1.2.5.tar.gz/MySQL-python-vincent-1.2.5/_mysql_exceptions.py | try:
from exceptions import Exception, StandardError, Warning
except ImportError:
# Python 3
StandardError = Exception
class MySQLError(StandardError):
"""Exception related to operation with MySQL."""
class Warning(Warning, MySQLError):
"""Exception raised for important warnings like data truncations
while inserting, etc."""
class Error(MySQLError):
"""Exception that is the base class of all other error exceptions
(not Warning)."""
class InterfaceError(Error):
"""Exception raised for errors that are related to the database
interface rather than the database itself."""
class DatabaseError(Error):
"""Exception raised for errors that are related to the
database."""
class DataError(DatabaseError):
"""Exception raised for errors that are due to problems with the
processed data like division by zero, numeric value out of range,
etc."""
class OperationalError(DatabaseError):
"""Exception raised for errors that are related to the database's
operation and not necessarily under the control of the programmer,
e.g. an unexpected disconnect occurs, the data source name is not
found, a transaction could not be processed, a memory allocation
error occurred during processing, etc."""
class IntegrityError(DatabaseError):
"""Exception raised when the relational integrity of the database
is affected, e.g. a foreign key check fails, duplicate key,
etc."""
class InternalError(DatabaseError):
"""Exception raised when the database encounters an internal
error, e.g. the cursor is not valid anymore, the transaction is
out of sync, etc."""
class ProgrammingError(DatabaseError):
"""Exception raised for programming errors, e.g. table not found
or already exists, syntax error in the SQL statement, wrong number
of parameters specified, etc."""
class NotSupportedError(DatabaseError):
"""Exception raised in case a method or database API was used
which is not supported by the database, e.g. requesting a
.rollback() on a connection that does not support transaction or
has transactions turned off.""" | PypiClean |
/Discode.py-1.1.1.tar.gz/Discode.py-1.1.1/discode/intents.py | class Intents:
r"""Discord requires you to set the amount of gateway intents your bot uses."""
def __init__(self, **intents):
self.value = 0
for k, v in intents.items():
if v:
intent = getattr(self, k, None)
if not intent:
raise ValueError(f"Intent called {k} does not exist!")
intent
@classmethod
def default(cls):
kwargs = {
"guilds": True,
"messages": True,
"members": True,
"reactions": True,
"typing": True,
"emojis": True,
"invites": True,
"events": True,
}
return cls(**kwargs)
@classmethod
def all(cls: "Intents"):
i = cls()
i.value = 32767
return i
@property
def guilds(self):
self.value += 1 << 0
return self
@property
def members(self):
self.value += 1 << 1
return self
@property
def bans(self):
self.value += 1 << 2
return self
@property
def emojis(self):
self.value += 1 << 3
return self
@property
def integrations(self):
self.value += 1 << 4
return self
@property
def webhooks(self):
self.value += 1 << 5
return self
@property
def invites(self):
self.value += 1 << 6
return self
@property
def voice_states(self):
self.value += 1 << 7
return self
@property
def presence(self):
self.value += 1 << 8
return self
@property
def guild_messages(self):
self.value += 1 << 9
return self
@property
def direct_messages(self):
self.value += 1 << 12
return self
@property
def messages(self):
self.value += ((1 << 9) + (1 << 12))
return self
@property
def reactions(self):
self.value += 1 << 10
self.value += 1 << 13
return self
@property
def typing(self):
self.guild_typing
self.dm_typing
return self
@property
def guild_typing(self):
self.value += 1 << 11
return self
@property
def dm_typing(self):
self.value += 1 << 14
return self
@property
def events(self):
self.value += 1 << 15
return self | PypiClean |
/FastGets-0.3.5.tar.gz/FastGets-0.3.5/fastgets/web/static/dist/plugins/visualchars/plugin.js | (function () {
var defs = {}; // id -> {dependencies, definition, instance (possibly undefined)}
// Used when there is no 'main' module.
// The name is probably (hopefully) unique so minification removes for releases.
// Publishes a resolved module onto the global object under its dotted id,
// creating intermediate namespace objects as needed.
var register_3795 = function (id) {
  var module = dem(id);
  var fragments = id.split('.');
  // Indirect eval-style trick to obtain the global object in any environment.
  var target = Function('return this;')();
  for (var i = 0; i < fragments.length - 1; ++i) {
    if (target[fragments[i]] === undefined) { target[fragments[i]] = {}; }
    target = target[fragments[i]];
  }
  target[fragments[fragments.length - 1]] = module;
};
// Runs a module's factory function, resolving its dependencies first
// (recursively, via dem). The factory must return a non-undefined value.
var instantiate = function (id) {
  var actual = defs[id];
  var dependencies = actual.deps;
  var definition = actual.defn;
  var len = dependencies.length;
  var instances = new Array(len);
  for (var i = 0; i < len; ++i) { instances[i] = dem(dependencies[i]); }
  var defResult = definition.apply(null, instances);
  if (defResult === undefined) { throw 'module [' + id + '] returned undefined'; }
  actual.instance = defResult;
};
// Registers a module definition; instantiation is deferred until first demand.
var def = function (id, dependencies, definition) {
  if (typeof id !== 'string') { throw 'module id must be a string'; } else if (dependencies === undefined) { throw 'no dependencies for ' + id; } else if (definition === undefined) { throw 'no definition function for ' + id; }
  defs[id] = {
    deps: dependencies,
    defn: definition,
    instance: undefined
  };
};
// Demands (resolves) a module by id, instantiating it lazily on first use.
var dem = function (id) {
  var actual = defs[id];
  if (actual === undefined) { throw 'module [' + id + '] was undefined'; } else if (actual.instance === undefined) { instantiate(id); }
  return actual.instance;
};
// AMD-style require: resolves every id, then invokes the callback with them.
var req = function (ids, callback) {
  var len = ids.length;
  var instances = new Array(len);
  for (var i = 0; i < len; ++i) { instances[i] = dem(ids[i]); }
  callback.apply(null, instances);
};
var ephox = {};
ephox.bolt = {
  module: {
    api: {
      define: def,
      require: req,
      demand: dem
    }
  }
};
var define = def;
var require = req;
var demand = dem;
// this helps with minification when using a lot of global references
var defineGlobal = function (id, ref) {
  define(id, [], function () { return ref; });
};
/* jsc
["tinymce.plugins.visualchars.Plugin","ephox.katamari.api.Cell","tinymce.core.PluginManager","tinymce.plugins.visualchars.api.Api","tinymce.plugins.visualchars.api.Commands","tinymce.plugins.visualchars.core.Keyboard","tinymce.plugins.visualchars.ui.Buttons","global!tinymce.util.Tools.resolve","tinymce.plugins.visualchars.core.Actions","tinymce.core.util.Delay","tinymce.plugins.visualchars.core.VisualChars","tinymce.plugins.visualchars.api.Events","tinymce.plugins.visualchars.core.Data","tinymce.plugins.visualchars.core.Nodes","ephox.katamari.api.Arr","ephox.sugar.api.node.Element","ephox.sugar.api.node.Node","ephox.katamari.api.Option","global!Array","global!Error","global!String","ephox.katamari.api.Fun","global!console","global!document","ephox.sugar.api.node.NodeTypes","tinymce.plugins.visualchars.core.Html","global!Object"]
jsc */
define(
  'ephox.katamari.api.Cell',
  [
  ],
  function () {
    // Minimal mutable reference: holds one value with get/set/clone access.
    var Cell = function (initial) {
      var current = initial;
      return {
        get: function () {
          return current;
        },
        set: function (next) {
          current = next;
        },
        clone: function () {
          // A clone shares the value, not the reference cell.
          return Cell(current);
        }
      };
    };
    return Cell;
  }
);
defineGlobal('global!tinymce.util.Tools.resolve', tinymce.util.Tools.resolve);
/**
 * ResolveGlobal.js
 *
 * Released under LGPL License.
 * Copyright (c) 1999-2017 Ephox Corp. All rights reserved
 *
 * License: http://www.tinymce.com/license
 * Contributing: http://www.tinymce.com/contributing
 */
define(
  'tinymce.core.PluginManager',
  [
    'global!tinymce.util.Tools.resolve'
  ],
  function (resolve) {
    // Late-resolve the tinymce global to avoid a hard load-order dependency.
    return resolve('tinymce.PluginManager');
  }
);
/**
 * Api.js
 *
 * Released under LGPL License.
 * Copyright (c) 1999-2017 Ephox Corp. All rights reserved
 *
 * License: http://www.tinymce.com/license
 * Contributing: http://www.tinymce.com/contributing
 */
define(
  'tinymce.plugins.visualchars.api.Api',
  [
  ],
  function () {
    // Public plugin API: read-only access to the visual-chars toggle state.
    var get = function (toggleState) {
      return {
        isEnabled: function () {
          return toggleState.get();
        }
      };
    };
    return {
      get: get
    };
  }
);
/**
 * Events.js
 *
 * Released under LGPL License.
 * Copyright (c) 1999-2017 Ephox Corp. All rights reserved
 *
 * License: http://www.tinymce.com/license
 * Contributing: http://www.tinymce.com/contributing
 */
define(
  'tinymce.plugins.visualchars.api.Events',
  [
  ],
  function () {
    // Broadcast a change of the visual-chars display state to listeners.
    return {
      fireVisualChars: function (editor, state) {
        return editor.fire('VisualChars', { state: state });
      }
    };
  }
);
/**
 * Data.js
 *
 * Released under LGPL License.
 * Copyright (c) 1999-2017 Ephox Corp. All rights reserved
 *
 * License: http://www.tinymce.com/license
 * Contributing: http://www.tinymce.com/contributing
 */
define(
  'tinymce.plugins.visualchars.core.Data',
  [
  ],
  function () {
    // Characters that receive a visible marker, mapped to a CSS class suffix.
    var charMap = {
      '\u00a0': 'nbsp',
      '\u00ad': 'shy'
    };
    // Build a character-class regex matching any key of the map.
    var charMapToRegExp = function (map, isGlobal) {
      var chars = '';
      for (var ch in map) {
        chars += ch;
      }
      return new RegExp('[' + chars + ']', isGlobal ? 'g' : '');
    };
    // Build a CSS selector matching every marker span produced by Html.js.
    var charMapToSelector = function (map) {
      var parts = [];
      for (var ch in map) {
        parts.push('span.mce-' + map[ch]);
      }
      return parts.join(',');
    };
    return {
      charMap: charMap,
      regExp: charMapToRegExp(charMap),
      regExpGlobal: charMapToRegExp(charMap, true),
      selector: charMapToSelector(charMap),
      charMapToRegExp: charMapToRegExp,
      charMapToSelector: charMapToSelector
    };
  }
);
defineGlobal('global!Array', Array);
defineGlobal('global!Error', Error);
// Core functional combinators used throughout the katamari/sugar modules.
define(
  'ephox.katamari.api.Fun',
  [
    'global!Array',
    'global!Error'
  ],
  function (Array, Error) {
    var noop = function () { };
    // compose(fa, fb)(...) === fa(fb(...))
    var compose = function (fa, fb) {
      return function () {
        return fa(fb.apply(null, arguments));
      };
    };
    // constant(v) is a nullary function always returning v.
    var constant = function (value) {
      return function () {
        return value;
      };
    };
    var identity = function (x) {
      return x;
    };
    var tripleEquals = function (a, b) {
      return a === b;
    };
    // Don't use array slice(arguments), makes the whole function unoptimisable on Chrome
    var curry = function (f) {
      // equivalent to arguments.slice(1)
      // starting at 1 because 0 is the f, makes things tricky.
      // Pay attention to what variable is where, and the -1 magic.
      // thankfully, we have tests for this.
      var args = new Array(arguments.length - 1);
      for (var i = 1; i < arguments.length; i++) args[i - 1] = arguments[i];
      return function () {
        var newArgs = new Array(arguments.length);
        for (var j = 0; j < newArgs.length; j++) newArgs[j] = arguments[j];
        var all = args.concat(newArgs);
        return f.apply(null, all);
      };
    };
    // Boolean negation of a predicate.
    var not = function (f) {
      return function () {
        return !f.apply(null, arguments);
      };
    };
    // die(msg) is a function that always throws Error(msg) when called.
    var die = function (msg) {
      return function () {
        throw new Error(msg);
      };
    };
    var apply = function (f) {
      return f();
    };
    var call = function (f) {
      f();
    };
    var never = constant(false);
    var always = constant(true);
    return {
      noop: noop,
      compose: compose,
      constant: constant,
      identity: identity,
      tripleEquals: tripleEquals,
      curry: curry,
      not: not,
      die: die,
      apply: apply,
      call: call,
      never: never,
      always: always
    };
  }
);
defineGlobal('global!Object', Object);
// Option (Maybe) type: a value that is either some(a) or none().
define(
  'ephox.katamari.api.Option',
  [
    'ephox.katamari.api.Fun',
    'global!Object'
  ],
  function (Fun, Object) {
    var never = Fun.never;
    var always = Fun.always;
    /**
      Option objects support the following methods:
      fold :: this Option a -> ((() -> b, a -> b)) -> Option b
      is :: this Option a -> a -> Boolean
      isSome :: this Option a -> () -> Boolean
      isNone :: this Option a -> () -> Boolean
      getOr :: this Option a -> a -> a
      getOrThunk :: this Option a -> (() -> a) -> a
      getOrDie :: this Option a -> String -> a
      or :: this Option a -> Option a -> Option a
        - if some: return self
        - if none: return opt
      orThunk :: this Option a -> (() -> Option a) -> Option a
        - Same as "or", but uses a thunk instead of a value
      map :: this Option a -> (a -> b) -> Option b
        - "fmap" operation on the Option Functor.
        - same as 'each'
      ap :: this Option a -> Option (a -> b) -> Option b
        - "apply" operation on the Option Apply/Applicative.
        - Equivalent to <*> in Haskell/PureScript.
      each :: this Option a -> (a -> b) -> Option b
        - same as 'map'
      bind :: this Option a -> (a -> Option b) -> Option b
        - "bind"/"flatMap" operation on the Option Bind/Monad.
        - Equivalent to >>= in Haskell/PureScript; flatMap in Scala.
      flatten :: {this Option (Option a))} -> () -> Option a
        - "flatten"/"join" operation on the Option Monad.
      exists :: this Option a -> (a -> Boolean) -> Boolean
      forall :: this Option a -> (a -> Boolean) -> Boolean
      filter :: this Option a -> (a -> Boolean) -> Option a
      equals :: this Option a -> Option a -> Boolean
      equals_ :: this Option a -> (Option a, a -> Boolean) -> Boolean
      toArray :: this Option a -> () -> [a]
    */
    var none = function () { return NONE; };
    // NONE is a frozen singleton shared by every none() call.
    var NONE = (function () {
      var eq = function (o) {
        return o.isNone();
      };
      // inlined from peanut, maybe a micro-optimisation?
      var call = function (thunk) { return thunk(); };
      var id = function (n) { return n; };
      var noop = function () { };
      var me = {
        fold: function (n, s) { return n(); },
        is: never,
        isSome: never,
        isNone: always,
        getOr: id,
        getOrThunk: call,
        getOrDie: function (msg) {
          throw new Error(msg || 'error: getOrDie called on none.');
        },
        or: id,
        orThunk: call,
        map: none,
        ap: none,
        each: noop,
        bind: none,
        flatten: none,
        exists: never,
        forall: always,
        filter: none,
        equals: eq,
        equals_: eq,
        toArray: function () { return []; },
        toString: Fun.constant('none()')
      };
      if (Object.freeze) Object.freeze(me);
      return me;
    })();
    /** some :: a -> Option a */
    var some = function (a) {
      // inlined from peanut, maybe a micro-optimisation?
      var constant_a = function () { return a; };
      var self = function () {
        // can't Fun.constant this one
        return me;
      };
      var map = function (f) {
        return some(f(a));
      };
      var bind = function (f) {
        return f(a);
      };
      var me = {
        fold: function (n, s) { return s(a); },
        is: function (v) { return a === v; },
        isSome: always,
        isNone: never,
        getOr: constant_a,
        getOrThunk: constant_a,
        getOrDie: constant_a,
        or: self,
        orThunk: self,
        map: map,
        ap: function (optfab) {
          return optfab.fold(none, function (fab) {
            return some(fab(a));
          });
        },
        each: function (f) {
          f(a);
        },
        bind: bind,
        flatten: constant_a,
        exists: bind,
        forall: bind,
        filter: function (f) {
          return f(a) ? me : NONE;
        },
        equals: function (o) {
          return o.is(a);
        },
        equals_: function (o, elementEq) {
          return o.fold(
            never,
            function (b) { return elementEq(a, b); }
          );
        },
        toArray: function () {
          return [a];
        },
        toString: function () {
          return 'some(' + a + ')';
        }
      };
      return me;
    };
    /** from :: undefined|null|a -> Option a */
    var from = function (value) {
      return value === null || value === undefined ? NONE : some(value);
    };
    return {
      some: some,
      none: none,
      from: from
    };
  }
);
defineGlobal('global!String', String);
// Array utility library; most results are plain arrays, lookups return Option.
define(
  'ephox.katamari.api.Arr',
  [
    'ephox.katamari.api.Option',
    'global!Array',
    'global!Error',
    'global!String'
  ],
  function (Option, Array, Error, String) {
    // Use the native Array.indexOf if it is available (IE9+) otherwise fall back to manual iteration
    // https://developer.mozilla.org/en/docs/Web/JavaScript/Reference/Global_Objects/Array/indexOf
    var rawIndexOf = (function () {
      var pIndexOf = Array.prototype.indexOf;
      var fastIndex = function (xs, x) { return pIndexOf.call(xs, x); };
      var slowIndex = function (xs, x) { return slowIndexOf(xs, x); };
      return pIndexOf === undefined ? slowIndex : fastIndex;
    })();
    var indexOf = function (xs, x) {
      // The rawIndexOf method does not wrap up in an option. This is for performance reasons.
      var r = rawIndexOf(xs, x);
      return r === -1 ? Option.none() : Option.some(r);
    };
    var contains = function (xs, x) {
      return rawIndexOf(xs, x) > -1;
    };
    // Using findIndex is likely less optimal in Chrome (dynamic return type instead of bool)
    // but if we need that micro-optimisation we can inline it later.
    var exists = function (xs, pred) {
      return findIndex(xs, pred).isSome();
    };
    // range(n, f) -> [f(0), f(1), ..., f(n - 1)]
    var range = function (num, f) {
      var r = [];
      for (var i = 0; i < num; i++) {
        r.push(f(i));
      }
      return r;
    };
    // It's a total micro optimisation, but these do make some difference.
    // Particularly for browsers other than Chrome.
    // - length caching
    // http://jsperf.com/browser-diet-jquery-each-vs-for-loop/69
    // - not using push
    // http://jsperf.com/array-direct-assignment-vs-push/2
    var chunk = function (array, size) {
      var r = [];
      for (var i = 0; i < array.length; i += size) {
        var s = array.slice(i, i + size);
        r.push(s);
      }
      return r;
    };
    var map = function (xs, f) {
      // pre-allocating array size when it's guaranteed to be known
      // http://jsperf.com/push-allocated-vs-dynamic/22
      var len = xs.length;
      var r = new Array(len);
      for (var i = 0; i < len; i++) {
        var x = xs[i];
        r[i] = f(x, i, xs);
      }
      return r;
    };
    // Unwound implementing other functions in terms of each.
    // The code size is roughly the same, and it should allow for better optimisation.
    var each = function (xs, f) {
      for (var i = 0, len = xs.length; i < len; i++) {
        var x = xs[i];
        f(x, i, xs);
      }
    };
    // Like each, but iterates right-to-left.
    var eachr = function (xs, f) {
      for (var i = xs.length - 1; i >= 0; i--) {
        var x = xs[i];
        f(x, i, xs);
      }
    };
    // Split xs into { pass, fail } according to the predicate.
    var partition = function (xs, pred) {
      var pass = [];
      var fail = [];
      for (var i = 0, len = xs.length; i < len; i++) {
        var x = xs[i];
        var arr = pred(x, i, xs) ? pass : fail;
        arr.push(x);
      }
      return { pass: pass, fail: fail };
    };
    var filter = function (xs, pred) {
      var r = [];
      for (var i = 0, len = xs.length; i < len; i++) {
        var x = xs[i];
        if (pred(x, i, xs)) {
          r.push(x);
        }
      }
      return r;
    };
    /*
     * Groups an array into contiguous arrays of like elements. Whether an element is like or not depends on f.
     *
     * f is a function that derives a value from an element - e.g. true or false, or a string.
     * Elements are like if this function generates the same value for them (according to ===).
     *
     *
     * Order of the elements is preserved. Arr.flatten() on the result will return the original list, as with Haskell groupBy function.
     * For a good explanation, see the group function (which is a special case of groupBy)
     * http://hackage.haskell.org/package/base-4.7.0.0/docs/Data-List.html#v:group
     */
    var groupBy = function (xs, f) {
      if (xs.length === 0) {
        return [];
      } else {
        var wasType = f(xs[0]); // initial case for matching
        var r = [];
        var group = [];
        for (var i = 0, len = xs.length; i < len; i++) {
          var x = xs[i];
          var type = f(x);
          if (type !== wasType) {
            r.push(group);
            group = [];
          }
          wasType = type;
          group.push(x);
        }
        if (group.length !== 0) {
          r.push(group);
        }
        return r;
      }
    };
    // Right fold: f is applied as f(acc, x) from the last element backwards.
    var foldr = function (xs, f, acc) {
      eachr(xs, function (x) {
        acc = f(acc, x);
      });
      return acc;
    };
    // Left fold: f is applied as f(acc, x) from the first element forwards.
    var foldl = function (xs, f, acc) {
      each(xs, function (x) {
        acc = f(acc, x);
      });
      return acc;
    };
    // First element satisfying pred, as an Option.
    var find = function (xs, pred) {
      for (var i = 0, len = xs.length; i < len; i++) {
        var x = xs[i];
        if (pred(x, i, xs)) {
          return Option.some(x);
        }
      }
      return Option.none();
    };
    // Index of the first element satisfying pred, as an Option.
    var findIndex = function (xs, pred) {
      for (var i = 0, len = xs.length; i < len; i++) {
        var x = xs[i];
        if (pred(x, i, xs)) {
          return Option.some(i);
        }
      }
      return Option.none();
    };
    // Manual fallback used when Array.prototype.indexOf is unavailable.
    var slowIndexOf = function (xs, x) {
      for (var i = 0, len = xs.length; i < len; ++i) {
        if (xs[i] === x) {
          return i;
        }
      }
      return -1;
    };
    var push = Array.prototype.push;
    var flatten = function (xs) {
      // Note, this is possible because push supports multiple arguments:
      // http://jsperf.com/concat-push/6
      // Note that in the past, concat() would silently work (very slowly) for array-like objects.
      // With this change it will throw an error.
      var r = [];
      for (var i = 0, len = xs.length; i < len; ++i) {
        // Ensure that each value is an array itself
        if (!Array.prototype.isPrototypeOf(xs[i])) throw new Error('Arr.flatten item ' + i + ' was not an array, input: ' + xs);
        push.apply(r, xs[i]);
      }
      return r;
    };
    // Monadic bind: map each element to an array, then concatenate.
    var bind = function (xs, f) {
      var output = map(xs, f);
      return flatten(output);
    };
    // True iff pred returns exactly true for every element.
    var forall = function (xs, pred) {
      for (var i = 0, len = xs.length; i < len; ++i) {
        var x = xs[i];
        if (pred(x, i, xs) !== true) {
          return false;
        }
      }
      return true;
    };
    // Element-wise strict equality of two arrays.
    var equal = function (a1, a2) {
      return a1.length === a2.length && forall(a1, function (x, i) {
        return x === a2[i];
      });
    };
    var slice = Array.prototype.slice;
    // Non-mutating reverse (copies, then reverses the copy).
    var reverse = function (xs) {
      var r = slice.call(xs, 0);
      r.reverse();
      return r;
    };
    // Elements of a1 that do not occur in a2.
    var difference = function (a1, a2) {
      return filter(a1, function (x) {
        return !contains(a2, x);
      });
    };
    // Build an object keyed by String(x) with values f(x, i).
    var mapToObject = function (xs, f) {
      var r = {};
      for (var i = 0, len = xs.length; i < len; i++) {
        var x = xs[i];
        r[String(x)] = f(x, i);
      }
      return r;
    };
    var pure = function (x) {
      return [x];
    };
    // Non-mutating sort (copies, then sorts the copy).
    var sort = function (xs, comparator) {
      var copy = slice.call(xs, 0);
      copy.sort(comparator);
      return copy;
    };
    var head = function (xs) {
      return xs.length === 0 ? Option.none() : Option.some(xs[0]);
    };
    var last = function (xs) {
      return xs.length === 0 ? Option.none() : Option.some(xs[xs.length - 1]);
    };
    return {
      map: map,
      each: each,
      eachr: eachr,
      partition: partition,
      filter: filter,
      groupBy: groupBy,
      indexOf: indexOf,
      foldr: foldr,
      foldl: foldl,
      find: find,
      findIndex: findIndex,
      flatten: flatten,
      bind: bind,
      forall: forall,
      exists: exists,
      contains: contains,
      equal: equal,
      reverse: reverse,
      chunk: chunk,
      difference: difference,
      mapToObject: mapToObject,
      pure: pure,
      sort: sort,
      range: range,
      head: head,
      last: last
    };
  }
);
define('global!console', [], function () { if (typeof console === 'undefined') console = { log: function () {} }; return console; });
defineGlobal('global!document', document);
define(
  'ephox.sugar.api.node.Element',
  [
    'ephox.katamari.api.Fun',
    'global!Error',
    'global!console',
    'global!document'
  ],
  function (Fun, Error, console, document) {
    // Thin wrapper around a raw DOM node; dom() yields the underlying node.
    var fromDom = function (node) {
      if (node === null || node === undefined) throw new Error('Node cannot be null or undefined');
      return {
        dom: Fun.constant(node)
      };
    };
    // Parse an HTML string that must contain exactly one root node.
    var fromHtml = function (html, scope) {
      var div = (scope || document).createElement('div');
      div.innerHTML = html;
      if (!div.hasChildNodes() || div.childNodes.length > 1) {
        console.error('HTML does not have a single root node', html);
        throw 'HTML must have a single root node';
      }
      return fromDom(div.childNodes[0]);
    };
    var fromTag = function (tag, scope) {
      return fromDom((scope || document).createElement(tag));
    };
    var fromText = function (text, scope) {
      return fromDom((scope || document).createTextNode(text));
    };
    return {
      fromHtml: fromHtml,
      fromTag: fromTag,
      fromText: fromText,
      fromDom: fromDom
    };
  }
);
// DOM nodeType constants (mirrors the numeric Node.* constants in the DOM spec).
define(
  'ephox.sugar.api.node.NodeTypes',
  [
  ],
  function () {
    return {
      ATTRIBUTE: 2,
      CDATA_SECTION: 4,
      COMMENT: 8,
      DOCUMENT: 9,
      DOCUMENT_TYPE: 10,
      DOCUMENT_FRAGMENT: 11,
      ELEMENT: 1,
      TEXT: 3,
      PROCESSING_INSTRUCTION: 7,
      ENTITY_REFERENCE: 5,
      ENTITY: 6,
      NOTATION: 12
    };
  }
);
define(
  'ephox.sugar.api.node.Node',
  [
    'ephox.sugar.api.node.NodeTypes'
  ],
  function (NodeTypes) {
    // Accessors over a wrapped DOM node (see Element.fromDom).
    var name = function (element) {
      return element.dom().nodeName.toLowerCase();
    };
    var type = function (element) {
      return element.dom().nodeType;
    };
    var value = function (element) {
      return element.dom().nodeValue;
    };
    // Predicate factory for a specific numeric node type.
    var isType = function (t) {
      return function (element) {
        return type(element) === t;
      };
    };
    var isComment = function (element) {
      return type(element) === NodeTypes.COMMENT || name(element) === '#comment';
    };
    return {
      name: name,
      type: type,
      value: value,
      isElement: isType(NodeTypes.ELEMENT),
      isText: isType(NodeTypes.TEXT),
      isDocument: isType(NodeTypes.DOCUMENT),
      isComment: isComment
    };
  }
);
/**
 * Html.js
 *
 * Released under LGPL License.
 * Copyright (c) 1999-2017 Ephox Corp. All rights reserved
 *
 * License: http://www.tinymce.com/license
 * Contributing: http://www.tinymce.com/contributing
 */
define(
  'tinymce.plugins.visualchars.core.Html',
  [
    'tinymce.plugins.visualchars.core.Data'
  ],
  function (Data) {
    // Wrap an invisible character in a marker span; the class suffix comes
    // from Data.charMap and the span is flagged bogus so it never serializes.
    return {
      wrapCharWithSpan: function (value) {
        return '<span data-mce-bogus="1" class="mce-' + Data.charMap[value] + '">' + value + '</span>';
      }
    };
  }
);
/**
 * Nodes.js
 *
 * Released under LGPL License.
 * Copyright (c) 1999-2017 Ephox Corp. All rights reserved
 *
 * License: http://www.tinymce.com/license
 * Contributing: http://www.tinymce.com/contributing
 */
define(
  'tinymce.plugins.visualchars.core.Nodes',
  [
    'ephox.katamari.api.Arr',
    'ephox.sugar.api.node.Element',
    'ephox.sugar.api.node.Node',
    'tinymce.plugins.visualchars.core.Data',
    'tinymce.plugins.visualchars.core.Html'
  ],
  function (Arr, Element, Node, Data, Html) {
    // A node matches when it is a text node containing at least one
    // character tracked by Data.charMap.
    var isMatch = function (n) {
      return Node.isText(n) &&
        Node.value(n) !== undefined &&
        Data.regExp.test(Node.value(n));
    };
    // inlined sugars PredicateFilter.descendants for file size
    // Pre-order walk collecting every descendant satisfying the predicate.
    var filterDescendants = function (scope, predicate) {
      var matches = [];
      var kids = Arr.map(scope.dom().childNodes, Element.fromDom);
      Arr.each(kids, function (child) {
        if (predicate(child)) {
          matches.push(child);
        }
        matches.push.apply(matches, filterDescendants(child, predicate));
      });
      return matches;
    };
    // Climb from elm towards rootElm; returns the child of rootElm on that
    // path, or undefined when rootElm is not an ancestor.
    var findParentElm = function (elm, rootElm) {
      var current = elm;
      while (current.parentNode) {
        if (current.parentNode === rootElm) {
          return current;
        }
        current = current.parentNode;
      }
    };
    var replaceWithSpans = function (html) {
      return html.replace(Data.regExpGlobal, Html.wrapCharWithSpan);
    };
    return {
      isMatch: isMatch,
      filterDescendants: filterDescendants,
      findParentElm: findParentElm,
      replaceWithSpans: replaceWithSpans
    };
  }
);
/**
 * VisualChars.js
 *
 * Released under LGPL License.
 * Copyright (c) 1999-2017 Ephox Corp. All rights reserved
 *
 * License: http://www.tinymce.com/license
 * Contributing: http://www.tinymce.com/contributing
 */
define(
  'tinymce.plugins.visualchars.core.VisualChars',
  [
    'tinymce.plugins.visualchars.core.Data',
    'tinymce.plugins.visualchars.core.Nodes',
    'ephox.katamari.api.Arr',
    'ephox.sugar.api.node.Element',
    'ephox.sugar.api.node.Node'
  ],
  function (Data, Nodes, Arr, Element, Node) {
    // Replace every matching text node under rootElm with markup in which
    // the tracked invisible characters are wrapped in marker spans.
    var show = function (editor, rootElm) {
      var node, div;
      var nodeList = Nodes.filterDescendants(Element.fromDom(rootElm), Nodes.isMatch);
      Arr.each(nodeList, function (n) {
        var withSpans = Nodes.replaceWithSpans(Node.value(n));
        div = editor.dom.create('div', null, withSpans);
        // Repeatedly moving lastChild right after n keeps the original
        // document order of the generated fragments.
        while ((node = div.lastChild)) {
          editor.dom.insertAfter(node, n.dom());
        }
        editor.dom.remove(n.dom());
      });
    };
    // Remove every marker span under body; the second argument of
    // editor.dom.remove keeps the span's children in place.
    var hide = function (editor, body) {
      var nodeList = editor.dom.select(Data.selector, body);
      Arr.each(nodeList, function (node) {
        editor.dom.remove(node, 1);
      });
    };
    // Refresh markers near the caret, preserving the selection via a bookmark.
    var toggle = function (editor) {
      var body = editor.getBody();
      var bookmark = editor.selection.getBookmark();
      var parentNode = Nodes.findParentElm(editor.selection.getNode(), body);
      // if user does select all the parentNode will be undefined
      parentNode = parentNode !== undefined ? parentNode : body;
      hide(editor, parentNode);
      show(editor, parentNode);
      editor.selection.moveToBookmark(bookmark);
    };
    return {
      show: show,
      hide: hide,
      toggle: toggle
    };
  }
);
/**
 * Actions.js
 *
 * Released under LGPL License.
 * Copyright (c) 1999-2017 Ephox Corp. All rights reserved
 *
 * License: http://www.tinymce.com/license
 * Contributing: http://www.tinymce.com/contributing
 */
define(
  'tinymce.plugins.visualchars.core.Actions',
  [
    'tinymce.plugins.visualchars.api.Events',
    'tinymce.plugins.visualchars.core.VisualChars'
  ],
  function (Events, VisualChars) {
    // Flip the visual-chars flag, announce the change, then redraw the whole
    // editor body while keeping the caret position via a selection bookmark.
    var toggleVisualChars = function (editor, toggleState) {
      var body = editor.getBody();
      var selection = editor.selection;
      toggleState.set(!toggleState.get());
      Events.fireVisualChars(editor, toggleState.get());
      var bookmark = selection.getBookmark();
      if (toggleState.get() === true) {
        VisualChars.show(editor, body);
      } else {
        VisualChars.hide(editor, body);
      }
      selection.moveToBookmark(bookmark);
    };
    return {
      toggleVisualChars: toggleVisualChars
    };
  }
);
/**
 * Commands.js
 *
 * Released under LGPL License.
 * Copyright (c) 1999-2017 Ephox Corp. All rights reserved
 *
 * License: http://www.tinymce.com/license
 * Contributing: http://www.tinymce.com/contributing
 */
define(
  'tinymce.plugins.visualchars.api.Commands',
  [
    'tinymce.plugins.visualchars.core.Actions'
  ],
  function (Actions) {
    // Expose the toggle action as the 'mceVisualChars' editor command.
    return {
      register: function (editor, toggleState) {
        editor.addCommand('mceVisualChars', function () {
          Actions.toggleVisualChars(editor, toggleState);
        });
      }
    };
  }
);
/**
 * ResolveGlobal.js
 *
 * Released under LGPL License.
 * Copyright (c) 1999-2017 Ephox Corp. All rights reserved
 *
 * License: http://www.tinymce.com/license
 * Contributing: http://www.tinymce.com/contributing
 */
define(
  'tinymce.core.util.Delay',
  [
    'global!tinymce.util.Tools.resolve'
  ],
  function (resolve) {
    // Late-resolve the tinymce global to avoid a hard load-order dependency.
    return resolve('tinymce.util.Delay');
  }
);
/**
 * Keyboard.js
 *
 * Released under LGPL License.
 * Copyright (c) 1999-2017 Ephox Corp. All rights reserved
 *
 * License: http://www.tinymce.com/license
 * Contributing: http://www.tinymce.com/contributing
 */
define(
  'tinymce.plugins.visualchars.core.Keyboard',
  [
    'tinymce.core.util.Delay',
    'tinymce.plugins.visualchars.core.VisualChars'
  ],
  function (Delay, VisualChars) {
    var setup = function (editor, toggleState) {
      // Re-render markers at most every 300ms while the user is typing.
      var debouncedToggle = Delay.debounce(function () {
        VisualChars.toggle(editor);
      }, 300);
      if (editor.settings.forced_root_block !== false) {
        editor.on('keydown', function (e) {
          if (toggleState.get() === true) {
            // Enter creates a new block, so refresh immediately; any other
            // key goes through the debounce.
            if (e.keyCode === 13) {
              VisualChars.toggle(editor);
            } else {
              debouncedToggle();
            }
          }
        });
      }
    };
    return {
      setup: setup
    };
  }
);
/**
 * Buttons.js
 *
 * Released under LGPL License.
 * Copyright (c) 1999-2017 Ephox Corp. All rights reserved
 *
 * License: http://www.tinymce.com/license
 * Contributing: http://www.tinymce.com/contributing
 */
define(
  'tinymce.plugins.visualchars.ui.Buttons',
  [
  ],
  function () {
    // Keep a toolbar/menu control's pressed state in sync with the plugin.
    var toggleActiveState = function (editor) {
      return function (e) {
        var control = e.control;
        editor.on('VisualChars', function (evt) {
          control.active(evt.state);
        });
      };
    };
    var register = function (editor) {
      editor.addButton('visualchars', {
        title: 'Show invisible characters',
        cmd: 'mceVisualChars',
        onPostRender: toggleActiveState(editor)
      });
      editor.addMenuItem('visualchars', {
        text: 'Show invisible characters',
        cmd: 'mceVisualChars',
        onPostRender: toggleActiveState(editor),
        selectable: true,
        context: 'view',
        prependToContext: true
      });
    };
    return {
      register: register
    };
  }
);
/**
 * Plugin.js
 *
 * Released under LGPL License.
 * Copyright (c) 1999-2017 Ephox Corp. All rights reserved
 *
 * License: http://www.tinymce.com/license
 * Contributing: http://www.tinymce.com/contributing
 */
define(
  'tinymce.plugins.visualchars.Plugin',
  [
    'ephox.katamari.api.Cell',
    'tinymce.core.PluginManager',
    'tinymce.plugins.visualchars.api.Api',
    'tinymce.plugins.visualchars.api.Commands',
    'tinymce.plugins.visualchars.core.Keyboard',
    'tinymce.plugins.visualchars.ui.Buttons'
  ],
  function (Cell, PluginManager, Api, Commands, Keyboard, Buttons) {
    // Wire the plugin together: one Cell holds the on/off state shared by
    // the command, the UI controls and the keyboard refresh logic.
    PluginManager.add('visualchars', function (editor) {
      var state = Cell(false);
      Commands.register(editor, state);
      Buttons.register(editor);
      Keyboard.setup(editor, state);
      return Api.get(state);
    });
    return function () {};
  }
);
dem('tinymce.plugins.visualchars.Plugin')();
})(); | PypiClean |
/20220429_pdfminer_jameslp310-0.0.2-py3-none-any.whl/pdfminer/fontmetrics.py |
#
# Adobe Core 35 AFM Files with 314 Glyph Entries - ReadMe
#
# This file and the 35 PostScript(R) AFM files it accompanies may be
# used, copied, and distributed for any purpose and without charge,
# with or without modification, provided that all copyright notices
# are retained; that the AFM files are not distributed without this
# file; that all modifications to this file or any of the AFM files
# are prominently noted in the modified file(s); and that this
# paragraph is not modified. Adobe Systems has no responsibility or
# obligation to support the use of the AFM files.
#
### END Verbatim copy of the license part
# flake8: noqa
FONT_METRICS = {
"Courier": (
{
"FontName": "Courier",
"Descent": -194.0,
"FontBBox": (-6.0, -249.0, 639.0, 803.0),
"FontWeight": "Medium",
"CapHeight": 572.0,
"FontFamily": "Courier",
"Flags": 64,
"XHeight": 434.0,
"ItalicAngle": 0.0,
"Ascent": 627.0,
},
{
" ": 600,
"!": 600,
'"': 600,
"#": 600,
"$": 600,
"%": 600,
"&": 600,
"'": 600,
"(": 600,
")": 600,
"*": 600,
"+": 600,
",": 600,
"-": 600,
".": 600,
"/": 600,
"0": 600,
"1": 600,
"2": 600,
"3": 600,
"4": 600,
"5": 600,
"6": 600,
"7": 600,
"8": 600,
"9": 600,
":": 600,
";": 600,
"<": 600,
"=": 600,
">": 600,
"?": 600,
"@": 600,
"A": 600,
"B": 600,
"C": 600,
"D": 600,
"E": 600,
"F": 600,
"G": 600,
"H": 600,
"I": 600,
"J": 600,
"K": 600,
"L": 600,
"M": 600,
"N": 600,
"O": 600,
"P": 600,
"Q": 600,
"R": 600,
"S": 600,
"T": 600,
"U": 600,
"V": 600,
"W": 600,
"X": 600,
"Y": 600,
"Z": 600,
"[": 600,
"\\": 600,
"]": 600,
"^": 600,
"_": 600,
"`": 600,
"a": 600,
"b": 600,
"c": 600,
"d": 600,
"e": 600,
"f": 600,
"g": 600,
"h": 600,
"i": 600,
"j": 600,
"k": 600,
"l": 600,
"m": 600,
"n": 600,
"o": 600,
"p": 600,
"q": 600,
"r": 600,
"s": 600,
"t": 600,
"u": 600,
"v": 600,
"w": 600,
"x": 600,
"y": 600,
"z": 600,
"{": 600,
"|": 600,
"}": 600,
"~": 600,
"\xa1": 600,
"\xa2": 600,
"\xa3": 600,
"\xa4": 600,
"\xa5": 600,
"\xa6": 600,
"\xa7": 600,
"\xa8": 600,
"\xa9": 600,
"\xaa": 600,
"\xab": 600,
"\xac": 600,
"\xae": 600,
"\xaf": 600,
"\xb0": 600,
"\xb1": 600,
"\xb2": 600,
"\xb3": 600,
"\xb4": 600,
"\xb5": 600,
"\xb6": 600,
"\xb7": 600,
"\xb8": 600,
"\xb9": 600,
"\xba": 600,
"\xbb": 600,
"\xbc": 600,
"\xbd": 600,
"\xbe": 600,
"\xbf": 600,
"\xc0": 600,
"\xc1": 600,
"\xc2": 600,
"\xc3": 600,
"\xc4": 600,
"\xc5": 600,
"\xc6": 600,
"\xc7": 600,
"\xc8": 600,
"\xc9": 600,
"\xca": 600,
"\xcb": 600,
"\xcc": 600,
"\xcd": 600,
"\xce": 600,
"\xcf": 600,
"\xd0": 600,
"\xd1": 600,
"\xd2": 600,
"\xd3": 600,
"\xd4": 600,
"\xd5": 600,
"\xd6": 600,
"\xd7": 600,
"\xd8": 600,
"\xd9": 600,
"\xda": 600,
"\xdb": 600,
"\xdc": 600,
"\xdd": 600,
"\xde": 600,
"\xdf": 600,
"\xe0": 600,
"\xe1": 600,
"\xe2": 600,
"\xe3": 600,
"\xe4": 600,
"\xe5": 600,
"\xe6": 600,
"\xe7": 600,
"\xe8": 600,
"\xe9": 600,
"\xea": 600,
"\xeb": 600,
"\xec": 600,
"\xed": 600,
"\xee": 600,
"\xef": 600,
"\xf0": 600,
"\xf1": 600,
"\xf2": 600,
"\xf3": 600,
"\xf4": 600,
"\xf5": 600,
"\xf6": 600,
"\xf7": 600,
"\xf8": 600,
"\xf9": 600,
"\xfa": 600,
"\xfb": 600,
"\xfc": 600,
"\xfd": 600,
"\xfe": 600,
"\xff": 600,
"\u0100": 600,
"\u0101": 600,
"\u0102": 600,
"\u0103": 600,
"\u0104": 600,
"\u0105": 600,
"\u0106": 600,
"\u0107": 600,
"\u010c": 600,
"\u010d": 600,
"\u010e": 600,
"\u010f": 600,
"\u0110": 600,
"\u0111": 600,
"\u0112": 600,
"\u0113": 600,
"\u0116": 600,
"\u0117": 600,
"\u0118": 600,
"\u0119": 600,
"\u011a": 600,
"\u011b": 600,
"\u011e": 600,
"\u011f": 600,
"\u0122": 600,
"\u0123": 600,
"\u012a": 600,
"\u012b": 600,
"\u012e": 600,
"\u012f": 600,
"\u0130": 600,
"\u0131": 600,
"\u0136": 600,
"\u0137": 600,
"\u0139": 600,
"\u013a": 600,
"\u013b": 600,
"\u013c": 600,
"\u013d": 600,
"\u013e": 600,
"\u0141": 600,
"\u0142": 600,
"\u0143": 600,
"\u0144": 600,
"\u0145": 600,
"\u0146": 600,
"\u0147": 600,
"\u0148": 600,
"\u014c": 600,
"\u014d": 600,
"\u0150": 600,
"\u0151": 600,
"\u0152": 600,
"\u0153": 600,
"\u0154": 600,
"\u0155": 600,
"\u0156": 600,
"\u0157": 600,
"\u0158": 600,
"\u0159": 600,
"\u015a": 600,
"\u015b": 600,
"\u015e": 600,
"\u015f": 600,
"\u0160": 600,
"\u0161": 600,
"\u0162": 600,
"\u0163": 600,
"\u0164": 600,
"\u0165": 600,
"\u016a": 600,
"\u016b": 600,
"\u016e": 600,
"\u016f": 600,
"\u0170": 600,
"\u0171": 600,
"\u0172": 600,
"\u0173": 600,
"\u0178": 600,
"\u0179": 600,
"\u017a": 600,
"\u017b": 600,
"\u017c": 600,
"\u017d": 600,
"\u017e": 600,
"\u0192": 600,
"\u0218": 600,
"\u0219": 600,
"\u02c6": 600,
"\u02c7": 600,
"\u02d8": 600,
"\u02d9": 600,
"\u02da": 600,
"\u02db": 600,
"\u02dc": 600,
"\u02dd": 600,
"\u2013": 600,
"\u2014": 600,
"\u2018": 600,
"\u2019": 600,
"\u201a": 600,
"\u201c": 600,
"\u201d": 600,
"\u201e": 600,
"\u2020": 600,
"\u2021": 600,
"\u2022": 600,
"\u2026": 600,
"\u2030": 600,
"\u2039": 600,
"\u203a": 600,
"\u2044": 600,
"\u2122": 600,
"\u2202": 600,
"\u2206": 600,
"\u2211": 600,
"\u2212": 600,
"\u221a": 600,
"\u2260": 600,
"\u2264": 600,
"\u2265": 600,
"\u25ca": 600,
"\uf6c3": 600,
"\ufb01": 600,
"\ufb02": 600,
},
),
"Courier-Bold": (
{
"FontName": "Courier-Bold",
"Descent": -194.0,
"FontBBox": (-88.0, -249.0, 697.0, 811.0),
"FontWeight": "Bold",
"CapHeight": 572.0,
"FontFamily": "Courier",
"Flags": 64,
"XHeight": 434.0,
"ItalicAngle": 0.0,
"Ascent": 627.0,
},
{
" ": 600,
"!": 600,
'"': 600,
"#": 600,
"$": 600,
"%": 600,
"&": 600,
"'": 600,
"(": 600,
")": 600,
"*": 600,
"+": 600,
",": 600,
"-": 600,
".": 600,
"/": 600,
"0": 600,
"1": 600,
"2": 600,
"3": 600,
"4": 600,
"5": 600,
"6": 600,
"7": 600,
"8": 600,
"9": 600,
":": 600,
";": 600,
"<": 600,
"=": 600,
">": 600,
"?": 600,
"@": 600,
"A": 600,
"B": 600,
"C": 600,
"D": 600,
"E": 600,
"F": 600,
"G": 600,
"H": 600,
"I": 600,
"J": 600,
"K": 600,
"L": 600,
"M": 600,
"N": 600,
"O": 600,
"P": 600,
"Q": 600,
"R": 600,
"S": 600,
"T": 600,
"U": 600,
"V": 600,
"W": 600,
"X": 600,
"Y": 600,
"Z": 600,
"[": 600,
"\\": 600,
"]": 600,
"^": 600,
"_": 600,
"`": 600,
"a": 600,
"b": 600,
"c": 600,
"d": 600,
"e": 600,
"f": 600,
"g": 600,
"h": 600,
"i": 600,
"j": 600,
"k": 600,
"l": 600,
"m": 600,
"n": 600,
"o": 600,
"p": 600,
"q": 600,
"r": 600,
"s": 600,
"t": 600,
"u": 600,
"v": 600,
"w": 600,
"x": 600,
"y": 600,
"z": 600,
"{": 600,
"|": 600,
"}": 600,
"~": 600,
"\xa1": 600,
"\xa2": 600,
"\xa3": 600,
"\xa4": 600,
"\xa5": 600,
"\xa6": 600,
"\xa7": 600,
"\xa8": 600,
"\xa9": 600,
"\xaa": 600,
"\xab": 600,
"\xac": 600,
"\xae": 600,
"\xaf": 600,
"\xb0": 600,
"\xb1": 600,
"\xb2": 600,
"\xb3": 600,
"\xb4": 600,
"\xb5": 600,
"\xb6": 600,
"\xb7": 600,
"\xb8": 600,
"\xb9": 600,
"\xba": 600,
"\xbb": 600,
"\xbc": 600,
"\xbd": 600,
"\xbe": 600,
"\xbf": 600,
"\xc0": 600,
"\xc1": 600,
"\xc2": 600,
"\xc3": 600,
"\xc4": 600,
"\xc5": 600,
"\xc6": 600,
"\xc7": 600,
"\xc8": 600,
"\xc9": 600,
"\xca": 600,
"\xcb": 600,
"\xcc": 600,
"\xcd": 600,
"\xce": 600,
"\xcf": 600,
"\xd0": 600,
"\xd1": 600,
"\xd2": 600,
"\xd3": 600,
"\xd4": 600,
"\xd5": 600,
"\xd6": 600,
"\xd7": 600,
"\xd8": 600,
"\xd9": 600,
"\xda": 600,
"\xdb": 600,
"\xdc": 600,
"\xdd": 600,
"\xde": 600,
"\xdf": 600,
"\xe0": 600,
"\xe1": 600,
"\xe2": 600,
"\xe3": 600,
"\xe4": 600,
"\xe5": 600,
"\xe6": 600,
"\xe7": 600,
"\xe8": 600,
"\xe9": 600,
"\xea": 600,
"\xeb": 600,
"\xec": 600,
"\xed": 600,
"\xee": 600,
"\xef": 600,
"\xf0": 600,
"\xf1": 600,
"\xf2": 600,
"\xf3": 600,
"\xf4": 600,
"\xf5": 600,
"\xf6": 600,
"\xf7": 600,
"\xf8": 600,
"\xf9": 600,
"\xfa": 600,
"\xfb": 600,
"\xfc": 600,
"\xfd": 600,
"\xfe": 600,
"\xff": 600,
"\u0100": 600,
"\u0101": 600,
"\u0102": 600,
"\u0103": 600,
"\u0104": 600,
"\u0105": 600,
"\u0106": 600,
"\u0107": 600,
"\u010c": 600,
"\u010d": 600,
"\u010e": 600,
"\u010f": 600,
"\u0110": 600,
"\u0111": 600,
"\u0112": 600,
"\u0113": 600,
"\u0116": 600,
"\u0117": 600,
"\u0118": 600,
"\u0119": 600,
"\u011a": 600,
"\u011b": 600,
"\u011e": 600,
"\u011f": 600,
"\u0122": 600,
"\u0123": 600,
"\u012a": 600,
"\u012b": 600,
"\u012e": 600,
"\u012f": 600,
"\u0130": 600,
"\u0131": 600,
"\u0136": 600,
"\u0137": 600,
"\u0139": 600,
"\u013a": 600,
"\u013b": 600,
"\u013c": 600,
"\u013d": 600,
"\u013e": 600,
"\u0141": 600,
"\u0142": 600,
"\u0143": 600,
"\u0144": 600,
"\u0145": 600,
"\u0146": 600,
"\u0147": 600,
"\u0148": 600,
"\u014c": 600,
"\u014d": 600,
"\u0150": 600,
"\u0151": 600,
"\u0152": 600,
"\u0153": 600,
"\u0154": 600,
"\u0155": 600,
"\u0156": 600,
"\u0157": 600,
"\u0158": 600,
"\u0159": 600,
"\u015a": 600,
"\u015b": 600,
"\u015e": 600,
"\u015f": 600,
"\u0160": 600,
"\u0161": 600,
"\u0162": 600,
"\u0163": 600,
"\u0164": 600,
"\u0165": 600,
"\u016a": 600,
"\u016b": 600,
"\u016e": 600,
"\u016f": 600,
"\u0170": 600,
"\u0171": 600,
"\u0172": 600,
"\u0173": 600,
"\u0178": 600,
"\u0179": 600,
"\u017a": 600,
"\u017b": 600,
"\u017c": 600,
"\u017d": 600,
"\u017e": 600,
"\u0192": 600,
"\u0218": 600,
"\u0219": 600,
"\u02c6": 600,
"\u02c7": 600,
"\u02d8": 600,
"\u02d9": 600,
"\u02da": 600,
"\u02db": 600,
"\u02dc": 600,
"\u02dd": 600,
"\u2013": 600,
"\u2014": 600,
"\u2018": 600,
"\u2019": 600,
"\u201a": 600,
"\u201c": 600,
"\u201d": 600,
"\u201e": 600,
"\u2020": 600,
"\u2021": 600,
"\u2022": 600,
"\u2026": 600,
"\u2030": 600,
"\u2039": 600,
"\u203a": 600,
"\u2044": 600,
"\u2122": 600,
"\u2202": 600,
"\u2206": 600,
"\u2211": 600,
"\u2212": 600,
"\u221a": 600,
"\u2260": 600,
"\u2264": 600,
"\u2265": 600,
"\u25ca": 600,
"\uf6c3": 600,
"\ufb01": 600,
"\ufb02": 600,
},
),
"Courier-BoldOblique": (
{
"FontName": "Courier-BoldOblique",
"Descent": -194.0,
"FontBBox": (-49.0, -249.0, 758.0, 811.0),
"FontWeight": "Bold",
"CapHeight": 572.0,
"FontFamily": "Courier",
"Flags": 64,
"XHeight": 434.0,
"ItalicAngle": -11.0,
"Ascent": 627.0,
},
{
" ": 600,
"!": 600,
'"': 600,
"#": 600,
"$": 600,
"%": 600,
"&": 600,
"'": 600,
"(": 600,
")": 600,
"*": 600,
"+": 600,
",": 600,
"-": 600,
".": 600,
"/": 600,
"0": 600,
"1": 600,
"2": 600,
"3": 600,
"4": 600,
"5": 600,
"6": 600,
"7": 600,
"8": 600,
"9": 600,
":": 600,
";": 600,
"<": 600,
"=": 600,
">": 600,
"?": 600,
"@": 600,
"A": 600,
"B": 600,
"C": 600,
"D": 600,
"E": 600,
"F": 600,
"G": 600,
"H": 600,
"I": 600,
"J": 600,
"K": 600,
"L": 600,
"M": 600,
"N": 600,
"O": 600,
"P": 600,
"Q": 600,
"R": 600,
"S": 600,
"T": 600,
"U": 600,
"V": 600,
"W": 600,
"X": 600,
"Y": 600,
"Z": 600,
"[": 600,
"\\": 600,
"]": 600,
"^": 600,
"_": 600,
"`": 600,
"a": 600,
"b": 600,
"c": 600,
"d": 600,
"e": 600,
"f": 600,
"g": 600,
"h": 600,
"i": 600,
"j": 600,
"k": 600,
"l": 600,
"m": 600,
"n": 600,
"o": 600,
"p": 600,
"q": 600,
"r": 600,
"s": 600,
"t": 600,
"u": 600,
"v": 600,
"w": 600,
"x": 600,
"y": 600,
"z": 600,
"{": 600,
"|": 600,
"}": 600,
"~": 600,
"\xa1": 600,
"\xa2": 600,
"\xa3": 600,
"\xa4": 600,
"\xa5": 600,
"\xa6": 600,
"\xa7": 600,
"\xa8": 600,
"\xa9": 600,
"\xaa": 600,
"\xab": 600,
"\xac": 600,
"\xae": 600,
"\xaf": 600,
"\xb0": 600,
"\xb1": 600,
"\xb2": 600,
"\xb3": 600,
"\xb4": 600,
"\xb5": 600,
"\xb6": 600,
"\xb7": 600,
"\xb8": 600,
"\xb9": 600,
"\xba": 600,
"\xbb": 600,
"\xbc": 600,
"\xbd": 600,
"\xbe": 600,
"\xbf": 600,
"\xc0": 600,
"\xc1": 600,
"\xc2": 600,
"\xc3": 600,
"\xc4": 600,
"\xc5": 600,
"\xc6": 600,
"\xc7": 600,
"\xc8": 600,
"\xc9": 600,
"\xca": 600,
"\xcb": 600,
"\xcc": 600,
"\xcd": 600,
"\xce": 600,
"\xcf": 600,
"\xd0": 600,
"\xd1": 600,
"\xd2": 600,
"\xd3": 600,
"\xd4": 600,
"\xd5": 600,
"\xd6": 600,
"\xd7": 600,
"\xd8": 600,
"\xd9": 600,
"\xda": 600,
"\xdb": 600,
"\xdc": 600,
"\xdd": 600,
"\xde": 600,
"\xdf": 600,
"\xe0": 600,
"\xe1": 600,
"\xe2": 600,
"\xe3": 600,
"\xe4": 600,
"\xe5": 600,
"\xe6": 600,
"\xe7": 600,
"\xe8": 600,
"\xe9": 600,
"\xea": 600,
"\xeb": 600,
"\xec": 600,
"\xed": 600,
"\xee": 600,
"\xef": 600,
"\xf0": 600,
"\xf1": 600,
"\xf2": 600,
"\xf3": 600,
"\xf4": 600,
"\xf5": 600,
"\xf6": 600,
"\xf7": 600,
"\xf8": 600,
"\xf9": 600,
"\xfa": 600,
"\xfb": 600,
"\xfc": 600,
"\xfd": 600,
"\xfe": 600,
"\xff": 600,
"\u0100": 600,
"\u0101": 600,
"\u0102": 600,
"\u0103": 600,
"\u0104": 600,
"\u0105": 600,
"\u0106": 600,
"\u0107": 600,
"\u010c": 600,
"\u010d": 600,
"\u010e": 600,
"\u010f": 600,
"\u0110": 600,
"\u0111": 600,
"\u0112": 600,
"\u0113": 600,
"\u0116": 600,
"\u0117": 600,
"\u0118": 600,
"\u0119": 600,
"\u011a": 600,
"\u011b": 600,
"\u011e": 600,
"\u011f": 600,
"\u0122": 600,
"\u0123": 600,
"\u012a": 600,
"\u012b": 600,
"\u012e": 600,
"\u012f": 600,
"\u0130": 600,
"\u0131": 600,
"\u0136": 600,
"\u0137": 600,
"\u0139": 600,
"\u013a": 600,
"\u013b": 600,
"\u013c": 600,
"\u013d": 600,
"\u013e": 600,
"\u0141": 600,
"\u0142": 600,
"\u0143": 600,
"\u0144": 600,
"\u0145": 600,
"\u0146": 600,
"\u0147": 600,
"\u0148": 600,
"\u014c": 600,
"\u014d": 600,
"\u0150": 600,
"\u0151": 600,
"\u0152": 600,
"\u0153": 600,
"\u0154": 600,
"\u0155": 600,
"\u0156": 600,
"\u0157": 600,
"\u0158": 600,
"\u0159": 600,
"\u015a": 600,
"\u015b": 600,
"\u015e": 600,
"\u015f": 600,
"\u0160": 600,
"\u0161": 600,
"\u0162": 600,
"\u0163": 600,
"\u0164": 600,
"\u0165": 600,
"\u016a": 600,
"\u016b": 600,
"\u016e": 600,
"\u016f": 600,
"\u0170": 600,
"\u0171": 600,
"\u0172": 600,
"\u0173": 600,
"\u0178": 600,
"\u0179": 600,
"\u017a": 600,
"\u017b": 600,
"\u017c": 600,
"\u017d": 600,
"\u017e": 600,
"\u0192": 600,
"\u0218": 600,
"\u0219": 600,
"\u02c6": 600,
"\u02c7": 600,
"\u02d8": 600,
"\u02d9": 600,
"\u02da": 600,
"\u02db": 600,
"\u02dc": 600,
"\u02dd": 600,
"\u2013": 600,
"\u2014": 600,
"\u2018": 600,
"\u2019": 600,
"\u201a": 600,
"\u201c": 600,
"\u201d": 600,
"\u201e": 600,
"\u2020": 600,
"\u2021": 600,
"\u2022": 600,
"\u2026": 600,
"\u2030": 600,
"\u2039": 600,
"\u203a": 600,
"\u2044": 600,
"\u2122": 600,
"\u2202": 600,
"\u2206": 600,
"\u2211": 600,
"\u2212": 600,
"\u221a": 600,
"\u2260": 600,
"\u2264": 600,
"\u2265": 600,
"\u25ca": 600,
"\uf6c3": 600,
"\ufb01": 600,
"\ufb02": 600,
},
),
"Courier-Oblique": (
{
"FontName": "Courier-Oblique",
"Descent": -194.0,
"FontBBox": (-49.0, -249.0, 749.0, 803.0),
"FontWeight": "Medium",
"CapHeight": 572.0,
"FontFamily": "Courier",
"Flags": 64,
"XHeight": 434.0,
"ItalicAngle": -11.0,
"Ascent": 627.0,
},
{
" ": 600,
"!": 600,
'"': 600,
"#": 600,
"$": 600,
"%": 600,
"&": 600,
"'": 600,
"(": 600,
")": 600,
"*": 600,
"+": 600,
",": 600,
"-": 600,
".": 600,
"/": 600,
"0": 600,
"1": 600,
"2": 600,
"3": 600,
"4": 600,
"5": 600,
"6": 600,
"7": 600,
"8": 600,
"9": 600,
":": 600,
";": 600,
"<": 600,
"=": 600,
">": 600,
"?": 600,
"@": 600,
"A": 600,
"B": 600,
"C": 600,
"D": 600,
"E": 600,
"F": 600,
"G": 600,
"H": 600,
"I": 600,
"J": 600,
"K": 600,
"L": 600,
"M": 600,
"N": 600,
"O": 600,
"P": 600,
"Q": 600,
"R": 600,
"S": 600,
"T": 600,
"U": 600,
"V": 600,
"W": 600,
"X": 600,
"Y": 600,
"Z": 600,
"[": 600,
"\\": 600,
"]": 600,
"^": 600,
"_": 600,
"`": 600,
"a": 600,
"b": 600,
"c": 600,
"d": 600,
"e": 600,
"f": 600,
"g": 600,
"h": 600,
"i": 600,
"j": 600,
"k": 600,
"l": 600,
"m": 600,
"n": 600,
"o": 600,
"p": 600,
"q": 600,
"r": 600,
"s": 600,
"t": 600,
"u": 600,
"v": 600,
"w": 600,
"x": 600,
"y": 600,
"z": 600,
"{": 600,
"|": 600,
"}": 600,
"~": 600,
"\xa1": 600,
"\xa2": 600,
"\xa3": 600,
"\xa4": 600,
"\xa5": 600,
"\xa6": 600,
"\xa7": 600,
"\xa8": 600,
"\xa9": 600,
"\xaa": 600,
"\xab": 600,
"\xac": 600,
"\xae": 600,
"\xaf": 600,
"\xb0": 600,
"\xb1": 600,
"\xb2": 600,
"\xb3": 600,
"\xb4": 600,
"\xb5": 600,
"\xb6": 600,
"\xb7": 600,
"\xb8": 600,
"\xb9": 600,
"\xba": 600,
"\xbb": 600,
"\xbc": 600,
"\xbd": 600,
"\xbe": 600,
"\xbf": 600,
"\xc0": 600,
"\xc1": 600,
"\xc2": 600,
"\xc3": 600,
"\xc4": 600,
"\xc5": 600,
"\xc6": 600,
"\xc7": 600,
"\xc8": 600,
"\xc9": 600,
"\xca": 600,
"\xcb": 600,
"\xcc": 600,
"\xcd": 600,
"\xce": 600,
"\xcf": 600,
"\xd0": 600,
"\xd1": 600,
"\xd2": 600,
"\xd3": 600,
"\xd4": 600,
"\xd5": 600,
"\xd6": 600,
"\xd7": 600,
"\xd8": 600,
"\xd9": 600,
"\xda": 600,
"\xdb": 600,
"\xdc": 600,
"\xdd": 600,
"\xde": 600,
"\xdf": 600,
"\xe0": 600,
"\xe1": 600,
"\xe2": 600,
"\xe3": 600,
"\xe4": 600,
"\xe5": 600,
"\xe6": 600,
"\xe7": 600,
"\xe8": 600,
"\xe9": 600,
"\xea": 600,
"\xeb": 600,
"\xec": 600,
"\xed": 600,
"\xee": 600,
"\xef": 600,
"\xf0": 600,
"\xf1": 600,
"\xf2": 600,
"\xf3": 600,
"\xf4": 600,
"\xf5": 600,
"\xf6": 600,
"\xf7": 600,
"\xf8": 600,
"\xf9": 600,
"\xfa": 600,
"\xfb": 600,
"\xfc": 600,
"\xfd": 600,
"\xfe": 600,
"\xff": 600,
"\u0100": 600,
"\u0101": 600,
"\u0102": 600,
"\u0103": 600,
"\u0104": 600,
"\u0105": 600,
"\u0106": 600,
"\u0107": 600,
"\u010c": 600,
"\u010d": 600,
"\u010e": 600,
"\u010f": 600,
"\u0110": 600,
"\u0111": 600,
"\u0112": 600,
"\u0113": 600,
"\u0116": 600,
"\u0117": 600,
"\u0118": 600,
"\u0119": 600,
"\u011a": 600,
"\u011b": 600,
"\u011e": 600,
"\u011f": 600,
"\u0122": 600,
"\u0123": 600,
"\u012a": 600,
"\u012b": 600,
"\u012e": 600,
"\u012f": 600,
"\u0130": 600,
"\u0131": 600,
"\u0136": 600,
"\u0137": 600,
"\u0139": 600,
"\u013a": 600,
"\u013b": 600,
"\u013c": 600,
"\u013d": 600,
"\u013e": 600,
"\u0141": 600,
"\u0142": 600,
"\u0143": 600,
"\u0144": 600,
"\u0145": 600,
"\u0146": 600,
"\u0147": 600,
"\u0148": 600,
"\u014c": 600,
"\u014d": 600,
"\u0150": 600,
"\u0151": 600,
"\u0152": 600,
"\u0153": 600,
"\u0154": 600,
"\u0155": 600,
"\u0156": 600,
"\u0157": 600,
"\u0158": 600,
"\u0159": 600,
"\u015a": 600,
"\u015b": 600,
"\u015e": 600,
"\u015f": 600,
"\u0160": 600,
"\u0161": 600,
"\u0162": 600,
"\u0163": 600,
"\u0164": 600,
"\u0165": 600,
"\u016a": 600,
"\u016b": 600,
"\u016e": 600,
"\u016f": 600,
"\u0170": 600,
"\u0171": 600,
"\u0172": 600,
"\u0173": 600,
"\u0178": 600,
"\u0179": 600,
"\u017a": 600,
"\u017b": 600,
"\u017c": 600,
"\u017d": 600,
"\u017e": 600,
"\u0192": 600,
"\u0218": 600,
"\u0219": 600,
"\u02c6": 600,
"\u02c7": 600,
"\u02d8": 600,
"\u02d9": 600,
"\u02da": 600,
"\u02db": 600,
"\u02dc": 600,
"\u02dd": 600,
"\u2013": 600,
"\u2014": 600,
"\u2018": 600,
"\u2019": 600,
"\u201a": 600,
"\u201c": 600,
"\u201d": 600,
"\u201e": 600,
"\u2020": 600,
"\u2021": 600,
"\u2022": 600,
"\u2026": 600,
"\u2030": 600,
"\u2039": 600,
"\u203a": 600,
"\u2044": 600,
"\u2122": 600,
"\u2202": 600,
"\u2206": 600,
"\u2211": 600,
"\u2212": 600,
"\u221a": 600,
"\u2260": 600,
"\u2264": 600,
"\u2265": 600,
"\u25ca": 600,
"\uf6c3": 600,
"\ufb01": 600,
"\ufb02": 600,
},
),
"Helvetica": (
{
"FontName": "Helvetica",
"Descent": -207.0,
"FontBBox": (-166.0, -225.0, 1000.0, 931.0),
"FontWeight": "Medium",
"CapHeight": 718.0,
"FontFamily": "Helvetica",
"Flags": 0,
"XHeight": 523.0,
"ItalicAngle": 0.0,
"Ascent": 718.0,
},
{
" ": 278,
"!": 278,
'"': 355,
"#": 556,
"$": 556,
"%": 889,
"&": 667,
"'": 191,
"(": 333,
")": 333,
"*": 389,
"+": 584,
",": 278,
"-": 333,
".": 278,
"/": 278,
"0": 556,
"1": 556,
"2": 556,
"3": 556,
"4": 556,
"5": 556,
"6": 556,
"7": 556,
"8": 556,
"9": 556,
":": 278,
";": 278,
"<": 584,
"=": 584,
">": 584,
"?": 556,
"@": 1015,
"A": 667,
"B": 667,
"C": 722,
"D": 722,
"E": 667,
"F": 611,
"G": 778,
"H": 722,
"I": 278,
"J": 500,
"K": 667,
"L": 556,
"M": 833,
"N": 722,
"O": 778,
"P": 667,
"Q": 778,
"R": 722,
"S": 667,
"T": 611,
"U": 722,
"V": 667,
"W": 944,
"X": 667,
"Y": 667,
"Z": 611,
"[": 278,
"\\": 278,
"]": 278,
"^": 469,
"_": 556,
"`": 333,
"a": 556,
"b": 556,
"c": 500,
"d": 556,
"e": 556,
"f": 278,
"g": 556,
"h": 556,
"i": 222,
"j": 222,
"k": 500,
"l": 222,
"m": 833,
"n": 556,
"o": 556,
"p": 556,
"q": 556,
"r": 333,
"s": 500,
"t": 278,
"u": 556,
"v": 500,
"w": 722,
"x": 500,
"y": 500,
"z": 500,
"{": 334,
"|": 260,
"}": 334,
"~": 584,
"\xa1": 333,
"\xa2": 556,
"\xa3": 556,
"\xa4": 556,
"\xa5": 556,
"\xa6": 260,
"\xa7": 556,
"\xa8": 333,
"\xa9": 737,
"\xaa": 370,
"\xab": 556,
"\xac": 584,
"\xae": 737,
"\xaf": 333,
"\xb0": 400,
"\xb1": 584,
"\xb2": 333,
"\xb3": 333,
"\xb4": 333,
"\xb5": 556,
"\xb6": 537,
"\xb7": 278,
"\xb8": 333,
"\xb9": 333,
"\xba": 365,
"\xbb": 556,
"\xbc": 834,
"\xbd": 834,
"\xbe": 834,
"\xbf": 611,
"\xc0": 667,
"\xc1": 667,
"\xc2": 667,
"\xc3": 667,
"\xc4": 667,
"\xc5": 667,
"\xc6": 1000,
"\xc7": 722,
"\xc8": 667,
"\xc9": 667,
"\xca": 667,
"\xcb": 667,
"\xcc": 278,
"\xcd": 278,
"\xce": 278,
"\xcf": 278,
"\xd0": 722,
"\xd1": 722,
"\xd2": 778,
"\xd3": 778,
"\xd4": 778,
"\xd5": 778,
"\xd6": 778,
"\xd7": 584,
"\xd8": 778,
"\xd9": 722,
"\xda": 722,
"\xdb": 722,
"\xdc": 722,
"\xdd": 667,
"\xde": 667,
"\xdf": 611,
"\xe0": 556,
"\xe1": 556,
"\xe2": 556,
"\xe3": 556,
"\xe4": 556,
"\xe5": 556,
"\xe6": 889,
"\xe7": 500,
"\xe8": 556,
"\xe9": 556,
"\xea": 556,
"\xeb": 556,
"\xec": 278,
"\xed": 278,
"\xee": 278,
"\xef": 278,
"\xf0": 556,
"\xf1": 556,
"\xf2": 556,
"\xf3": 556,
"\xf4": 556,
"\xf5": 556,
"\xf6": 556,
"\xf7": 584,
"\xf8": 611,
"\xf9": 556,
"\xfa": 556,
"\xfb": 556,
"\xfc": 556,
"\xfd": 500,
"\xfe": 556,
"\xff": 500,
"\u0100": 667,
"\u0101": 556,
"\u0102": 667,
"\u0103": 556,
"\u0104": 667,
"\u0105": 556,
"\u0106": 722,
"\u0107": 500,
"\u010c": 722,
"\u010d": 500,
"\u010e": 722,
"\u010f": 643,
"\u0110": 722,
"\u0111": 556,
"\u0112": 667,
"\u0113": 556,
"\u0116": 667,
"\u0117": 556,
"\u0118": 667,
"\u0119": 556,
"\u011a": 667,
"\u011b": 556,
"\u011e": 778,
"\u011f": 556,
"\u0122": 778,
"\u0123": 556,
"\u012a": 278,
"\u012b": 278,
"\u012e": 278,
"\u012f": 222,
"\u0130": 278,
"\u0131": 278,
"\u0136": 667,
"\u0137": 500,
"\u0139": 556,
"\u013a": 222,
"\u013b": 556,
"\u013c": 222,
"\u013d": 556,
"\u013e": 299,
"\u0141": 556,
"\u0142": 222,
"\u0143": 722,
"\u0144": 556,
"\u0145": 722,
"\u0146": 556,
"\u0147": 722,
"\u0148": 556,
"\u014c": 778,
"\u014d": 556,
"\u0150": 778,
"\u0151": 556,
"\u0152": 1000,
"\u0153": 944,
"\u0154": 722,
"\u0155": 333,
"\u0156": 722,
"\u0157": 333,
"\u0158": 722,
"\u0159": 333,
"\u015a": 667,
"\u015b": 500,
"\u015e": 667,
"\u015f": 500,
"\u0160": 667,
"\u0161": 500,
"\u0162": 611,
"\u0163": 278,
"\u0164": 611,
"\u0165": 317,
"\u016a": 722,
"\u016b": 556,
"\u016e": 722,
"\u016f": 556,
"\u0170": 722,
"\u0171": 556,
"\u0172": 722,
"\u0173": 556,
"\u0178": 667,
"\u0179": 611,
"\u017a": 500,
"\u017b": 611,
"\u017c": 500,
"\u017d": 611,
"\u017e": 500,
"\u0192": 556,
"\u0218": 667,
"\u0219": 500,
"\u02c6": 333,
"\u02c7": 333,
"\u02d8": 333,
"\u02d9": 333,
"\u02da": 333,
"\u02db": 333,
"\u02dc": 333,
"\u02dd": 333,
"\u2013": 556,
"\u2014": 1000,
"\u2018": 222,
"\u2019": 222,
"\u201a": 222,
"\u201c": 333,
"\u201d": 333,
"\u201e": 333,
"\u2020": 556,
"\u2021": 556,
"\u2022": 350,
"\u2026": 1000,
"\u2030": 1000,
"\u2039": 333,
"\u203a": 333,
"\u2044": 167,
"\u2122": 1000,
"\u2202": 476,
"\u2206": 612,
"\u2211": 600,
"\u2212": 584,
"\u221a": 453,
"\u2260": 549,
"\u2264": 549,
"\u2265": 549,
"\u25ca": 471,
"\uf6c3": 250,
"\ufb01": 500,
"\ufb02": 500,
},
),
"Helvetica-Bold": (
{
"FontName": "Helvetica-Bold",
"Descent": -207.0,
"FontBBox": (-170.0, -228.0, 1003.0, 962.0),
"FontWeight": "Bold",
"CapHeight": 718.0,
"FontFamily": "Helvetica",
"Flags": 0,
"XHeight": 532.0,
"ItalicAngle": 0.0,
"Ascent": 718.0,
},
{
" ": 278,
"!": 333,
'"': 474,
"#": 556,
"$": 556,
"%": 889,
"&": 722,
"'": 238,
"(": 333,
")": 333,
"*": 389,
"+": 584,
",": 278,
"-": 333,
".": 278,
"/": 278,
"0": 556,
"1": 556,
"2": 556,
"3": 556,
"4": 556,
"5": 556,
"6": 556,
"7": 556,
"8": 556,
"9": 556,
":": 333,
";": 333,
"<": 584,
"=": 584,
">": 584,
"?": 611,
"@": 975,
"A": 722,
"B": 722,
"C": 722,
"D": 722,
"E": 667,
"F": 611,
"G": 778,
"H": 722,
"I": 278,
"J": 556,
"K": 722,
"L": 611,
"M": 833,
"N": 722,
"O": 778,
"P": 667,
"Q": 778,
"R": 722,
"S": 667,
"T": 611,
"U": 722,
"V": 667,
"W": 944,
"X": 667,
"Y": 667,
"Z": 611,
"[": 333,
"\\": 278,
"]": 333,
"^": 584,
"_": 556,
"`": 333,
"a": 556,
"b": 611,
"c": 556,
"d": 611,
"e": 556,
"f": 333,
"g": 611,
"h": 611,
"i": 278,
"j": 278,
"k": 556,
"l": 278,
"m": 889,
"n": 611,
"o": 611,
"p": 611,
"q": 611,
"r": 389,
"s": 556,
"t": 333,
"u": 611,
"v": 556,
"w": 778,
"x": 556,
"y": 556,
"z": 500,
"{": 389,
"|": 280,
"}": 389,
"~": 584,
"\xa1": 333,
"\xa2": 556,
"\xa3": 556,
"\xa4": 556,
"\xa5": 556,
"\xa6": 280,
"\xa7": 556,
"\xa8": 333,
"\xa9": 737,
"\xaa": 370,
"\xab": 556,
"\xac": 584,
"\xae": 737,
"\xaf": 333,
"\xb0": 400,
"\xb1": 584,
"\xb2": 333,
"\xb3": 333,
"\xb4": 333,
"\xb5": 611,
"\xb6": 556,
"\xb7": 278,
"\xb8": 333,
"\xb9": 333,
"\xba": 365,
"\xbb": 556,
"\xbc": 834,
"\xbd": 834,
"\xbe": 834,
"\xbf": 611,
"\xc0": 722,
"\xc1": 722,
"\xc2": 722,
"\xc3": 722,
"\xc4": 722,
"\xc5": 722,
"\xc6": 1000,
"\xc7": 722,
"\xc8": 667,
"\xc9": 667,
"\xca": 667,
"\xcb": 667,
"\xcc": 278,
"\xcd": 278,
"\xce": 278,
"\xcf": 278,
"\xd0": 722,
"\xd1": 722,
"\xd2": 778,
"\xd3": 778,
"\xd4": 778,
"\xd5": 778,
"\xd6": 778,
"\xd7": 584,
"\xd8": 778,
"\xd9": 722,
"\xda": 722,
"\xdb": 722,
"\xdc": 722,
"\xdd": 667,
"\xde": 667,
"\xdf": 611,
"\xe0": 556,
"\xe1": 556,
"\xe2": 556,
"\xe3": 556,
"\xe4": 556,
"\xe5": 556,
"\xe6": 889,
"\xe7": 556,
"\xe8": 556,
"\xe9": 556,
"\xea": 556,
"\xeb": 556,
"\xec": 278,
"\xed": 278,
"\xee": 278,
"\xef": 278,
"\xf0": 611,
"\xf1": 611,
"\xf2": 611,
"\xf3": 611,
"\xf4": 611,
"\xf5": 611,
"\xf6": 611,
"\xf7": 584,
"\xf8": 611,
"\xf9": 611,
"\xfa": 611,
"\xfb": 611,
"\xfc": 611,
"\xfd": 556,
"\xfe": 611,
"\xff": 556,
"\u0100": 722,
"\u0101": 556,
"\u0102": 722,
"\u0103": 556,
"\u0104": 722,
"\u0105": 556,
"\u0106": 722,
"\u0107": 556,
"\u010c": 722,
"\u010d": 556,
"\u010e": 722,
"\u010f": 743,
"\u0110": 722,
"\u0111": 611,
"\u0112": 667,
"\u0113": 556,
"\u0116": 667,
"\u0117": 556,
"\u0118": 667,
"\u0119": 556,
"\u011a": 667,
"\u011b": 556,
"\u011e": 778,
"\u011f": 611,
"\u0122": 778,
"\u0123": 611,
"\u012a": 278,
"\u012b": 278,
"\u012e": 278,
"\u012f": 278,
"\u0130": 278,
"\u0131": 278,
"\u0136": 722,
"\u0137": 556,
"\u0139": 611,
"\u013a": 278,
"\u013b": 611,
"\u013c": 278,
"\u013d": 611,
"\u013e": 400,
"\u0141": 611,
"\u0142": 278,
"\u0143": 722,
"\u0144": 611,
"\u0145": 722,
"\u0146": 611,
"\u0147": 722,
"\u0148": 611,
"\u014c": 778,
"\u014d": 611,
"\u0150": 778,
"\u0151": 611,
"\u0152": 1000,
"\u0153": 944,
"\u0154": 722,
"\u0155": 389,
"\u0156": 722,
"\u0157": 389,
"\u0158": 722,
"\u0159": 389,
"\u015a": 667,
"\u015b": 556,
"\u015e": 667,
"\u015f": 556,
"\u0160": 667,
"\u0161": 556,
"\u0162": 611,
"\u0163": 333,
"\u0164": 611,
"\u0165": 389,
"\u016a": 722,
"\u016b": 611,
"\u016e": 722,
"\u016f": 611,
"\u0170": 722,
"\u0171": 611,
"\u0172": 722,
"\u0173": 611,
"\u0178": 667,
"\u0179": 611,
"\u017a": 500,
"\u017b": 611,
"\u017c": 500,
"\u017d": 611,
"\u017e": 500,
"\u0192": 556,
"\u0218": 667,
"\u0219": 556,
"\u02c6": 333,
"\u02c7": 333,
"\u02d8": 333,
"\u02d9": 333,
"\u02da": 333,
"\u02db": 333,
"\u02dc": 333,
"\u02dd": 333,
"\u2013": 556,
"\u2014": 1000,
"\u2018": 278,
"\u2019": 278,
"\u201a": 278,
"\u201c": 500,
"\u201d": 500,
"\u201e": 500,
"\u2020": 556,
"\u2021": 556,
"\u2022": 350,
"\u2026": 1000,
"\u2030": 1000,
"\u2039": 333,
"\u203a": 333,
"\u2044": 167,
"\u2122": 1000,
"\u2202": 494,
"\u2206": 612,
"\u2211": 600,
"\u2212": 584,
"\u221a": 549,
"\u2260": 549,
"\u2264": 549,
"\u2265": 549,
"\u25ca": 494,
"\uf6c3": 250,
"\ufb01": 611,
"\ufb02": 611,
},
),
"Helvetica-BoldOblique": (
{
"FontName": "Helvetica-BoldOblique",
"Descent": -207.0,
"FontBBox": (-175.0, -228.0, 1114.0, 962.0),
"FontWeight": "Bold",
"CapHeight": 718.0,
"FontFamily": "Helvetica",
"Flags": 0,
"XHeight": 532.0,
"ItalicAngle": -12.0,
"Ascent": 718.0,
},
{
" ": 278,
"!": 333,
'"': 474,
"#": 556,
"$": 556,
"%": 889,
"&": 722,
"'": 238,
"(": 333,
")": 333,
"*": 389,
"+": 584,
",": 278,
"-": 333,
".": 278,
"/": 278,
"0": 556,
"1": 556,
"2": 556,
"3": 556,
"4": 556,
"5": 556,
"6": 556,
"7": 556,
"8": 556,
"9": 556,
":": 333,
";": 333,
"<": 584,
"=": 584,
">": 584,
"?": 611,
"@": 975,
"A": 722,
"B": 722,
"C": 722,
"D": 722,
"E": 667,
"F": 611,
"G": 778,
"H": 722,
"I": 278,
"J": 556,
"K": 722,
"L": 611,
"M": 833,
"N": 722,
"O": 778,
"P": 667,
"Q": 778,
"R": 722,
"S": 667,
"T": 611,
"U": 722,
"V": 667,
"W": 944,
"X": 667,
"Y": 667,
"Z": 611,
"[": 333,
"\\": 278,
"]": 333,
"^": 584,
"_": 556,
"`": 333,
"a": 556,
"b": 611,
"c": 556,
"d": 611,
"e": 556,
"f": 333,
"g": 611,
"h": 611,
"i": 278,
"j": 278,
"k": 556,
"l": 278,
"m": 889,
"n": 611,
"o": 611,
"p": 611,
"q": 611,
"r": 389,
"s": 556,
"t": 333,
"u": 611,
"v": 556,
"w": 778,
"x": 556,
"y": 556,
"z": 500,
"{": 389,
"|": 280,
"}": 389,
"~": 584,
"\xa1": 333,
"\xa2": 556,
"\xa3": 556,
"\xa4": 556,
"\xa5": 556,
"\xa6": 280,
"\xa7": 556,
"\xa8": 333,
"\xa9": 737,
"\xaa": 370,
"\xab": 556,
"\xac": 584,
"\xae": 737,
"\xaf": 333,
"\xb0": 400,
"\xb1": 584,
"\xb2": 333,
"\xb3": 333,
"\xb4": 333,
"\xb5": 611,
"\xb6": 556,
"\xb7": 278,
"\xb8": 333,
"\xb9": 333,
"\xba": 365,
"\xbb": 556,
"\xbc": 834,
"\xbd": 834,
"\xbe": 834,
"\xbf": 611,
"\xc0": 722,
"\xc1": 722,
"\xc2": 722,
"\xc3": 722,
"\xc4": 722,
"\xc5": 722,
"\xc6": 1000,
"\xc7": 722,
"\xc8": 667,
"\xc9": 667,
"\xca": 667,
"\xcb": 667,
"\xcc": 278,
"\xcd": 278,
"\xce": 278,
"\xcf": 278,
"\xd0": 722,
"\xd1": 722,
"\xd2": 778,
"\xd3": 778,
"\xd4": 778,
"\xd5": 778,
"\xd6": 778,
"\xd7": 584,
"\xd8": 778,
"\xd9": 722,
"\xda": 722,
"\xdb": 722,
"\xdc": 722,
"\xdd": 667,
"\xde": 667,
"\xdf": 611,
"\xe0": 556,
"\xe1": 556,
"\xe2": 556,
"\xe3": 556,
"\xe4": 556,
"\xe5": 556,
"\xe6": 889,
"\xe7": 556,
"\xe8": 556,
"\xe9": 556,
"\xea": 556,
"\xeb": 556,
"\xec": 278,
"\xed": 278,
"\xee": 278,
"\xef": 278,
"\xf0": 611,
"\xf1": 611,
"\xf2": 611,
"\xf3": 611,
"\xf4": 611,
"\xf5": 611,
"\xf6": 611,
"\xf7": 584,
"\xf8": 611,
"\xf9": 611,
"\xfa": 611,
"\xfb": 611,
"\xfc": 611,
"\xfd": 556,
"\xfe": 611,
"\xff": 556,
"\u0100": 722,
"\u0101": 556,
"\u0102": 722,
"\u0103": 556,
"\u0104": 722,
"\u0105": 556,
"\u0106": 722,
"\u0107": 556,
"\u010c": 722,
"\u010d": 556,
"\u010e": 722,
"\u010f": 743,
"\u0110": 722,
"\u0111": 611,
"\u0112": 667,
"\u0113": 556,
"\u0116": 667,
"\u0117": 556,
"\u0118": 667,
"\u0119": 556,
"\u011a": 667,
"\u011b": 556,
"\u011e": 778,
"\u011f": 611,
"\u0122": 778,
"\u0123": 611,
"\u012a": 278,
"\u012b": 278,
"\u012e": 278,
"\u012f": 278,
"\u0130": 278,
"\u0131": 278,
"\u0136": 722,
"\u0137": 556,
"\u0139": 611,
"\u013a": 278,
"\u013b": 611,
"\u013c": 278,
"\u013d": 611,
"\u013e": 400,
"\u0141": 611,
"\u0142": 278,
"\u0143": 722,
"\u0144": 611,
"\u0145": 722,
"\u0146": 611,
"\u0147": 722,
"\u0148": 611,
"\u014c": 778,
"\u014d": 611,
"\u0150": 778,
"\u0151": 611,
"\u0152": 1000,
"\u0153": 944,
"\u0154": 722,
"\u0155": 389,
"\u0156": 722,
"\u0157": 389,
"\u0158": 722,
"\u0159": 389,
"\u015a": 667,
"\u015b": 556,
"\u015e": 667,
"\u015f": 556,
"\u0160": 667,
"\u0161": 556,
"\u0162": 611,
"\u0163": 333,
"\u0164": 611,
"\u0165": 389,
"\u016a": 722,
"\u016b": 611,
"\u016e": 722,
"\u016f": 611,
"\u0170": 722,
"\u0171": 611,
"\u0172": 722,
"\u0173": 611,
"\u0178": 667,
"\u0179": 611,
"\u017a": 500,
"\u017b": 611,
"\u017c": 500,
"\u017d": 611,
"\u017e": 500,
"\u0192": 556,
"\u0218": 667,
"\u0219": 556,
"\u02c6": 333,
"\u02c7": 333,
"\u02d8": 333,
"\u02d9": 333,
"\u02da": 333,
"\u02db": 333,
"\u02dc": 333,
"\u02dd": 333,
"\u2013": 556,
"\u2014": 1000,
"\u2018": 278,
"\u2019": 278,
"\u201a": 278,
"\u201c": 500,
"\u201d": 500,
"\u201e": 500,
"\u2020": 556,
"\u2021": 556,
"\u2022": 350,
"\u2026": 1000,
"\u2030": 1000,
"\u2039": 333,
"\u203a": 333,
"\u2044": 167,
"\u2122": 1000,
"\u2202": 494,
"\u2206": 612,
"\u2211": 600,
"\u2212": 584,
"\u221a": 549,
"\u2260": 549,
"\u2264": 549,
"\u2265": 549,
"\u25ca": 494,
"\uf6c3": 250,
"\ufb01": 611,
"\ufb02": 611,
},
),
"Helvetica-Oblique": (
{
"FontName": "Helvetica-Oblique",
"Descent": -207.0,
"FontBBox": (-171.0, -225.0, 1116.0, 931.0),
"FontWeight": "Medium",
"CapHeight": 718.0,
"FontFamily": "Helvetica",
"Flags": 0,
"XHeight": 523.0,
"ItalicAngle": -12.0,
"Ascent": 718.0,
},
{
" ": 278,
"!": 278,
'"': 355,
"#": 556,
"$": 556,
"%": 889,
"&": 667,
"'": 191,
"(": 333,
")": 333,
"*": 389,
"+": 584,
",": 278,
"-": 333,
".": 278,
"/": 278,
"0": 556,
"1": 556,
"2": 556,
"3": 556,
"4": 556,
"5": 556,
"6": 556,
"7": 556,
"8": 556,
"9": 556,
":": 278,
";": 278,
"<": 584,
"=": 584,
">": 584,
"?": 556,
"@": 1015,
"A": 667,
"B": 667,
"C": 722,
"D": 722,
"E": 667,
"F": 611,
"G": 778,
"H": 722,
"I": 278,
"J": 500,
"K": 667,
"L": 556,
"M": 833,
"N": 722,
"O": 778,
"P": 667,
"Q": 778,
"R": 722,
"S": 667,
"T": 611,
"U": 722,
"V": 667,
"W": 944,
"X": 667,
"Y": 667,
"Z": 611,
"[": 278,
"\\": 278,
"]": 278,
"^": 469,
"_": 556,
"`": 333,
"a": 556,
"b": 556,
"c": 500,
"d": 556,
"e": 556,
"f": 278,
"g": 556,
"h": 556,
"i": 222,
"j": 222,
"k": 500,
"l": 222,
"m": 833,
"n": 556,
"o": 556,
"p": 556,
"q": 556,
"r": 333,
"s": 500,
"t": 278,
"u": 556,
"v": 500,
"w": 722,
"x": 500,
"y": 500,
"z": 500,
"{": 334,
"|": 260,
"}": 334,
"~": 584,
"\xa1": 333,
"\xa2": 556,
"\xa3": 556,
"\xa4": 556,
"\xa5": 556,
"\xa6": 260,
"\xa7": 556,
"\xa8": 333,
"\xa9": 737,
"\xaa": 370,
"\xab": 556,
"\xac": 584,
"\xae": 737,
"\xaf": 333,
"\xb0": 400,
"\xb1": 584,
"\xb2": 333,
"\xb3": 333,
"\xb4": 333,
"\xb5": 556,
"\xb6": 537,
"\xb7": 278,
"\xb8": 333,
"\xb9": 333,
"\xba": 365,
"\xbb": 556,
"\xbc": 834,
"\xbd": 834,
"\xbe": 834,
"\xbf": 611,
"\xc0": 667,
"\xc1": 667,
"\xc2": 667,
"\xc3": 667,
"\xc4": 667,
"\xc5": 667,
"\xc6": 1000,
"\xc7": 722,
"\xc8": 667,
"\xc9": 667,
"\xca": 667,
"\xcb": 667,
"\xcc": 278,
"\xcd": 278,
"\xce": 278,
"\xcf": 278,
"\xd0": 722,
"\xd1": 722,
"\xd2": 778,
"\xd3": 778,
"\xd4": 778,
"\xd5": 778,
"\xd6": 778,
"\xd7": 584,
"\xd8": 778,
"\xd9": 722,
"\xda": 722,
"\xdb": 722,
"\xdc": 722,
"\xdd": 667,
"\xde": 667,
"\xdf": 611,
"\xe0": 556,
"\xe1": 556,
"\xe2": 556,
"\xe3": 556,
"\xe4": 556,
"\xe5": 556,
"\xe6": 889,
"\xe7": 500,
"\xe8": 556,
"\xe9": 556,
"\xea": 556,
"\xeb": 556,
"\xec": 278,
"\xed": 278,
"\xee": 278,
"\xef": 278,
"\xf0": 556,
"\xf1": 556,
"\xf2": 556,
"\xf3": 556,
"\xf4": 556,
"\xf5": 556,
"\xf6": 556,
"\xf7": 584,
"\xf8": 611,
"\xf9": 556,
"\xfa": 556,
"\xfb": 556,
"\xfc": 556,
"\xfd": 500,
"\xfe": 556,
"\xff": 500,
"\u0100": 667,
"\u0101": 556,
"\u0102": 667,
"\u0103": 556,
"\u0104": 667,
"\u0105": 556,
"\u0106": 722,
"\u0107": 500,
"\u010c": 722,
"\u010d": 500,
"\u010e": 722,
"\u010f": 643,
"\u0110": 722,
"\u0111": 556,
"\u0112": 667,
"\u0113": 556,
"\u0116": 667,
"\u0117": 556,
"\u0118": 667,
"\u0119": 556,
"\u011a": 667,
"\u011b": 556,
"\u011e": 778,
"\u011f": 556,
"\u0122": 778,
"\u0123": 556,
"\u012a": 278,
"\u012b": 278,
"\u012e": 278,
"\u012f": 222,
"\u0130": 278,
"\u0131": 278,
"\u0136": 667,
"\u0137": 500,
"\u0139": 556,
"\u013a": 222,
"\u013b": 556,
"\u013c": 222,
"\u013d": 556,
"\u013e": 299,
"\u0141": 556,
"\u0142": 222,
"\u0143": 722,
"\u0144": 556,
"\u0145": 722,
"\u0146": 556,
"\u0147": 722,
"\u0148": 556,
"\u014c": 778,
"\u014d": 556,
"\u0150": 778,
"\u0151": 556,
"\u0152": 1000,
"\u0153": 944,
"\u0154": 722,
"\u0155": 333,
"\u0156": 722,
"\u0157": 333,
"\u0158": 722,
"\u0159": 333,
"\u015a": 667,
"\u015b": 500,
"\u015e": 667,
"\u015f": 500,
"\u0160": 667,
"\u0161": 500,
"\u0162": 611,
"\u0163": 278,
"\u0164": 611,
"\u0165": 317,
"\u016a": 722,
"\u016b": 556,
"\u016e": 722,
"\u016f": 556,
"\u0170": 722,
"\u0171": 556,
"\u0172": 722,
"\u0173": 556,
"\u0178": 667,
"\u0179": 611,
"\u017a": 500,
"\u017b": 611,
"\u017c": 500,
"\u017d": 611,
"\u017e": 500,
"\u0192": 556,
"\u0218": 667,
"\u0219": 500,
"\u02c6": 333,
"\u02c7": 333,
"\u02d8": 333,
"\u02d9": 333,
"\u02da": 333,
"\u02db": 333,
"\u02dc": 333,
"\u02dd": 333,
"\u2013": 556,
"\u2014": 1000,
"\u2018": 222,
"\u2019": 222,
"\u201a": 222,
"\u201c": 333,
"\u201d": 333,
"\u201e": 333,
"\u2020": 556,
"\u2021": 556,
"\u2022": 350,
"\u2026": 1000,
"\u2030": 1000,
"\u2039": 333,
"\u203a": 333,
"\u2044": 167,
"\u2122": 1000,
"\u2202": 476,
"\u2206": 612,
"\u2211": 600,
"\u2212": 584,
"\u221a": 453,
"\u2260": 549,
"\u2264": 549,
"\u2265": 549,
"\u25ca": 471,
"\uf6c3": 250,
"\ufb01": 500,
"\ufb02": 500,
},
),
"Symbol": (
{
"FontName": "Symbol",
"FontBBox": (-180.0, -293.0, 1090.0, 1010.0),
"FontWeight": "Medium",
"FontFamily": "Symbol",
"Flags": 0,
"ItalicAngle": 0.0,
},
{
" ": 250,
"!": 333,
"#": 500,
"%": 833,
"&": 778,
"(": 333,
")": 333,
"+": 549,
",": 250,
".": 250,
"/": 278,
"0": 500,
"1": 500,
"2": 500,
"3": 500,
"4": 500,
"5": 500,
"6": 500,
"7": 500,
"8": 500,
"9": 500,
":": 278,
";": 278,
"<": 549,
"=": 549,
">": 549,
"?": 444,
"[": 333,
"]": 333,
"_": 500,
"{": 480,
"|": 200,
"}": 480,
"\xac": 713,
"\xb0": 400,
"\xb1": 549,
"\xb5": 576,
"\xd7": 549,
"\xf7": 549,
"\u0192": 500,
"\u0391": 722,
"\u0392": 667,
"\u0393": 603,
"\u0395": 611,
"\u0396": 611,
"\u0397": 722,
"\u0398": 741,
"\u0399": 333,
"\u039a": 722,
"\u039b": 686,
"\u039c": 889,
"\u039d": 722,
"\u039e": 645,
"\u039f": 722,
"\u03a0": 768,
"\u03a1": 556,
"\u03a3": 592,
"\u03a4": 611,
"\u03a5": 690,
"\u03a6": 763,
"\u03a7": 722,
"\u03a8": 795,
"\u03b1": 631,
"\u03b2": 549,
"\u03b3": 411,
"\u03b4": 494,
"\u03b5": 439,
"\u03b6": 494,
"\u03b7": 603,
"\u03b8": 521,
"\u03b9": 329,
"\u03ba": 549,
"\u03bb": 549,
"\u03bd": 521,
"\u03be": 493,
"\u03bf": 549,
"\u03c0": 549,
"\u03c1": 549,
"\u03c2": 439,
"\u03c3": 603,
"\u03c4": 439,
"\u03c5": 576,
"\u03c6": 521,
"\u03c7": 549,
"\u03c8": 686,
"\u03c9": 686,
"\u03d1": 631,
"\u03d2": 620,
"\u03d5": 603,
"\u03d6": 713,
"\u2022": 460,
"\u2026": 1000,
"\u2032": 247,
"\u2033": 411,
"\u2044": 167,
"\u20ac": 750,
"\u2111": 686,
"\u2118": 987,
"\u211c": 795,
"\u2126": 768,
"\u2135": 823,
"\u2190": 987,
"\u2191": 603,
"\u2192": 987,
"\u2193": 603,
"\u2194": 1042,
"\u21b5": 658,
"\u21d0": 987,
"\u21d1": 603,
"\u21d2": 987,
"\u21d3": 603,
"\u21d4": 1042,
"\u2200": 713,
"\u2202": 494,
"\u2203": 549,
"\u2205": 823,
"\u2206": 612,
"\u2207": 713,
"\u2208": 713,
"\u2209": 713,
"\u220b": 439,
"\u220f": 823,
"\u2211": 713,
"\u2212": 549,
"\u2217": 500,
"\u221a": 549,
"\u221d": 713,
"\u221e": 713,
"\u2220": 768,
"\u2227": 603,
"\u2228": 603,
"\u2229": 768,
"\u222a": 768,
"\u222b": 274,
"\u2234": 863,
"\u223c": 549,
"\u2245": 549,
"\u2248": 549,
"\u2260": 549,
"\u2261": 549,
"\u2264": 549,
"\u2265": 549,
"\u2282": 713,
"\u2283": 713,
"\u2284": 713,
"\u2286": 713,
"\u2287": 713,
"\u2295": 768,
"\u2297": 768,
"\u22a5": 658,
"\u22c5": 250,
"\u2320": 686,
"\u2321": 686,
"\u2329": 329,
"\u232a": 329,
"\u25ca": 494,
"\u2660": 753,
"\u2663": 753,
"\u2665": 753,
"\u2666": 753,
"\uf6d9": 790,
"\uf6da": 790,
"\uf6db": 890,
"\uf8e5": 500,
"\uf8e6": 603,
"\uf8e7": 1000,
"\uf8e8": 790,
"\uf8e9": 790,
"\uf8ea": 786,
"\uf8eb": 384,
"\uf8ec": 384,
"\uf8ed": 384,
"\uf8ee": 384,
"\uf8ef": 384,
"\uf8f0": 384,
"\uf8f1": 494,
"\uf8f2": 494,
"\uf8f3": 494,
"\uf8f4": 494,
"\uf8f5": 686,
"\uf8f6": 384,
"\uf8f7": 384,
"\uf8f8": 384,
"\uf8f9": 384,
"\uf8fa": 384,
"\uf8fb": 384,
"\uf8fc": 494,
"\uf8fd": 494,
"\uf8fe": 494,
"\uf8ff": 790,
},
),
"Times-Bold": (
{
"FontName": "Times-Bold",
"Descent": -217.0,
"FontBBox": (-168.0, -218.0, 1000.0, 935.0),
"FontWeight": "Bold",
"CapHeight": 676.0,
"FontFamily": "Times",
"Flags": 0,
"XHeight": 461.0,
"ItalicAngle": 0.0,
"Ascent": 683.0,
},
{
" ": 250,
"!": 333,
'"': 555,
"#": 500,
"$": 500,
"%": 1000,
"&": 833,
"'": 278,
"(": 333,
")": 333,
"*": 500,
"+": 570,
",": 250,
"-": 333,
".": 250,
"/": 278,
"0": 500,
"1": 500,
"2": 500,
"3": 500,
"4": 500,
"5": 500,
"6": 500,
"7": 500,
"8": 500,
"9": 500,
":": 333,
";": 333,
"<": 570,
"=": 570,
">": 570,
"?": 500,
"@": 930,
"A": 722,
"B": 667,
"C": 722,
"D": 722,
"E": 667,
"F": 611,
"G": 778,
"H": 778,
"I": 389,
"J": 500,
"K": 778,
"L": 667,
"M": 944,
"N": 722,
"O": 778,
"P": 611,
"Q": 778,
"R": 722,
"S": 556,
"T": 667,
"U": 722,
"V": 722,
"W": 1000,
"X": 722,
"Y": 722,
"Z": 667,
"[": 333,
"\\": 278,
"]": 333,
"^": 581,
"_": 500,
"`": 333,
"a": 500,
"b": 556,
"c": 444,
"d": 556,
"e": 444,
"f": 333,
"g": 500,
"h": 556,
"i": 278,
"j": 333,
"k": 556,
"l": 278,
"m": 833,
"n": 556,
"o": 500,
"p": 556,
"q": 556,
"r": 444,
"s": 389,
"t": 333,
"u": 556,
"v": 500,
"w": 722,
"x": 500,
"y": 500,
"z": 444,
"{": 394,
"|": 220,
"}": 394,
"~": 520,
"\xa1": 333,
"\xa2": 500,
"\xa3": 500,
"\xa4": 500,
"\xa5": 500,
"\xa6": 220,
"\xa7": 500,
"\xa8": 333,
"\xa9": 747,
"\xaa": 300,
"\xab": 500,
"\xac": 570,
"\xae": 747,
"\xaf": 333,
"\xb0": 400,
"\xb1": 570,
"\xb2": 300,
"\xb3": 300,
"\xb4": 333,
"\xb5": 556,
"\xb6": 540,
"\xb7": 250,
"\xb8": 333,
"\xb9": 300,
"\xba": 330,
"\xbb": 500,
"\xbc": 750,
"\xbd": 750,
"\xbe": 750,
"\xbf": 500,
"\xc0": 722,
"\xc1": 722,
"\xc2": 722,
"\xc3": 722,
"\xc4": 722,
"\xc5": 722,
"\xc6": 1000,
"\xc7": 722,
"\xc8": 667,
"\xc9": 667,
"\xca": 667,
"\xcb": 667,
"\xcc": 389,
"\xcd": 389,
"\xce": 389,
"\xcf": 389,
"\xd0": 722,
"\xd1": 722,
"\xd2": 778,
"\xd3": 778,
"\xd4": 778,
"\xd5": 778,
"\xd6": 778,
"\xd7": 570,
"\xd8": 778,
"\xd9": 722,
"\xda": 722,
"\xdb": 722,
"\xdc": 722,
"\xdd": 722,
"\xde": 611,
"\xdf": 556,
"\xe0": 500,
"\xe1": 500,
"\xe2": 500,
"\xe3": 500,
"\xe4": 500,
"\xe5": 500,
"\xe6": 722,
"\xe7": 444,
"\xe8": 444,
"\xe9": 444,
"\xea": 444,
"\xeb": 444,
"\xec": 278,
"\xed": 278,
"\xee": 278,
"\xef": 278,
"\xf0": 500,
"\xf1": 556,
"\xf2": 500,
"\xf3": 500,
"\xf4": 500,
"\xf5": 500,
"\xf6": 500,
"\xf7": 570,
"\xf8": 500,
"\xf9": 556,
"\xfa": 556,
"\xfb": 556,
"\xfc": 556,
"\xfd": 500,
"\xfe": 556,
"\xff": 500,
"\u0100": 722,
"\u0101": 500,
"\u0102": 722,
"\u0103": 500,
"\u0104": 722,
"\u0105": 500,
"\u0106": 722,
"\u0107": 444,
"\u010c": 722,
"\u010d": 444,
"\u010e": 722,
"\u010f": 672,
"\u0110": 722,
"\u0111": 556,
"\u0112": 667,
"\u0113": 444,
"\u0116": 667,
"\u0117": 444,
"\u0118": 667,
"\u0119": 444,
"\u011a": 667,
"\u011b": 444,
"\u011e": 778,
"\u011f": 500,
"\u0122": 778,
"\u0123": 500,
"\u012a": 389,
"\u012b": 278,
"\u012e": 389,
"\u012f": 278,
"\u0130": 389,
"\u0131": 278,
"\u0136": 778,
"\u0137": 556,
"\u0139": 667,
"\u013a": 278,
"\u013b": 667,
"\u013c": 278,
"\u013d": 667,
"\u013e": 394,
"\u0141": 667,
"\u0142": 278,
"\u0143": 722,
"\u0144": 556,
"\u0145": 722,
"\u0146": 556,
"\u0147": 722,
"\u0148": 556,
"\u014c": 778,
"\u014d": 500,
"\u0150": 778,
"\u0151": 500,
"\u0152": 1000,
"\u0153": 722,
"\u0154": 722,
"\u0155": 444,
"\u0156": 722,
"\u0157": 444,
"\u0158": 722,
"\u0159": 444,
"\u015a": 556,
"\u015b": 389,
"\u015e": 556,
"\u015f": 389,
"\u0160": 556,
"\u0161": 389,
"\u0162": 667,
"\u0163": 333,
"\u0164": 667,
"\u0165": 416,
"\u016a": 722,
"\u016b": 556,
"\u016e": 722,
"\u016f": 556,
"\u0170": 722,
"\u0171": 556,
"\u0172": 722,
"\u0173": 556,
"\u0178": 722,
"\u0179": 667,
"\u017a": 444,
"\u017b": 667,
"\u017c": 444,
"\u017d": 667,
"\u017e": 444,
"\u0192": 500,
"\u0218": 556,
"\u0219": 389,
"\u02c6": 333,
"\u02c7": 333,
"\u02d8": 333,
"\u02d9": 333,
"\u02da": 333,
"\u02db": 333,
"\u02dc": 333,
"\u02dd": 333,
"\u2013": 500,
"\u2014": 1000,
"\u2018": 333,
"\u2019": 333,
"\u201a": 333,
"\u201c": 500,
"\u201d": 500,
"\u201e": 500,
"\u2020": 500,
"\u2021": 500,
"\u2022": 350,
"\u2026": 1000,
"\u2030": 1000,
"\u2039": 333,
"\u203a": 333,
"\u2044": 167,
"\u2122": 1000,
"\u2202": 494,
"\u2206": 612,
"\u2211": 600,
"\u2212": 570,
"\u221a": 549,
"\u2260": 549,
"\u2264": 549,
"\u2265": 549,
"\u25ca": 494,
"\uf6c3": 250,
"\ufb01": 556,
"\ufb02": 556,
},
),
"Times-BoldItalic": (
{
"FontName": "Times-BoldItalic",
"Descent": -217.0,
"FontBBox": (-200.0, -218.0, 996.0, 921.0),
"FontWeight": "Bold",
"CapHeight": 669.0,
"FontFamily": "Times",
"Flags": 0,
"XHeight": 462.0,
"ItalicAngle": -15.0,
"Ascent": 683.0,
},
{
" ": 250,
"!": 389,
'"': 555,
"#": 500,
"$": 500,
"%": 833,
"&": 778,
"'": 278,
"(": 333,
")": 333,
"*": 500,
"+": 570,
",": 250,
"-": 333,
".": 250,
"/": 278,
"0": 500,
"1": 500,
"2": 500,
"3": 500,
"4": 500,
"5": 500,
"6": 500,
"7": 500,
"8": 500,
"9": 500,
":": 333,
";": 333,
"<": 570,
"=": 570,
">": 570,
"?": 500,
"@": 832,
"A": 667,
"B": 667,
"C": 667,
"D": 722,
"E": 667,
"F": 667,
"G": 722,
"H": 778,
"I": 389,
"J": 500,
"K": 667,
"L": 611,
"M": 889,
"N": 722,
"O": 722,
"P": 611,
"Q": 722,
"R": 667,
"S": 556,
"T": 611,
"U": 722,
"V": 667,
"W": 889,
"X": 667,
"Y": 611,
"Z": 611,
"[": 333,
"\\": 278,
"]": 333,
"^": 570,
"_": 500,
"`": 333,
"a": 500,
"b": 500,
"c": 444,
"d": 500,
"e": 444,
"f": 333,
"g": 500,
"h": 556,
"i": 278,
"j": 278,
"k": 500,
"l": 278,
"m": 778,
"n": 556,
"o": 500,
"p": 500,
"q": 500,
"r": 389,
"s": 389,
"t": 278,
"u": 556,
"v": 444,
"w": 667,
"x": 500,
"y": 444,
"z": 389,
"{": 348,
"|": 220,
"}": 348,
"~": 570,
"\xa1": 389,
"\xa2": 500,
"\xa3": 500,
"\xa4": 500,
"\xa5": 500,
"\xa6": 220,
"\xa7": 500,
"\xa8": 333,
"\xa9": 747,
"\xaa": 266,
"\xab": 500,
"\xac": 606,
"\xae": 747,
"\xaf": 333,
"\xb0": 400,
"\xb1": 570,
"\xb2": 300,
"\xb3": 300,
"\xb4": 333,
"\xb5": 576,
"\xb6": 500,
"\xb7": 250,
"\xb8": 333,
"\xb9": 300,
"\xba": 300,
"\xbb": 500,
"\xbc": 750,
"\xbd": 750,
"\xbe": 750,
"\xbf": 500,
"\xc0": 667,
"\xc1": 667,
"\xc2": 667,
"\xc3": 667,
"\xc4": 667,
"\xc5": 667,
"\xc6": 944,
"\xc7": 667,
"\xc8": 667,
"\xc9": 667,
"\xca": 667,
"\xcb": 667,
"\xcc": 389,
"\xcd": 389,
"\xce": 389,
"\xcf": 389,
"\xd0": 722,
"\xd1": 722,
"\xd2": 722,
"\xd3": 722,
"\xd4": 722,
"\xd5": 722,
"\xd6": 722,
"\xd7": 570,
"\xd8": 722,
"\xd9": 722,
"\xda": 722,
"\xdb": 722,
"\xdc": 722,
"\xdd": 611,
"\xde": 611,
"\xdf": 500,
"\xe0": 500,
"\xe1": 500,
"\xe2": 500,
"\xe3": 500,
"\xe4": 500,
"\xe5": 500,
"\xe6": 722,
"\xe7": 444,
"\xe8": 444,
"\xe9": 444,
"\xea": 444,
"\xeb": 444,
"\xec": 278,
"\xed": 278,
"\xee": 278,
"\xef": 278,
"\xf0": 500,
"\xf1": 556,
"\xf2": 500,
"\xf3": 500,
"\xf4": 500,
"\xf5": 500,
"\xf6": 500,
"\xf7": 570,
"\xf8": 500,
"\xf9": 556,
"\xfa": 556,
"\xfb": 556,
"\xfc": 556,
"\xfd": 444,
"\xfe": 500,
"\xff": 444,
"\u0100": 667,
"\u0101": 500,
"\u0102": 667,
"\u0103": 500,
"\u0104": 667,
"\u0105": 500,
"\u0106": 667,
"\u0107": 444,
"\u010c": 667,
"\u010d": 444,
"\u010e": 722,
"\u010f": 608,
"\u0110": 722,
"\u0111": 500,
"\u0112": 667,
"\u0113": 444,
"\u0116": 667,
"\u0117": 444,
"\u0118": 667,
"\u0119": 444,
"\u011a": 667,
"\u011b": 444,
"\u011e": 722,
"\u011f": 500,
"\u0122": 722,
"\u0123": 500,
"\u012a": 389,
"\u012b": 278,
"\u012e": 389,
"\u012f": 278,
"\u0130": 389,
"\u0131": 278,
"\u0136": 667,
"\u0137": 500,
"\u0139": 611,
"\u013a": 278,
"\u013b": 611,
"\u013c": 278,
"\u013d": 611,
"\u013e": 382,
"\u0141": 611,
"\u0142": 278,
"\u0143": 722,
"\u0144": 556,
"\u0145": 722,
"\u0146": 556,
"\u0147": 722,
"\u0148": 556,
"\u014c": 722,
"\u014d": 500,
"\u0150": 722,
"\u0151": 500,
"\u0152": 944,
"\u0153": 722,
"\u0154": 667,
"\u0155": 389,
"\u0156": 667,
"\u0157": 389,
"\u0158": 667,
"\u0159": 389,
"\u015a": 556,
"\u015b": 389,
"\u015e": 556,
"\u015f": 389,
"\u0160": 556,
"\u0161": 389,
"\u0162": 611,
"\u0163": 278,
"\u0164": 611,
"\u0165": 366,
"\u016a": 722,
"\u016b": 556,
"\u016e": 722,
"\u016f": 556,
"\u0170": 722,
"\u0171": 556,
"\u0172": 722,
"\u0173": 556,
"\u0178": 611,
"\u0179": 611,
"\u017a": 389,
"\u017b": 611,
"\u017c": 389,
"\u017d": 611,
"\u017e": 389,
"\u0192": 500,
"\u0218": 556,
"\u0219": 389,
"\u02c6": 333,
"\u02c7": 333,
"\u02d8": 333,
"\u02d9": 333,
"\u02da": 333,
"\u02db": 333,
"\u02dc": 333,
"\u02dd": 333,
"\u2013": 500,
"\u2014": 1000,
"\u2018": 333,
"\u2019": 333,
"\u201a": 333,
"\u201c": 500,
"\u201d": 500,
"\u201e": 500,
"\u2020": 500,
"\u2021": 500,
"\u2022": 350,
"\u2026": 1000,
"\u2030": 1000,
"\u2039": 333,
"\u203a": 333,
"\u2044": 167,
"\u2122": 1000,
"\u2202": 494,
"\u2206": 612,
"\u2211": 600,
"\u2212": 606,
"\u221a": 549,
"\u2260": 549,
"\u2264": 549,
"\u2265": 549,
"\u25ca": 494,
"\uf6c3": 250,
"\ufb01": 556,
"\ufb02": 556,
},
),
"Times-Italic": (
{
"FontName": "Times-Italic",
"Descent": -217.0,
"FontBBox": (-169.0, -217.0, 1010.0, 883.0),
"FontWeight": "Medium",
"CapHeight": 653.0,
"FontFamily": "Times",
"Flags": 0,
"XHeight": 441.0,
"ItalicAngle": -15.5,
"Ascent": 683.0,
},
{
" ": 250,
"!": 333,
'"': 420,
"#": 500,
"$": 500,
"%": 833,
"&": 778,
"'": 214,
"(": 333,
")": 333,
"*": 500,
"+": 675,
",": 250,
"-": 333,
".": 250,
"/": 278,
"0": 500,
"1": 500,
"2": 500,
"3": 500,
"4": 500,
"5": 500,
"6": 500,
"7": 500,
"8": 500,
"9": 500,
":": 333,
";": 333,
"<": 675,
"=": 675,
">": 675,
"?": 500,
"@": 920,
"A": 611,
"B": 611,
"C": 667,
"D": 722,
"E": 611,
"F": 611,
"G": 722,
"H": 722,
"I": 333,
"J": 444,
"K": 667,
"L": 556,
"M": 833,
"N": 667,
"O": 722,
"P": 611,
"Q": 722,
"R": 611,
"S": 500,
"T": 556,
"U": 722,
"V": 611,
"W": 833,
"X": 611,
"Y": 556,
"Z": 556,
"[": 389,
"\\": 278,
"]": 389,
"^": 422,
"_": 500,
"`": 333,
"a": 500,
"b": 500,
"c": 444,
"d": 500,
"e": 444,
"f": 278,
"g": 500,
"h": 500,
"i": 278,
"j": 278,
"k": 444,
"l": 278,
"m": 722,
"n": 500,
"o": 500,
"p": 500,
"q": 500,
"r": 389,
"s": 389,
"t": 278,
"u": 500,
"v": 444,
"w": 667,
"x": 444,
"y": 444,
"z": 389,
"{": 400,
"|": 275,
"}": 400,
"~": 541,
"\xa1": 389,
"\xa2": 500,
"\xa3": 500,
"\xa4": 500,
"\xa5": 500,
"\xa6": 275,
"\xa7": 500,
"\xa8": 333,
"\xa9": 760,
"\xaa": 276,
"\xab": 500,
"\xac": 675,
"\xae": 760,
"\xaf": 333,
"\xb0": 400,
"\xb1": 675,
"\xb2": 300,
"\xb3": 300,
"\xb4": 333,
"\xb5": 500,
"\xb6": 523,
"\xb7": 250,
"\xb8": 333,
"\xb9": 300,
"\xba": 310,
"\xbb": 500,
"\xbc": 750,
"\xbd": 750,
"\xbe": 750,
"\xbf": 500,
"\xc0": 611,
"\xc1": 611,
"\xc2": 611,
"\xc3": 611,
"\xc4": 611,
"\xc5": 611,
"\xc6": 889,
"\xc7": 667,
"\xc8": 611,
"\xc9": 611,
"\xca": 611,
"\xcb": 611,
"\xcc": 333,
"\xcd": 333,
"\xce": 333,
"\xcf": 333,
"\xd0": 722,
"\xd1": 667,
"\xd2": 722,
"\xd3": 722,
"\xd4": 722,
"\xd5": 722,
"\xd6": 722,
"\xd7": 675,
"\xd8": 722,
"\xd9": 722,
"\xda": 722,
"\xdb": 722,
"\xdc": 722,
"\xdd": 556,
"\xde": 611,
"\xdf": 500,
"\xe0": 500,
"\xe1": 500,
"\xe2": 500,
"\xe3": 500,
"\xe4": 500,
"\xe5": 500,
"\xe6": 667,
"\xe7": 444,
"\xe8": 444,
"\xe9": 444,
"\xea": 444,
"\xeb": 444,
"\xec": 278,
"\xed": 278,
"\xee": 278,
"\xef": 278,
"\xf0": 500,
"\xf1": 500,
"\xf2": 500,
"\xf3": 500,
"\xf4": 500,
"\xf5": 500,
"\xf6": 500,
"\xf7": 675,
"\xf8": 500,
"\xf9": 500,
"\xfa": 500,
"\xfb": 500,
"\xfc": 500,
"\xfd": 444,
"\xfe": 500,
"\xff": 444,
"\u0100": 611,
"\u0101": 500,
"\u0102": 611,
"\u0103": 500,
"\u0104": 611,
"\u0105": 500,
"\u0106": 667,
"\u0107": 444,
"\u010c": 667,
"\u010d": 444,
"\u010e": 722,
"\u010f": 544,
"\u0110": 722,
"\u0111": 500,
"\u0112": 611,
"\u0113": 444,
"\u0116": 611,
"\u0117": 444,
"\u0118": 611,
"\u0119": 444,
"\u011a": 611,
"\u011b": 444,
"\u011e": 722,
"\u011f": 500,
"\u0122": 722,
"\u0123": 500,
"\u012a": 333,
"\u012b": 278,
"\u012e": 333,
"\u012f": 278,
"\u0130": 333,
"\u0131": 278,
"\u0136": 667,
"\u0137": 444,
"\u0139": 556,
"\u013a": 278,
"\u013b": 556,
"\u013c": 278,
"\u013d": 611,
"\u013e": 300,
"\u0141": 556,
"\u0142": 278,
"\u0143": 667,
"\u0144": 500,
"\u0145": 667,
"\u0146": 500,
"\u0147": 667,
"\u0148": 500,
"\u014c": 722,
"\u014d": 500,
"\u0150": 722,
"\u0151": 500,
"\u0152": 944,
"\u0153": 667,
"\u0154": 611,
"\u0155": 389,
"\u0156": 611,
"\u0157": 389,
"\u0158": 611,
"\u0159": 389,
"\u015a": 500,
"\u015b": 389,
"\u015e": 500,
"\u015f": 389,
"\u0160": 500,
"\u0161": 389,
"\u0162": 556,
"\u0163": 278,
"\u0164": 556,
"\u0165": 300,
"\u016a": 722,
"\u016b": 500,
"\u016e": 722,
"\u016f": 500,
"\u0170": 722,
"\u0171": 500,
"\u0172": 722,
"\u0173": 500,
"\u0178": 556,
"\u0179": 556,
"\u017a": 389,
"\u017b": 556,
"\u017c": 389,
"\u017d": 556,
"\u017e": 389,
"\u0192": 500,
"\u0218": 500,
"\u0219": 389,
"\u02c6": 333,
"\u02c7": 333,
"\u02d8": 333,
"\u02d9": 333,
"\u02da": 333,
"\u02db": 333,
"\u02dc": 333,
"\u02dd": 333,
"\u2013": 500,
"\u2014": 889,
"\u2018": 333,
"\u2019": 333,
"\u201a": 333,
"\u201c": 556,
"\u201d": 556,
"\u201e": 556,
"\u2020": 500,
"\u2021": 500,
"\u2022": 350,
"\u2026": 889,
"\u2030": 1000,
"\u2039": 333,
"\u203a": 333,
"\u2044": 167,
"\u2122": 980,
"\u2202": 476,
"\u2206": 612,
"\u2211": 600,
"\u2212": 675,
"\u221a": 453,
"\u2260": 549,
"\u2264": 549,
"\u2265": 549,
"\u25ca": 471,
"\uf6c3": 250,
"\ufb01": 500,
"\ufb02": 500,
},
),
"Times-Roman": (
{
"FontName": "Times-Roman",
"Descent": -217.0,
"FontBBox": (-168.0, -218.0, 1000.0, 898.0),
"FontWeight": "Roman",
"CapHeight": 662.0,
"FontFamily": "Times",
"Flags": 0,
"XHeight": 450.0,
"ItalicAngle": 0.0,
"Ascent": 683.0,
},
{
" ": 250,
"!": 333,
'"': 408,
"#": 500,
"$": 500,
"%": 833,
"&": 778,
"'": 180,
"(": 333,
")": 333,
"*": 500,
"+": 564,
",": 250,
"-": 333,
".": 250,
"/": 278,
"0": 500,
"1": 500,
"2": 500,
"3": 500,
"4": 500,
"5": 500,
"6": 500,
"7": 500,
"8": 500,
"9": 500,
":": 278,
";": 278,
"<": 564,
"=": 564,
">": 564,
"?": 444,
"@": 921,
"A": 722,
"B": 667,
"C": 667,
"D": 722,
"E": 611,
"F": 556,
"G": 722,
"H": 722,
"I": 333,
"J": 389,
"K": 722,
"L": 611,
"M": 889,
"N": 722,
"O": 722,
"P": 556,
"Q": 722,
"R": 667,
"S": 556,
"T": 611,
"U": 722,
"V": 722,
"W": 944,
"X": 722,
"Y": 722,
"Z": 611,
"[": 333,
"\\": 278,
"]": 333,
"^": 469,
"_": 500,
"`": 333,
"a": 444,
"b": 500,
"c": 444,
"d": 500,
"e": 444,
"f": 333,
"g": 500,
"h": 500,
"i": 278,
"j": 278,
"k": 500,
"l": 278,
"m": 778,
"n": 500,
"o": 500,
"p": 500,
"q": 500,
"r": 333,
"s": 389,
"t": 278,
"u": 500,
"v": 500,
"w": 722,
"x": 500,
"y": 500,
"z": 444,
"{": 480,
"|": 200,
"}": 480,
"~": 541,
"\xa1": 333,
"\xa2": 500,
"\xa3": 500,
"\xa4": 500,
"\xa5": 500,
"\xa6": 200,
"\xa7": 500,
"\xa8": 333,
"\xa9": 760,
"\xaa": 276,
"\xab": 500,
"\xac": 564,
"\xae": 760,
"\xaf": 333,
"\xb0": 400,
"\xb1": 564,
"\xb2": 300,
"\xb3": 300,
"\xb4": 333,
"\xb5": 500,
"\xb6": 453,
"\xb7": 250,
"\xb8": 333,
"\xb9": 300,
"\xba": 310,
"\xbb": 500,
"\xbc": 750,
"\xbd": 750,
"\xbe": 750,
"\xbf": 444,
"\xc0": 722,
"\xc1": 722,
"\xc2": 722,
"\xc3": 722,
"\xc4": 722,
"\xc5": 722,
"\xc6": 889,
"\xc7": 667,
"\xc8": 611,
"\xc9": 611,
"\xca": 611,
"\xcb": 611,
"\xcc": 333,
"\xcd": 333,
"\xce": 333,
"\xcf": 333,
"\xd0": 722,
"\xd1": 722,
"\xd2": 722,
"\xd3": 722,
"\xd4": 722,
"\xd5": 722,
"\xd6": 722,
"\xd7": 564,
"\xd8": 722,
"\xd9": 722,
"\xda": 722,
"\xdb": 722,
"\xdc": 722,
"\xdd": 722,
"\xde": 556,
"\xdf": 500,
"\xe0": 444,
"\xe1": 444,
"\xe2": 444,
"\xe3": 444,
"\xe4": 444,
"\xe5": 444,
"\xe6": 667,
"\xe7": 444,
"\xe8": 444,
"\xe9": 444,
"\xea": 444,
"\xeb": 444,
"\xec": 278,
"\xed": 278,
"\xee": 278,
"\xef": 278,
"\xf0": 500,
"\xf1": 500,
"\xf2": 500,
"\xf3": 500,
"\xf4": 500,
"\xf5": 500,
"\xf6": 500,
"\xf7": 564,
"\xf8": 500,
"\xf9": 500,
"\xfa": 500,
"\xfb": 500,
"\xfc": 500,
"\xfd": 500,
"\xfe": 500,
"\xff": 500,
"\u0100": 722,
"\u0101": 444,
"\u0102": 722,
"\u0103": 444,
"\u0104": 722,
"\u0105": 444,
"\u0106": 667,
"\u0107": 444,
"\u010c": 667,
"\u010d": 444,
"\u010e": 722,
"\u010f": 588,
"\u0110": 722,
"\u0111": 500,
"\u0112": 611,
"\u0113": 444,
"\u0116": 611,
"\u0117": 444,
"\u0118": 611,
"\u0119": 444,
"\u011a": 611,
"\u011b": 444,
"\u011e": 722,
"\u011f": 500,
"\u0122": 722,
"\u0123": 500,
"\u012a": 333,
"\u012b": 278,
"\u012e": 333,
"\u012f": 278,
"\u0130": 333,
"\u0131": 278,
"\u0136": 722,
"\u0137": 500,
"\u0139": 611,
"\u013a": 278,
"\u013b": 611,
"\u013c": 278,
"\u013d": 611,
"\u013e": 344,
"\u0141": 611,
"\u0142": 278,
"\u0143": 722,
"\u0144": 500,
"\u0145": 722,
"\u0146": 500,
"\u0147": 722,
"\u0148": 500,
"\u014c": 722,
"\u014d": 500,
"\u0150": 722,
"\u0151": 500,
"\u0152": 889,
"\u0153": 722,
"\u0154": 667,
"\u0155": 333,
"\u0156": 667,
"\u0157": 333,
"\u0158": 667,
"\u0159": 333,
"\u015a": 556,
"\u015b": 389,
"\u015e": 556,
"\u015f": 389,
"\u0160": 556,
"\u0161": 389,
"\u0162": 611,
"\u0163": 278,
"\u0164": 611,
"\u0165": 326,
"\u016a": 722,
"\u016b": 500,
"\u016e": 722,
"\u016f": 500,
"\u0170": 722,
"\u0171": 500,
"\u0172": 722,
"\u0173": 500,
"\u0178": 722,
"\u0179": 611,
"\u017a": 444,
"\u017b": 611,
"\u017c": 444,
"\u017d": 611,
"\u017e": 444,
"\u0192": 500,
"\u0218": 556,
"\u0219": 389,
"\u02c6": 333,
"\u02c7": 333,
"\u02d8": 333,
"\u02d9": 333,
"\u02da": 333,
"\u02db": 333,
"\u02dc": 333,
"\u02dd": 333,
"\u2013": 500,
"\u2014": 1000,
"\u2018": 333,
"\u2019": 333,
"\u201a": 333,
"\u201c": 444,
"\u201d": 444,
"\u201e": 444,
"\u2020": 500,
"\u2021": 500,
"\u2022": 350,
"\u2026": 1000,
"\u2030": 1000,
"\u2039": 333,
"\u203a": 333,
"\u2044": 167,
"\u2122": 980,
"\u2202": 476,
"\u2206": 612,
"\u2211": 600,
"\u2212": 564,
"\u221a": 453,
"\u2260": 549,
"\u2264": 549,
"\u2265": 549,
"\u25ca": 471,
"\uf6c3": 250,
"\ufb01": 556,
"\ufb02": 556,
},
),
"ZapfDingbats": (
{
"FontName": "ZapfDingbats",
"FontBBox": (-1.0, -143.0, 981.0, 820.0),
"FontWeight": "Medium",
"FontFamily": "ITC",
"Flags": 0,
"ItalicAngle": 0.0,
},
{
"\x01": 974,
"\x02": 961,
"\x03": 980,
"\x04": 719,
"\x05": 789,
"\x06": 494,
"\x07": 552,
"\x08": 537,
"\t": 577,
"\n": 692,
"\x0b": 960,
"\x0c": 939,
"\r": 549,
"\x0e": 855,
"\x0f": 911,
"\x10": 933,
"\x11": 945,
"\x12": 974,
"\x13": 755,
"\x14": 846,
"\x15": 762,
"\x16": 761,
"\x17": 571,
"\x18": 677,
"\x19": 763,
"\x1a": 760,
"\x1b": 759,
"\x1c": 754,
"\x1d": 786,
"\x1e": 788,
"\x1f": 788,
" ": 790,
"!": 793,
'"': 794,
"#": 816,
"$": 823,
"%": 789,
"&": 841,
"'": 823,
"(": 833,
")": 816,
"*": 831,
"+": 923,
",": 744,
"-": 723,
".": 749,
"/": 790,
"0": 792,
"1": 695,
"2": 776,
"3": 768,
"4": 792,
"5": 759,
"6": 707,
"7": 708,
"8": 682,
"9": 701,
":": 826,
";": 815,
"<": 789,
"=": 789,
">": 707,
"?": 687,
"@": 696,
"A": 689,
"B": 786,
"C": 787,
"D": 713,
"E": 791,
"F": 785,
"G": 791,
"H": 873,
"I": 761,
"J": 762,
"K": 759,
"L": 892,
"M": 892,
"N": 788,
"O": 784,
"Q": 438,
"R": 138,
"S": 277,
"T": 415,
"U": 509,
"V": 410,
"W": 234,
"X": 234,
"Y": 390,
"Z": 390,
"[": 276,
"\\": 276,
"]": 317,
"^": 317,
"_": 334,
"`": 334,
"a": 392,
"b": 392,
"c": 668,
"d": 668,
"e": 732,
"f": 544,
"g": 544,
"h": 910,
"i": 911,
"j": 667,
"k": 760,
"l": 760,
"m": 626,
"n": 694,
"o": 595,
"p": 776,
"u": 690,
"v": 791,
"w": 790,
"x": 788,
"y": 788,
"z": 788,
"{": 788,
"|": 788,
"}": 788,
"~": 788,
"\x7f": 788,
"\x80": 788,
"\x81": 788,
"\x82": 788,
"\x83": 788,
"\x84": 788,
"\x85": 788,
"\x86": 788,
"\x87": 788,
"\x88": 788,
"\x89": 788,
"\x8a": 788,
"\x8b": 788,
"\x8c": 788,
"\x8d": 788,
"\x8e": 788,
"\x8f": 788,
"\x90": 788,
"\x91": 788,
"\x92": 788,
"\x93": 788,
"\x94": 788,
"\x95": 788,
"\x96": 788,
"\x97": 788,
"\x98": 788,
"\x99": 788,
"\x9a": 788,
"\x9b": 788,
"\x9c": 788,
"\x9d": 788,
"\x9e": 788,
"\x9f": 788,
"\xa0": 894,
"\xa1": 838,
"\xa2": 924,
"\xa3": 1016,
"\xa4": 458,
"\xa5": 924,
"\xa6": 918,
"\xa7": 927,
"\xa8": 928,
"\xa9": 928,
"\xaa": 834,
"\xab": 873,
"\xac": 828,
"\xad": 924,
"\xae": 917,
"\xaf": 930,
"\xb0": 931,
"\xb1": 463,
"\xb2": 883,
"\xb3": 836,
"\xb4": 867,
"\xb5": 696,
"\xb6": 874,
"\xb7": 760,
"\xb8": 946,
"\xb9": 865,
"\xba": 967,
"\xbb": 831,
"\xbc": 873,
"\xbd": 927,
"\xbe": 970,
"\xbf": 918,
"\xc0": 748,
"\xc1": 836,
"\xc2": 771,
"\xc3": 888,
"\xc4": 748,
"\xc5": 771,
"\xc6": 888,
"\xc7": 867,
"\xc8": 696,
"\xc9": 874,
"\xca": 974,
"\xcb": 762,
"\xcc": 759,
"\xcd": 509,
"\xce": 410,
},
),
} | PypiClean |
/ANYstructure-4.10.tar.gz/ANYstructure-4.10/any_files/example_data.py |
try:
import any_files.calc_loads as load
import any_files.calc_structure as calc_structure
import any_files.make_grid_numpy as grid
except ModuleNotFoundError:
import ANYstructure.any_files.calc_loads as load
import ANYstructure.any_files.calc_structure as calc_structure
import ANYstructure.any_files.make_grid_numpy as grid
import random
# Grouping of structure-type identifiers by orientation / watertightness.
# Key order and list contents are preserved exactly, since the obj_dict_*
# examples below embed this dict by reference.
structure_types = {
    'vertical': ['BBS', 'SIDE_SHELL', 'SSS'],
    'horizontal': ['BOTTOM', 'BBT', 'HOPPER', 'MD'],
    'non-wt': ['FRAME', 'GENERAL_INTERNAL_NONWT'],
    'internals': [
        'INNER_SIDE',
        'FRAME_WT',
        'GENERAL_INTERNAL_WT',
        'INTERNAL_ZERO_STRESS_WT',
        'INTERNAL_LOW_STRESS_WT',
    ],
}
# Example stiffened-panel property set: T-profile BOTTOM panel
# ('panel or shell' == 'panel').  Every entry is a [value, unit-string]
# pair; 'structure_types' embeds the shared grouping dict defined above.
obj_dict = {'mat_yield': [355e6, 'Pa'], 'mat_factor': [1.10, ''],'span': [3.3, 'm'], 'spacing': [0.68, 'm'],
            'plate_thk': [0.025, 'm'],
            'stf_web_height': [0.250297358, 'm'], 'stf_web_thk': [0.012, 'm'], 'stf_flange_width': [0.052, 'm'],
            'stf_flange_thk': [0.029702642, 'm'], 'structure_type': ['BOTTOM', ''], 'plate_kpp': [1, ''],
            'stf_kps': [1, ''], 'stf_km1': [12, ''], 'stf_km2': [24, ''], 'stf_km3': [12, ''],
            'sigma_y1': [100, 'MPa'], 'sigma_y2': [100, 'MPa'], 'sigma_x2': [102.7, 'MPa'], 'sigma_x1': [102.7, 'MPa'],
            'tau_xy': [5, 'MPa'],
            'stf_type': ['T', ''], 'structure_types': [structure_types, ''], 'zstar_optimization': [True, ''],
            'puls buckling method':[1,''], 'puls boundary':['Int',''], 'puls stiffener end':['C',''],
            'puls sp or up':['SP',''], 'puls up boundary' :['SSSS',''], 'panel or shell': ['panel', ''],
            'pressure side': ['both sides', ''], 'girder_lg': [5, 'm']}
# --- Cylinder example property sets ------------------------------------------
# Six variants with 'panel or shell' == 'shell': longitudinal stiffeners,
# ring stiffeners and heavy rings, in two parameter series.  Same
# [value, unit-string] pair convention as obj_dict above.
obj_dict_cyl_long = {'mat_yield': [355e6, 'Pa'], 'mat_factor': [1.15, ''],'span': [5, 'm'], 'spacing': [0.6, 'm'],
                     'plate_thk': [0.015, 'm'],
                     'stf_web_height': [0.38, 'm'], 'stf_web_thk': [0.012, 'm'], 'stf_flange_width': [0.15, 'm'],
                     'stf_flange_thk': [0.02, 'm'], 'structure_type': ['BOTTOM', ''], 'plate_kpp': [1, ''],
                     'stf_kps': [1, ''], 'stf_km1': [12, ''], 'stf_km2': [24, ''], 'stf_km3': [12, ''],
                     'sigma_y1': [80, 'MPa'], 'sigma_y2': [80, 'MPa'], 'sigma_x2': [80, 'MPa'], 'sigma_x1': [80, 'MPa'], 'tau_xy': [5, 'MPa'],
                     'stf_type': ['T', ''], 'structure_types': [structure_types, ''], 'zstar_optimization': [True, ''],
                     'puls buckling method':[2,''], 'puls boundary':['Int',''], 'puls stiffener end':['C',''],
                     'puls sp or up':['SP',''], 'puls up boundary' :['SSSS',''] , 'panel or shell': ['shell', ''] }
# Ring-stiffener variant (L-bulb profile).
obj_dict_cyl_ring = {'mat_yield': [355e6, 'Pa'], 'mat_factor': [1.15, ''],'span': [5, 'm'], 'spacing': [0.6, 'm'],
                     'plate_thk': [0.015, 'm'],
                     'stf_web_height': [0.4, 'm'], 'stf_web_thk': [0.012, 'm'], 'stf_flange_width': [0.046, 'm'],
                     'stf_flange_thk': [0.024957, 'm'], 'structure_type': ['BOTTOM', ''], 'plate_kpp': [1, ''],
                     'stf_kps': [1, ''], 'stf_km1': [12, ''], 'stf_km2': [24, ''], 'stf_km3': [12, ''],
                     'sigma_y1': [80, 'MPa'], 'sigma_y2': [80, 'MPa'], 'sigma_x2': [80, 'MPa'], 'sigma_x1': [80, 'MPa'], 'tau_xy': [5, 'MPa'],
                     'stf_type': ['L-bulb', ''], 'structure_types': [structure_types, ''], 'zstar_optimization': [True, ''],
                     'puls buckling method':[2,''], 'puls boundary':['Int',''], 'puls stiffener end':['C',''],
                     'puls sp or up':['SP',''], 'puls up boundary' :['SSSS',''] , 'panel or shell': ['shell', ''] }
# Heavy ring (ring frame) variant (L-bulb profile).
obj_dict_cyl_heavy_ring = {'mat_yield': [355e6, 'Pa'], 'mat_factor': [1.15, ''],'span': [5, 'm'], 'spacing': [0.6, 'm'],
                           'plate_thk': [0.015, 'm'],
                           'stf_web_height': [0.77, 'm'], 'stf_web_thk': [0.014, 'm'], 'stf_flange_width': [0.2, 'm'],
                           'stf_flange_thk': [0.03, 'm'], 'structure_type': ['BOTTOM', ''], 'plate_kpp': [1, ''],
                           'stf_kps': [1, ''], 'stf_km1': [12, ''], 'stf_km2': [24, ''], 'stf_km3': [12, ''],
                           'sigma_y1': [80, 'MPa'], 'sigma_y2': [80, 'MPa'], 'sigma_x2': [80, 'MPa'], 'sigma_x1': [80, 'MPa'], 'tau_xy': [5, 'MPa'],
                           'stf_type': ['L-bulb', ''], 'structure_types': [structure_types, ''], 'zstar_optimization': [True, ''],
                           'puls buckling method':[2,''], 'puls boundary':['Int',''], 'puls stiffener end':['C',''],
                           'puls sp or up':['SP',''], 'puls up boundary' :['SSSS',''] , 'panel or shell': ['shell', ''] }
# Second series: longitudinal stiffeners (web height derived from a fixed
# 0.24 m total profile height minus the flange thickness).
obj_dict_cyl_long2 = {'mat_yield': [355e6, 'Pa'], 'mat_factor': [1.15, ''],'span': [5, 'm'], 'spacing': [0.65, 'm'],
                      'plate_thk': [0.02, 'm'],
                      'stf_web_height': [0.24-0.0249572753957594, 'm'], 'stf_web_thk': [0.012, 'm'], 'stf_flange_width': [0.046, 'm'],
                      'stf_flange_thk': [0.0249572753957594, 'm'], 'structure_type': ['BOTTOM', ''], 'plate_kpp': [1, ''],
                      'stf_kps': [1, ''], 'stf_km1': [12, ''], 'stf_km2': [24, ''], 'stf_km3': [12, ''],
                      'sigma_y1': [80, 'MPa'], 'sigma_y2': [80, 'MPa'], 'sigma_x2': [80, 'MPa'], 'sigma_x1': [80, 'MPa'], 'tau_xy': [5, 'MPa'],
                      'stf_type': ['L-bulb', ''], 'structure_types': [structure_types, ''], 'zstar_optimization': [True, ''],
                      'puls buckling method':[2,''], 'puls boundary':['Int',''], 'puls stiffener end':['C',''],
                      'puls sp or up':['SP',''], 'puls up boundary' :['SSSS',''] , 'panel or shell': ['shell', ''] }
# Second series: ring stiffeners (T profile).
obj_dict_cyl_ring2 = {'mat_yield': [355e6, 'Pa'], 'mat_factor': [1.15, ''],'span': [5, 'm'], 'spacing': [0.7, 'm'],
                      'plate_thk': [0.020, 'm'],
                      'stf_web_height': [0.3, 'm'], 'stf_web_thk': [0.012, 'm'], 'stf_flange_width': [0.12, 'm'],
                      'stf_flange_thk': [0.02, 'm'], 'structure_type': ['BOTTOM', ''], 'plate_kpp': [1, ''],
                      'stf_kps': [1, ''], 'stf_km1': [12, ''], 'stf_km2': [24, ''], 'stf_km3': [12, ''],
                      'sigma_y1': [80, 'MPa'], 'sigma_y2': [80, 'MPa'], 'sigma_x2': [80, 'MPa'], 'sigma_x1': [80, 'MPa'], 'tau_xy': [5, 'MPa'],
                      'stf_type': ['T', ''], 'structure_types': [structure_types, ''], 'zstar_optimization': [True, ''],
                      'puls buckling method':[2,''], 'puls boundary':['Int',''], 'puls stiffener end':['C',''],
                      'puls sp or up':['SP',''], 'puls up boundary' :['SSSS',''] , 'panel or shell': ['shell', ''] }
# Second series: heavy ring (T profile).
obj_dict_cyl_heavy_ring2 = {'mat_yield': [355e6, 'Pa'], 'mat_factor': [1.15, ''],'span': [5, 'm'], 'spacing': [0.6, 'm'],
                            'plate_thk': [0.015, 'm'],
                            'stf_web_height': [0.7, 'm'], 'stf_web_thk': [0.016, 'm'], 'stf_flange_width': [0.2, 'm'],
                            'stf_flange_thk': [0.03, 'm'], 'structure_type': ['BOTTOM', ''], 'plate_kpp': [1, ''],
                            'stf_kps': [1, ''], 'stf_km1': [12, ''], 'stf_km2': [24, ''], 'stf_km3': [12, ''],
                            'sigma_y1': [80, 'MPa'], 'sigma_y2': [80, 'MPa'], 'sigma_x2': [80, 'MPa'], 'sigma_x1': [80, 'MPa'], 'tau_xy': [5, 'MPa'],
                            'stf_type': ['T', ''], 'structure_types': [structure_types, ''], 'zstar_optimization': [True, ''],
                            'puls buckling method':[2,''], 'puls boundary':['Int',''], 'puls stiffener end':['C',''],
                            'puls sp or up':['SP',''], 'puls up boundary' :['SSSS',''] , 'panel or shell': ['shell', ''] }
# Heavy panel example (T profile, 'panel or shell' == 'panel').
# NOTE(review): 'span': [3700, 'm'] is three orders of magnitude above every
# other example span here (2.5-5 m) -- looks like millimetres mislabelled as
# metres; confirm before relying on this value.
obj_dict_heavy = {'mat_yield': [355e6, 'Pa'], 'mat_factor': [1.15, ''],'span': [3700, 'm'], 'spacing': [0.75, 'm'],
                  'plate_thk': [0.018, 'm'],
                  'stf_web_height': [0.500, 'm'], 'stf_web_thk': [0.0120, 'm'], 'stf_flange_width': [0.150, 'm'],
                  'stf_flange_thk': [0.02, 'm'], 'structure_type': ['BOTTOM', ''], 'plate_kpp': [1, ''],
                  'stf_kps': [1, ''], 'stf_km1': [12, ''], 'stf_km2': [24, ''], 'stf_km3': [12, ''],
                  'sigma_y1': [80, 'MPa'], 'sigma_y2': [80, 'MPa'], 'sigma_x2': [80, 'MPa'], 'sigma_x1': [80, 'MPa'], 'tau_xy': [5, 'MPa'],
                  'stf_type': ['T', ''], 'structure_types': [structure_types, ''], 'zstar_optimization': [True, ''],
                  'puls buckling method':[2,''], 'puls boundary':['Int',''], 'puls stiffener end':['C',''],
                  'puls sp or up':['SP',''], 'puls up boundary' :['SSSS',''], 'panel or shell': ['panel', ''],
                  'pressure side': ['both sides', '']}
# Second generic panel example (asymmetric sigma_x1/sigma_x2).
obj_dict2 = {'mat_yield': [355e6, 'Pa'], 'mat_factor': [1.15, ''],'span': [4, 'm'], 'spacing': [0.7, 'm'],
             'plate_thk': [0.018, 'm'],
             'stf_web_height': [0.36, 'm'], 'stf_web_thk': [0.012, 'm'], 'stf_flange_width': [0.15, 'm'],
             'stf_flange_thk': [0.02, 'm'], 'structure_type': ['BOTTOM', ''], 'plate_kpp': [1, ''],
             'stf_kps': [1, ''], 'stf_km1': [12, ''], 'stf_km2': [24, ''], 'stf_km3': [12, ''],
             'sigma_y1': [100, 'MPa'], 'sigma_y2': [100, 'MPa'], 'sigma_x2': [80, 'MPa'], 'sigma_x1': [50, 'MPa'], 'tau_xy': [5, 'MPa'],
             'stf_type': ['T', ''], 'structure_types': [structure_types, ''], 'zstar_optimization': [True, ''],
             'puls buckling method':[2,''], 'puls boundary':['Int',''], 'puls stiffener end':['C',''],
             'puls sp or up':['SP',''], 'puls up boundary' :['SSSS',''] }
# SIDE_SHELL panel reproducing an error case (per its name -- which check it
# exercises is not visible here; confirm against the test suite).
obj_dict_sec_error = {'mat_yield': [355e6, 'Pa'], 'mat_factor': [1.15, ''],'span': [3.5, 'm'], 'spacing': [0.875, 'm'],
                      'plate_thk': [0.023, 'm'],
                      'stf_web_height': [0.41, 'm'], 'stf_web_thk': [0.012, 'm'], 'stf_flange_width': [0.17, 'm'],
                      'stf_flange_thk': [0.015, 'm'], 'structure_type': ['SIDE_SHELL', ''], 'plate_kpp': [1, ''],
                      'stf_kps': [1, ''], 'stf_km1': [12, ''], 'stf_km2': [24, ''], 'stf_km3': [12, ''],
                      'sigma_y1': [93, 'MPa'], 'sigma_y2': [93, 'MPa'], 'sigma_x2': [80, 'MPa'], 'sigma_x1': [39.7, 'MPa'], 'tau_xy': [2.8, 'MPa'],
                      'stf_type': ['T', ''], 'structure_types': [structure_types, ''], 'zstar_optimization': [True, ''],
                      'puls buckling method':[2,''], 'puls boundary':['Int',''], 'puls stiffener end':['C',''],
                      'puls sp or up':['SP',''], 'puls up boundary' :['SSSS',''] }
# L-profile stiffener example (note plate_kpp = 0.5 here).
obj_dict_L = {'mat_yield': [355e6, 'Pa'], 'mat_factor': [1.15, ''], 'span': [3.6, 'm'], 'spacing': [0.82, 'm'],
              'plate_thk': [0.018, 'm'],
              'stf_web_height': [0.4, 'm'], 'stf_web_thk': [0.014, 'm'], 'stf_flange_width': [0.072, 'm'],
              'stf_flange_thk': [0.0439, 'm'], 'structure_type': ['BOTTOM', ''], 'plate_kpp': [0.5, ''],
              'stf_kps': [1, ''], 'stf_km1': [12, ''], 'stf_km2': [24, ''], 'stf_km3': [12, ''],
              'sigma_y1': [102, 'MPa'], 'sigma_y2': [106.9, 'MPa'], 'sigma_x2': [66.8, 'MPa'], 'sigma_x1': [66.8, 'MPa'], 'tau_xy': [20, 'MPa'],
              'stf_type': ['L', ''], 'structure_types': [structure_types, ''], 'zstar_optimization': [True, ''],
              'puls buckling method':[2,''], 'puls boundary':['Int',''], 'puls stiffener end':['C',''],
              'puls sp or up':['SP',''], 'puls up boundary' :['SSSS',''] , 'panel or shell': ['panel', ''], 'pressure side': ['both sides', ''] }
# Web-frame example: flat-bar ('FB') profile with zero flange and
# compressive (negative) sigma_x values.
obj_dict_fr = {'mat_yield': [355e6, 'Pa'], 'mat_factor': [1.15, ''],'span': [2.5, 'm'], 'spacing': [0.74, 'm'],
               'plate_thk': [0.018, 'm'],
               'stf_web_height': [0.2, 'm'], 'stf_web_thk': [0.018, 'm'], 'stf_flange_width': [0, 'm'],
               'stf_flange_thk': [0, 'm'], 'structure_type': ['FRAME', ''], 'plate_kpp': [1, ''],
               'stf_kps': [1, ''], 'stf_km1': [12, ''], 'stf_km2': [24, ''], 'stf_km3': [12, ''],
               'sigma_y1': [150, 'MPa'], 'sigma_y2': [92.22, 'MPa'], 'sigma_x2': [-54.566, 'MPa'], 'sigma_x1': [-54.566, 'MPa'], 'tau_xy': [16.67, 'MPa'],
               'stf_type': ['FB', ''], 'structure_types': [structure_types, ''], 'zstar_optimization': [True, ''],
               'puls buckling method':[2,''], 'puls boundary':['Int',''], 'puls stiffener end':['C',''],
               'puls sp or up':['SP',''], 'puls up boundary' :['SSSS',''], 'panel or shell': ['panel', ''], 'pressure side': ['both sides', ''] }
# Example geometry.  Insertion order is kept identical to the original
# definitions because callers may iterate these dicts.
# Points: name -> [x, y] coordinate pair.
point_dict = {
    'point5': [12.0, 2.5],
    'point8': [0.0, 2.5],
    'point3': [8.0, 0.0],
    'point2': [4.0, 0.0],
    'point6': [8.0, 2.5],
    'point7': [4.0, 2.5],
    'point9': [0.0, 20.0],
    'point4': [12.0, 0.0],
    'point10': [12.0, 20.0],
    'point1': [0.0, 0.0],
}
# Lines: name -> the two point numbers the line connects.
line_dict = {
    'line8': [9, 8],
    'line6': [7, 6],
    'line12': [2, 7],
    'line3': [3, 4],
    'line13': [3, 6],
    'line1': [1, 2],
    'line10': [5, 10],
    'line11': [1, 8],
    'line7': [7, 8],
    'line9': [9, 10],
    'line5': [5, 6],
    'line4': [5, 4],
    'line2': [3, 2],
}
# Optimization frames: name -> pair of [x, y] end coordinates.
opt_frames = {
    'opt_frame1': [[2.4, 0.0], [2.4, 2.5]],
    'opt_frame2': [[4.8, 0.0], [4.8, 2.5]],
    'opt_frame3': [[7.2, 0.0], [7.2, 2.5]],
    'opt_frame4': [[9.6, 0.0], [9.6, 2.5]],
    'opt_frame_start': [[0.0, 0.0], [0.0, 2.5]],
    'opt_frame_stop': [[12.0, 0.0], [12.0, 2.5]],
}
# Example fatigue parameter set: SN-curve 'Ec', three load conditions
# (Loaded / Ballast / Part) with per-condition Weibull, period, fraction,
# correlation-location and acceleration tuples; design fatigue factor 2.
fat_obj_dict = {'SN-curve': 'Ec', 'SCF': 1, 'Design life': 20, 'n0': 10000,
                'Weibull': (0.8, 0.8, 0.8), 'Period': (9, 9, 9), 'Fraction': (1, 0, 0),
                'CorrLoc': (0.5, 0.5, 0.5), 'Order': ('Loaded', 'Ballast', 'Part'),
                'Accelerations': (0.5, 0.5, 0.5), 'DFF': 2}
# Originally a byte-for-byte duplicate literal of fat_obj_dict; defined as an
# independent shallow copy instead, so the two examples stay equal but remain
# separate objects (mutating one does not affect the other).
fat_obj_dict2 = dict(fat_obj_dict)
# Fatigue parameter set known to be problematic: only the first two of the
# three load conditions carry data (third Weibull/Period/Fraction/CorrLoc/
# Acceleration entry is 0), and n0 is very large.
fat_obj_dict_problematic = {
    'SN-curve': 'Ec',
    'SCF': 1,
    'Design life': 20,
    'n0': 500571428.0,
    'Weibull': (0.8, 0.8, 0),
    'Period': (8, 8, 0),
    'Fraction': (0.5, 0.5, 0),
    'CorrLoc': (0.5, 0.5, 0),
    'Order': ('Loaded', 'Ballast', 'Part'),
    'Accelerations': (0.5, 0.5, 0),
    'DFF': 2,
}
# Example load definitions: poly_third/second/first/const are polynomial
# pressure coefficients; static_draft is None for all four (cf. load_static
# below, which uses a draft and None coefficients -- presumably the two modes
# are mutually exclusive; confirm in calc_loads).
# NOTE(review): 'test_load_laoded_FLS' misspells "loaded" as "laoded".  It is
# a runtime identifier, so it is deliberately left unchanged here.
loa_fls = {'static_draft':None,'poly_third':1,'poly_second':50,'poly_first':10,'poly_const':5000,'man_press':0,
           'load_condition':'loaded','name_of_load':'test_load_laoded_FLS','limit_state':'FLS'}
# Loaded condition, ULS variant.
loa_uls = {'static_draft':None,'poly_third':2,'poly_second':20,'poly_first':20,'poly_const':2000,'man_press':0,
           'load_condition':'loaded','name_of_load':'test_load_loaded_ULS','limit_state':'ULS'}
# Ballast condition, FLS variant.
bal_fls = {'static_draft':None,'poly_third':5.5,'poly_second':10,'poly_first':5.5,'poly_const':1000,'man_press':0,
           'load_condition':'ballast','name_of_load':'test_load_ballast_FLS','limit_state':'FLS'}
# Ballast condition, ULS variant (same coefficients as loa_uls).
bal_uls = {'static_draft':None,'poly_third':2,'poly_second':20,'poly_first':20,'poly_const':2000,'man_press':0,
           'load_condition':'ballast','name_of_load':'test_load_ballast_ULS','limit_state':'ULS'}
# Example compartment definitions.  'acc' holds static plus dynamic (ballast/
# loaded) accelerations; 'cells' is a cell count (presumably from the grid
# generator -- confirm); min_el/max_el bound the compartment elevation.
tank_dict_ballast = {'acc': {'dyn_ballast': 3.0, 'dyn_loaded': 3.0, 'static': 9.81}, 'added_press': 25000.0,
                     'cells': 10632,'comp_no': 4, 'content': 'ballast', 'density': 1025.0, 'max_el': 20.0,
                     'min_el': 0.0}
# Crude-oil compartment no. 2.
comp2 = {'acc': {'static': 9.81, 'dyn_ballast': 3.0, 'dyn_loaded': 3.0}, 'max_el': 29.5, 'added_press': 25000.0,
         'cells': 29591, 'density': 1025.0, 'content': 'crude_oil', 'comp_no': 2, 'min_el': 2.5}
# Crude-oil compartment no. 3.
comp3 = {'acc': {'static': 9.81, 'dyn_ballast': 3.0, 'dyn_loaded': 3.0}, 'max_el': 29.5, 'added_press': 25000.0,
         'cells': 19638, 'density': 1025.0, 'content': 'crude_oil', 'comp_no': 3, 'min_el': 2.5}
# Ballast compartment no. 4.
comp4 = {'acc': {'static': 9.81, 'dyn_ballast': 3.0, 'dyn_loaded': 3.0}, 'max_el': 29.5, 'added_press': 25000.0,
         'cells': 19072, 'density': 1025.0, 'content': 'ballast', 'comp_no': 4, 'min_el': 0.0}
# Example external-load objects for the ballast condition: polynomial
# pressure definitions for side and bottom, one static-draft load and one
# fixed slamming pressure.
load_side = {'poly_third': 0.0, 'poly_second': 303.0, 'poly_first': -3750.0, 'poly_const': 153000.0,
             'load_condition': 'ballast', 'structure_type': None, 'man_press': None, 'static_draft': None,
             'name_of_load': 'ballast_side', 'limit_state': 'ULS'}
load_bottom = {'poly_third': 0.0, 'poly_second': 31.0, 'poly_first': -83.0, 'poly_const': 45800.0,
               'load_condition': 'ballast', 'structure_type': None, 'man_press': None, 'static_draft': None,
               'name_of_load': 'ballast_bottom', 'limit_state': 'ULS'}
# Static load defined by a 15.0 draft instead of polynomial coefficients
# (all poly_* entries None).
load_static = {'poly_third': None, 'poly_second': None, 'poly_first': None, 'poly_const': None,
               'load_condition': 'ballast', 'structure_type': None, 'man_press': None, 'static_draft': 15.0,
               'name_of_load': 'ballast_static', 'limit_state': 'ULS'}
# Constant slamming pressure: only poly_const (1 000 000) is non-zero and
# limit_state is None.
load_slamming = {'poly_third': 0, 'poly_second': 0, 'poly_first': 0, 'poly_const': 1000000.0,
                 'load_condition': 'slamming', 'structure_type': None, 'man_press': None, 'static_draft': None,
                 'name_of_load': 'slamming', 'limit_state': None}
# Structure types a compartment may load against; the four ex_comp* examples
# originally repeated this 14-element list verbatim, so it is hoisted here.
_EX_COMP_ALL_TYPES = ['BOTTOM', 'BBS', 'BBT', 'HOPPER', 'SIDE_SHELL', 'INNER_SIDE',
                      'FRAME', 'FRAME_WT', 'SSS', 'MD', 'GENERAL_INTERNAL_WT',
                      'GENERAL_INTERNAL_NONWT', 'INTERNAL_1_MPA', 'INTERNAL_LOW_STRESS_WT']
# Example compartments no. 2-5.  Each dict gets an independent list(...) copy
# of the type list, matching the original per-dict literals so that mutating
# one compartment's 'all_types' cannot affect another's.
ex_comp1 = {'comp_no': 2, 'cells': 32829, 'min_el': 2.5, 'max_el': 30.9, 'content': '',
            'added_press': 25000.0, 'acc': {'static': 9.81, 'dyn_loaded': 3.0, 'dyn_ballast': 3.0},
            'density': 1025.0, 'all_types': list(_EX_COMP_ALL_TYPES)}
ex_comp2 = {'comp_no': 3, 'cells': 62530, 'min_el': 2.5, 'max_el': 30.900000000000002, 'content': '',
            'added_press': 25000.0, 'acc': {'static': 9.81, 'dyn_loaded': 3.0, 'dyn_ballast': 3.0},
            'density': 1025.0, 'all_types': list(_EX_COMP_ALL_TYPES)}
ex_comp3 = {'comp_no': 4, 'cells': 14559, 'min_el': 0.0, 'max_el': 30.900000000000002, 'content': '',
            'added_press': 25000.0, 'acc': {'static': 9.81, 'dyn_loaded': 3.0, 'dyn_ballast': 3.0},
            'density': 1025.0, 'all_types': list(_EX_COMP_ALL_TYPES)}
ex_comp4 = {'comp_no': 5, 'cells': 2785, 'min_el': 0.0, 'max_el': 2.5, 'content': '',
            'added_press': 25000.0, 'acc': {'static': 9.81, 'dyn_loaded': 3.0, 'dyn_ballast': 3.0},
            'density': 1025.0, 'all_types': list(_EX_COMP_ALL_TYPES)}
run_dict = {'line3': {'Identification': 'line3', 'Length of panel': 4000.0, 'Stiffener spacing': 700.0,
'Plate thickness': 18.0, 'Number of primary stiffeners': 10, 'Stiffener type (L,T,F)': 'T',
'Stiffener boundary': 'C', 'Stiff. Height': 400.0, 'Web thick.': 12.0, 'Flange width': 200.0,
'Flange thick.': 20.0, 'Tilt angle': 0, 'Number of sec. stiffeners': 0,
'Modulus of elasticity': 210000.0, "Poisson's ratio": 0.3, 'Yield stress plate': 355.0,
'Yield stress stiffener': 355.0, 'Axial stress': 101.7, 'Trans. stress 1': 100.0,
'Trans. stress 2': 100.0, 'Shear stress': 5.0, 'Pressure (fixed)': 0.41261,
'In-plane support': 'Int'},
'line4': {'Identification': 'line4', 'Length of panel': 3900.0, 'Stiffener spacing': 700.0, 'Plate thickness': 18.0,
'Number of primary stiffeners': 10, 'Stiffener type (L,T,F)': 'T', 'Stiffener boundary': 'C', 'Stiff. Height': 400.0, 'Web thick.': 12.0, 'Flange width': 250.0, 'Flange thick.': 14.0, 'Tilt angle': 0, 'Number of sec. stiffeners': 0, 'Modulus of elasticity': 210000.0, "Poisson's ratio": 0.3, 'Yield stress plate': 355.0, 'Yield stress stiffener': 355.0, 'Axial stress': 100.5, 'Trans. stress 1': 100.0, 'Trans. stress 2': 100.0, 'Shear stress': 5.0, 'Pressure (fixed)': 0.406561, 'In-plane support': 'Int'}, 'line5': {'Identification': 'line5', 'Length of panel': 3800.0, 'Stiffener spacing': 750.0, 'Plate thickness': 18.0, 'Number of primary stiffeners': 10, 'Stiffener type (L,T,F)': 'T', 'Stiffener boundary': 'C', 'Stiff. Height': 400.0, 'Web thick.': 12.0, 'Flange width': 250.0, 'Flange thick.': 14.0, 'Tilt angle': 0, 'Number of sec. stiffeners': 0, 'Modulus of elasticity': 210000.0, "Poisson's ratio": 0.3, 'Yield stress plate': 355.0, 'Yield stress stiffener': 355.0, 'Axial stress': 102.7, 'Trans. stress 1': 100.0, 'Trans. stress 2': 100.0, 'Shear stress': 5.0, 'Pressure (fixed)': 0.406575, 'In-plane support': 'Int'}, 'line6': {'Identification': 'line6', 'Length of panel': 3700.0, 'Stiffener spacing': 750.0, 'Plate thickness': 18.0, 'Number of primary stiffeners': 10, 'Stiffener type (L,T,F)': 'T', 'Stiffener boundary': 'C', 'Stiff. Height': 400.0, 'Web thick.': 12.0, 'Flange width': 250.0, 'Flange thick.': 14.0, 'Tilt angle': 0, 'Number of sec. stiffeners': 0, 'Modulus of elasticity': 210000.0, "Poisson's ratio": 0.3, 'Yield stress plate': 355.0, 'Yield stress stiffener': 355.0, 'Axial stress': 102.7, 'Trans. stress 1': 100.0, 'Trans. stress 2': 100.0, 'Shear stress': 5.0, 'Pressure (fixed)': 0.412197, 'In-plane support': 'Int'}, 'line7': {'Identification': 'line7', 'Length of panel': 3600.0, 'Stiffener spacing': 750.0, 'Plate thickness': 18.0, 'Number of primary stiffeners': 10, 'Stiffener type (L,T,F)': 'T', 'Stiffener boundary': 'C', 'Stiff. 
Height': 400.0, 'Web thick.': 12.0, 'Flange width': 250.0, 'Flange thick.': 12.0, 'Tilt angle': 0, 'Number of sec. stiffeners': 0, 'Modulus of elasticity': 210000.0, "Poisson's ratio": 0.3, 'Yield stress plate': 355.0, 'Yield stress stiffener': 355.0, 'Axial stress': 101.5, 'Trans. stress 1': 100.0, 'Trans. stress 2': 100.0, 'Shear stress': 5.0, 'Pressure (fixed)': 0.422985, 'In-plane support': 'Int'}, 'line8': {'Identification': 'line8', 'Length of panel': 3500.0, 'Stiffener spacing': 750.0, 'Plate thickness': 18.0, 'Number of primary stiffeners': 10, 'Stiffener type (L,T,F)': 'T', 'Stiffener boundary': 'C', 'Stiff. Height': 400.0, 'Web thick.': 12.0, 'Flange width': 250.0, 'Flange thick.': 12.0, 'Tilt angle': 0, 'Number of sec. stiffeners': 0, 'Modulus of elasticity': 210000.0, "Poisson's ratio": 0.3, 'Yield stress plate': 355.0, 'Yield stress stiffener': 355.0, 'Axial stress': 101.5, 'Trans. stress 1': 100.0, 'Trans. stress 2': 100.0, 'Shear stress': 5.0, 'Pressure (fixed)': 0.438508, 'In-plane support': 'Int'}, 'line9': {'Identification': 'line9', 'Length of panel': 3800.0, 'Stiffener spacing': 700.0, 'Plate thickness': 18.0, 'Number of primary stiffeners': 10, 'Stiffener type (L,T,F)': 'T', 'Stiffener boundary': 'C', 'Stiff. Height': 400.0, 'Web thick.': 12.0, 'Flange width': 200.0, 'Flange thick.': 18.0, 'Tilt angle': 0, 'Number of sec. stiffeners': 0, 'Modulus of elasticity': 210000.0, "Poisson's ratio": 0.3, 'Yield stress plate': 355.0, 'Yield stress stiffener': 355.0, 'Axial stress': 100.7, 'Trans. stress 1': 100.0, 'Trans. stress 2': 100.0, 'Shear stress': 5.0, 'Pressure (fixed)': 0.459639, 'In-plane support': 'Int'}, 'line10': {'Identification': 'line10', 'Length of panel': 3800.0, 'Stiffener spacing': 750.0, 'Plate thickness': 18.0, 'Number of primary stiffeners': 10, 'Stiffener type (L,T,F)': 'T', 'Stiffener boundary': 'C', 'Stiff. Height': 400.0, 'Web thick.': 12.0, 'Flange width': 150.0, 'Flange thick.': 20.0, 'Tilt angle': 0, 'Number of sec. 
stiffeners': 0, 'Modulus of elasticity': 210000.0, "Poisson's ratio": 0.3, 'Yield stress plate': 355.0, 'Yield stress stiffener': 355.0, 'Axial stress': 50.0, 'Trans. stress 1': 100.0, 'Trans. stress 2': 100.0, 'Shear stress': 5.0, 'Pressure (fixed)': 0.487211, 'In-plane support': 'Int'}, 'line11': {'Identification': 'line11', 'Length of panel': 4000.0, 'Stiffener spacing': 750.0, 'Plate thickness': 18.0, 'Number of primary stiffeners': 10, 'Stiffener type (L,T,F)': 'T', 'Stiffener boundary': 'C', 'Stiff. Height': 500.0, 'Web thick.': 12.0, 'Flange width': 150.0, 'Flange thick.': 20.0, 'Tilt angle': 0, 'Number of sec. stiffeners': 0, 'Modulus of elasticity': 210000.0, "Poisson's ratio": 0.3, 'Yield stress plate': 355.0, 'Yield stress stiffener': 355.0, 'Axial stress': 50.0, 'Trans. stress 1': 100.0, 'Trans. stress 2': 100.0, 'Shear stress': 5.0, 'Pressure (fixed)': 0.521418, 'In-plane support': 'Int'}, 'line12': {'Identification': 'line12', 'Length of panel': 3905.1200000000003, 'Stiffener spacing': 750.0, 'Plate thickness': 18.0, 'Number of primary stiffeners': 10, 'Stiffener type (L,T,F)': 'T', 'Stiffener boundary': 'C', 'Stiff. Height': 500.0, 'Web thick.': 12.0, 'Flange width': 150.0, 'Flange thick.': 20.0, 'Tilt angle': 0, 'Number of sec. stiffeners': 0, 'Modulus of elasticity': 210000.0, "Poisson's ratio": 0.3, 'Yield stress plate': 355.0, 'Yield stress stiffener': 355.0, 'Axial stress': 50.0, 'Trans. stress 1': 100.0, 'Trans. stress 2': 100.0, 'Shear stress': 5.0, 'Pressure (fixed)': 0.557214, 'In-plane support': 'Int'}, 'line50': {'Identification': 'line50', 'Length of panel': 3000.0, 'Stiffener spacing': 750.0, 'Plate thickness': 18.0, 'Number of primary stiffeners': 10, 'Stiffener type (L,T,F)': 'T', 'Stiffener boundary': 'C', 'Stiff. Height': 340.0, 'Web thick.': 12.0, 'Flange width': 200.0, 'Flange thick.': 20.0, 'Tilt angle': 0, 'Number of sec. 
stiffeners': 0, 'Modulus of elasticity': 210000.0, "Poisson's ratio": 0.3, 'Yield stress plate': 355.0, 'Yield stress stiffener': 355.0, 'Axial stress': 40.0, 'Trans. stress 1': 100.0, 'Trans. stress 2': 100.0, 'Shear stress': 3.0, 'Pressure (fixed)': 0.300313, 'In-plane support': 'Int'}, 'line51': {'Identification': 'line51', 'Length of panel': 3199.999999999999, 'Stiffener spacing': 750.0, 'Plate thickness': 18.0, 'Number of primary stiffeners': 10, 'Stiffener type (L,T,F)': 'T', 'Stiffener boundary': 'C', 'Stiff. Height': 340.0, 'Web thick.': 12.0, 'Flange width': 200.0, 'Flange thick.': 20.0, 'Tilt angle': 0, 'Number of sec. stiffeners': 0, 'Modulus of elasticity': 210000.0, "Poisson's ratio": 0.3, 'Yield stress plate': 355.0, 'Yield stress stiffener': 355.0, 'Axial stress': 40.0, 'Trans. stress 1': 100.0, 'Trans. stress 2': 100.0, 'Shear stress': 3.0, 'Pressure (fixed)': 0.295486, 'In-plane support': 'Int'}, 'line52': {'Identification': 'line52', 'Length of panel': 3400.0000000000005, 'Stiffener spacing': 750.0, 'Plate thickness': 18.0, 'Number of primary stiffeners': 10, 'Stiffener type (L,T,F)': 'T', 'Stiffener boundary': 'C', 'Stiff. Height': 340.0, 'Web thick.': 12.0, 'Flange width': 200.0, 'Flange thick.': 20.0, 'Tilt angle': 0, 'Number of sec. stiffeners': 0, 'Modulus of elasticity': 210000.0, "Poisson's ratio": 0.3, 'Yield stress plate': 355.0, 'Yield stress stiffener': 355.0, 'Axial stress': 40.0, 'Trans. stress 1': 100.0, 'Trans. stress 2': 100.0, 'Shear stress': 3.0, 'Pressure (fixed)': 0.248226, 'In-plane support': 'Int'}, 'line53': {'Identification': 'line53', 'Length of panel': 3400.0000000000005, 'Stiffener spacing': 750.0, 'Plate thickness': 18.0, 'Number of primary stiffeners': 10, 'Stiffener type (L,T,F)': 'T', 'Stiffener boundary': 'C', 'Stiff. Height': 340.0, 'Web thick.': 12.0, 'Flange width': 200.0, 'Flange thick.': 20.0, 'Tilt angle': 0, 'Number of sec. 
stiffeners': 0, 'Modulus of elasticity': 210000.0, "Poisson's ratio": 0.3, 'Yield stress plate': 355.0, 'Yield stress stiffener': 355.0, 'Axial stress': 40.0, 'Trans. stress 1': 100.0, 'Trans. stress 2': 100.0, 'Shear stress': 3.0, 'Pressure (fixed)': 0.214038, 'In-plane support': 'Int'}, 'line54': {'Identification': 'line54', 'Length of panel': 3600.0, 'Stiffener spacing': 750.0, 'Plate thickness': 18.0, 'Number of primary stiffeners': 10, 'Stiffener type (L,T,F)': 'T', 'Stiffener boundary': 'C', 'Stiff. Height': 340.0, 'Web thick.': 12.0, 'Flange width': 150.0, 'Flange thick.': 16.0, 'Tilt angle': 0, 'Number of sec. stiffeners': 0, 'Modulus of elasticity': 210000.0, "Poisson's ratio": 0.3, 'Yield stress plate': 355.0, 'Yield stress stiffener': 355.0, 'Axial stress': 40.0, 'Trans. stress 1': 100.0, 'Trans. stress 2': 100.0, 'Shear stress': 3.0, 'Pressure (fixed)': 0.196177, 'In-plane support': 'Int'}, 'line55': {'Identification': 'line55', 'Length of panel': 3800.000000000001, 'Stiffener spacing': 750.0, 'Plate thickness': 18.0, 'Number of primary stiffeners': 10, 'Stiffener type (L,T,F)': 'T', 'Stiffener boundary': 'C', 'Stiff. Height': 340.0, 'Web thick.': 12.0, 'Flange width': 150.0, 'Flange thick.': 16.0, 'Tilt angle': 0, 'Number of sec. stiffeners': 0, 'Modulus of elasticity': 210000.0, "Poisson's ratio": 0.3, 'Yield stress plate': 355.0, 'Yield stress stiffener': 355.0, 'Axial stress': 40.0, 'Trans. stress 1': 100.0, 'Trans. stress 2': 100.0, 'Shear stress': 3.0, 'Pressure (fixed)': 0.189068, 'In-plane support': 'Int'}, 'line56': {'Identification': 'line56', 'Length of panel': 4000.0, 'Stiffener spacing': 750.0, 'Plate thickness': 18.0, 'Number of primary stiffeners': 10, 'Stiffener type (L,T,F)': 'T', 'Stiffener boundary': 'C', 'Stiff. Height': 340.0, 'Web thick.': 12.0, 'Flange width': 150.0, 'Flange thick.': 16.0, 'Tilt angle': 0, 'Number of sec. 
stiffeners': 0, 'Modulus of elasticity': 210000.0, "Poisson's ratio": 0.3, 'Yield stress plate': 355.0, 'Yield stress stiffener': 355.0, 'Axial stress': 40.0, 'Trans. stress 1': 100.0, 'Trans. stress 2': 100.0, 'Shear stress': 3.0, 'Pressure (fixed)': 0.105442, 'In-plane support': 'Int'}, 'line57': {'Identification': 'line57', 'Length of panel': 4000.0, 'Stiffener spacing': 750.0, 'Plate thickness': 18.0, 'Number of primary stiffeners': 10, 'Stiffener type (L,T,F)': 'T', 'Stiffener boundary': 'C', 'Stiff. Height': 340.0, 'Web thick.': 12.0, 'Flange width': 150.0, 'Flange thick.': 16.0, 'Tilt angle': 0, 'Number of sec. stiffeners': 0, 'Modulus of elasticity': 210000.0, "Poisson's ratio": 0.3, 'Yield stress plate': 355.0, 'Yield stress stiffener': 355.0, 'Axial stress': 40.0, 'Trans. stress 1': 100.0, 'Trans. stress 2': 100.0, 'Shear stress': 3.0, 'Pressure (fixed)': 0.155554, 'In-plane support': 'Int'}, 'line31': {'Identification': 'line31', 'Length of panel': 4000.0, 'Stiffener spacing': 700.0, 'Plate thickness': 18.0, 'Number of primary stiffeners': 10, 'Stiffener type (L,T,F)': 'T', 'Stiffener boundary': 'C', 'Stiff. Height': 250.0, 'Web thick.': 12.0, 'Flange width': 150.0, 'Flange thick.': 14.0, 'Tilt angle': 0, 'Number of sec. stiffeners': 0, 'Modulus of elasticity': 210000.0, "Poisson's ratio": 0.3, 'Yield stress plate': 355.0, 'Yield stress stiffener': 355.0, 'Axial stress': 40.0, 'Trans. stress 1': 70.0, 'Trans. stress 2': 70.0, 'Shear stress': 3.0, 'Pressure (fixed)': 0.0325, 'In-plane support': 'Int'}, 'line32': {'Identification': 'line32', 'Length of panel': 3900.0000000000005, 'Stiffener spacing': 700.0, 'Plate thickness': 18.0, 'Number of primary stiffeners': 10, 'Stiffener type (L,T,F)': 'T', 'Stiffener boundary': 'C', 'Stiff. Height': 250.0, 'Web thick.': 12.0, 'Flange width': 150.0, 'Flange thick.': 14.0, 'Tilt angle': 0, 'Number of sec. 
stiffeners': 0, 'Modulus of elasticity': 210000.0, "Poisson's ratio": 0.3, 'Yield stress plate': 355.0, 'Yield stress stiffener': 355.0, 'Axial stress': 40.0, 'Trans. stress 1': 70.0, 'Trans. stress 2': 70.0, 'Shear stress': 3.0, 'Pressure (fixed)': 0.0325, 'In-plane support': 'Int'}, 'line33': {'Identification': 'line33', 'Length of panel': 3799.999999999999, 'Stiffener spacing': 700.0, 'Plate thickness': 18.0, 'Number of primary stiffeners': 10, 'Stiffener type (L,T,F)': 'T', 'Stiffener boundary': 'C', 'Stiff. Height': 250.0, 'Web thick.': 12.0, 'Flange width': 150.0, 'Flange thick.': 14.0, 'Tilt angle': 0, 'Number of sec. stiffeners': 0, 'Modulus of elasticity': 210000.0, "Poisson's ratio": 0.3, 'Yield stress plate': 355.0, 'Yield stress stiffener': 355.0, 'Axial stress': 40.0, 'Trans. stress 1': 70.0, 'Trans. stress 2': 70.0, 'Shear stress': 3.0, 'Pressure (fixed)': 0.0325, 'In-plane support': 'Int'}, 'line34': {'Identification': 'line34', 'Length of panel': 3699.999999999999, 'Stiffener spacing': 700.0, 'Plate thickness': 18.0, 'Number of primary stiffeners': 10, 'Stiffener type (L,T,F)': 'T', 'Stiffener boundary': 'C', 'Stiff. Height': 250.0, 'Web thick.': 12.0, 'Flange width': 150.0, 'Flange thick.': 14.0, 'Tilt angle': 0, 'Number of sec. stiffeners': 0, 'Modulus of elasticity': 210000.0, "Poisson's ratio": 0.3, 'Yield stress plate': 355.0, 'Yield stress stiffener': 355.0, 'Axial stress': 40.0, 'Trans. stress 1': 70.0, 'Trans. stress 2': 70.0, 'Shear stress': 3.0, 'Pressure (fixed)': 0.0325, 'In-plane support': 'Int'}, 'line35': {'Identification': 'line35', 'Length of panel': 3600.0000000000014, 'Stiffener spacing': 700.0, 'Plate thickness': 18.0, 'Number of primary stiffeners': 10, 'Stiffener type (L,T,F)': 'T', 'Stiffener boundary': 'C', 'Stiff. Height': 250.0, 'Web thick.': 12.0, 'Flange width': 150.0, 'Flange thick.': 14.0, 'Tilt angle': 0, 'Number of sec. 
stiffeners': 0, 'Modulus of elasticity': 210000.0, "Poisson's ratio": 0.3, 'Yield stress plate': 355.0, 'Yield stress stiffener': 355.0, 'Axial stress': 40.0, 'Trans. stress 1': 70.0, 'Trans. stress 2': 70.0, 'Shear stress': 3.0, 'Pressure (fixed)': 0.0325, 'In-plane support': 'Int'}, 'line36': {'Identification': 'line36', 'Length of panel': 3500.0, 'Stiffener spacing': 700.0, 'Plate thickness': 18.0, 'Number of primary stiffeners': 10, 'Stiffener type (L,T,F)': 'T', 'Stiffener boundary': 'C', 'Stiff. Height': 250.0, 'Web thick.': 12.0, 'Flange width': 150.0, 'Flange thick.': 14.0, 'Tilt angle': 0, 'Number of sec. stiffeners': 0, 'Modulus of elasticity': 210000.0, "Poisson's ratio": 0.3, 'Yield stress plate': 355.0, 'Yield stress stiffener': 355.0, 'Axial stress': 40.0, 'Trans. stress 1': 70.0, 'Trans. stress 2': 70.0, 'Shear stress': 3.0, 'Pressure (fixed)': 0.0325, 'In-plane support': 'Int'}, 'line37': {'Identification': 'line37', 'Length of panel': 3800.000000000001, 'Stiffener spacing': 700.0, 'Plate thickness': 18.0, 'Number of primary stiffeners': 10, 'Stiffener type (L,T,F)': 'T', 'Stiffener boundary': 'C', 'Stiff. Height': 250.0, 'Web thick.': 12.0, 'Flange width': 150.0, 'Flange thick.': 14.0, 'Tilt angle': 0, 'Number of sec. stiffeners': 0, 'Modulus of elasticity': 210000.0, "Poisson's ratio": 0.3, 'Yield stress plate': 355.0, 'Yield stress stiffener': 355.0, 'Axial stress': 40.0, 'Trans. stress 1': 70.0, 'Trans. stress 2': 70.0, 'Shear stress': 3.0, 'Pressure (fixed)': 0.0325, 'In-plane support': 'Int'}, 'line38': {'Identification': 'line38', 'Length of panel': 3800.000000000001, 'Stiffener spacing': 700.0, 'Plate thickness': 18.0, 'Number of primary stiffeners': 10, 'Stiffener type (L,T,F)': 'T', 'Stiffener boundary': 'C', 'Stiff. Height': 250.0, 'Web thick.': 12.0, 'Flange width': 150.0, 'Flange thick.': 14.0, 'Tilt angle': 0, 'Number of sec. 
stiffeners': 0, 'Modulus of elasticity': 210000.0, "Poisson's ratio": 0.3, 'Yield stress plate': 355.0, 'Yield stress stiffener': 355.0, 'Axial stress': 40.0, 'Trans. stress 1': 70.0, 'Trans. stress 2': 70.0, 'Shear stress': 3.0, 'Pressure (fixed)': 0.0325, 'In-plane support': 'Int'}, 'line39': {'Identification': 'line39', 'Length of panel': 4000.0, 'Stiffener spacing': 700.0, 'Plate thickness': 18.0, 'Number of primary stiffeners': 10, 'Stiffener type (L,T,F)': 'T', 'Stiffener boundary': 'C', 'Stiff. Height': 250.0, 'Web thick.': 12.0, 'Flange width': 150.0, 'Flange thick.': 14.0, 'Tilt angle': 0, 'Number of sec. stiffeners': 0, 'Modulus of elasticity': 210000.0, "Poisson's ratio": 0.3, 'Yield stress plate': 355.0, 'Yield stress stiffener': 355.0, 'Axial stress': 40.0, 'Trans. stress 1': 70.0, 'Trans. stress 2': 70.0, 'Shear stress': 3.0, 'Pressure (fixed)': 0.0325, 'In-plane support': 'Int'}, 'line40': {'Identification': 'line40', 'Length of panel': 3000.0, 'Stiffener spacing': 700.0, 'Plate thickness': 18.0, 'Number of primary stiffeners': 10, 'Stiffener type (L,T,F)': 'T', 'Stiffener boundary': 'C', 'Stiff. Height': 250.0, 'Web thick.': 12.0, 'Flange width': 150.0, 'Flange thick.': 14.0, 'Tilt angle': 0, 'Number of sec. stiffeners': 0, 'Modulus of elasticity': 210000.0, "Poisson's ratio": 0.3, 'Yield stress plate': 355.0, 'Yield stress stiffener': 355.0, 'Axial stress': 40.0, 'Trans. stress 1': 70.0, 'Trans. stress 2': 70.0, 'Shear stress': 3.0, 'Pressure (fixed)': 0.0325, 'In-plane support': 'Int'}, 'line13': {'Identification': 'line13', 'Length of panel': 4000.0, 'Stiffener spacing': 775.0, 'Plate thickness': 20.0, 'Number of primary stiffeners': 10, 'Stiffener type (L,T,F)': 'T', 'Stiffener boundary': 'C', 'Stiff. Height': 450.0, 'Web thick.': 12.0, 'Flange width': 150.0, 'Flange thick.': 20.0, 'Tilt angle': 0, 'Number of sec. 
stiffeners': 0, 'Modulus of elasticity': 210000.0, "Poisson's ratio": 0.3, 'Yield stress plate': 355.0, 'Yield stress stiffener': 355.0, 'Axial stress': 40.0, 'Trans. stress 1': 90.0, 'Trans. stress 2': 90.0, 'Shear stress': 5.0, 'Pressure (fixed)': 0.436313, 'In-plane support': 'Int'}, 'line14': {'Identification': 'line14', 'Length of panel': 4000.0, 'Stiffener spacing': 775.0, 'Plate thickness': 20.0, 'Number of primary stiffeners': 10, 'Stiffener type (L,T,F)': 'T', 'Stiffener boundary': 'C', 'Stiff. Height': 450.0, 'Web thick.': 12.0, 'Flange width': 150.0, 'Flange thick.': 20.0, 'Tilt angle': 0, 'Number of sec. stiffeners': 0, 'Modulus of elasticity': 210000.0, "Poisson's ratio": 0.3, 'Yield stress plate': 355.0, 'Yield stress stiffener': 355.0, 'Axial stress': 40.0, 'Trans. stress 1': 90.0, 'Trans. stress 2': 90.0, 'Shear stress': 5.0, 'Pressure (fixed)': 0.436313, 'In-plane support': 'Int'}, 'line15': {'Identification': 'line15', 'Length of panel': 4000.0, 'Stiffener spacing': 775.0, 'Plate thickness': 20.0, 'Number of primary stiffeners': 10, 'Stiffener type (L,T,F)': 'T', 'Stiffener boundary': 'C', 'Stiff. Height': 450.0, 'Web thick.': 12.0, 'Flange width': 150.0, 'Flange thick.': 20.0, 'Tilt angle': 0, 'Number of sec. stiffeners': 0, 'Modulus of elasticity': 210000.0, "Poisson's ratio": 0.3, 'Yield stress plate': 355.0, 'Yield stress stiffener': 355.0, 'Axial stress': 40.0, 'Trans. stress 1': 90.0, 'Trans. stress 2': 90.0, 'Shear stress': 5.0, 'Pressure (fixed)': 0.436313, 'In-plane support': 'Int'}, 'line16': {'Identification': 'line16', 'Length of panel': 3699.999999999999, 'Stiffener spacing': 775.0, 'Plate thickness': 18.0, 'Number of primary stiffeners': 10, 'Stiffener type (L,T,F)': 'T', 'Stiffener boundary': 'C', 'Stiff. Height': 375.0, 'Web thick.': 12.0, 'Flange width': 150.0, 'Flange thick.': 18.0, 'Tilt angle': 0, 'Number of sec. 
stiffeners': 0, 'Modulus of elasticity': 210000.0, "Poisson's ratio": 0.3, 'Yield stress plate': 355.0, 'Yield stress stiffener': 355.0, 'Axial stress': 40.0, 'Trans. stress 1': 90.0, 'Trans. stress 2': 90.0, 'Shear stress': 5.0, 'Pressure (fixed)': 0.436313, 'In-plane support': 'Int'}, 'line17': {'Identification': 'line17', 'Length of panel': 3600.0000000000014, 'Stiffener spacing': 775.0, 'Plate thickness': 18.0, 'Number of primary stiffeners': 10, 'Stiffener type (L,T,F)': 'T', 'Stiffener boundary': 'C', 'Stiff. Height': 375.0, 'Web thick.': 12.0, 'Flange width': 150.0, 'Flange thick.': 18.0, 'Tilt angle': 0, 'Number of sec. stiffeners': 0, 'Modulus of elasticity': 210000.0, "Poisson's ratio": 0.3, 'Yield stress plate': 355.0, 'Yield stress stiffener': 355.0, 'Axial stress': 40.0, 'Trans. stress 1': 90.0, 'Trans. stress 2': 90.0, 'Shear stress': 5.0, 'Pressure (fixed)': 0.436313, 'In-plane support': 'Int'}, 'line18': {'Identification': 'line18', 'Length of panel': 3500.0, 'Stiffener spacing': 775.0, 'Plate thickness': 18.0, 'Number of primary stiffeners': 10, 'Stiffener type (L,T,F)': 'T', 'Stiffener boundary': 'C', 'Stiff. Height': 375.0, 'Web thick.': 12.0, 'Flange width': 150.0, 'Flange thick.': 18.0, 'Tilt angle': 0, 'Number of sec. stiffeners': 0, 'Modulus of elasticity': 210000.0, "Poisson's ratio": 0.3, 'Yield stress plate': 355.0, 'Yield stress stiffener': 355.0, 'Axial stress': 40.0, 'Trans. stress 1': 90.0, 'Trans. stress 2': 90.0, 'Shear stress': 5.0, 'Pressure (fixed)': 0.436313, 'In-plane support': 'Int'}, 'line19': {'Identification': 'line19', 'Length of panel': 3800.000000000001, 'Stiffener spacing': 775.0, 'Plate thickness': 18.0, 'Number of primary stiffeners': 10, 'Stiffener type (L,T,F)': 'T', 'Stiffener boundary': 'C', 'Stiff. Height': 375.0, 'Web thick.': 12.0, 'Flange width': 150.0, 'Flange thick.': 18.0, 'Tilt angle': 0, 'Number of sec. 
stiffeners': 0, 'Modulus of elasticity': 210000.0, "Poisson's ratio": 0.3, 'Yield stress plate': 355.0, 'Yield stress stiffener': 355.0, 'Axial stress': 40.0, 'Trans. stress 1': 90.0, 'Trans. stress 2': 90.0, 'Shear stress': 5.0, 'Pressure (fixed)': 0.436313, 'In-plane support': 'Int'}, 'line20': {'Identification': 'line20', 'Length of panel': 3800.000000000001, 'Stiffener spacing': 775.0, 'Plate thickness': 18.0, 'Number of primary stiffeners': 10, 'Stiffener type (L,T,F)': 'T', 'Stiffener boundary': 'C', 'Stiff. Height': 375.0, 'Web thick.': 12.0, 'Flange width': 150.0, 'Flange thick.': 18.0, 'Tilt angle': 0, 'Number of sec. stiffeners': 0, 'Modulus of elasticity': 210000.0, "Poisson's ratio": 0.3, 'Yield stress plate': 355.0, 'Yield stress stiffener': 355.0, 'Axial stress': 40.0, 'Trans. stress 1': 90.0, 'Trans. stress 2': 90.0, 'Shear stress': 5.0, 'Pressure (fixed)': 0.436313, 'In-plane support': 'Int'}, 'line41': {'Identification': 'line41', 'Length of panel': 5000.0, 'Stiffener spacing': 775.0, 'Plate thickness': 18.0, 'Number of primary stiffeners': 10, 'Stiffener type (L,T,F)': 'T', 'Stiffener boundary': 'C', 'Stiff. Height': 500.0, 'Web thick.': 12.0, 'Flange width': 150.0, 'Flange thick.': 25.0, 'Tilt angle': 0, 'Number of sec. stiffeners': 0, 'Modulus of elasticity': 210000.0, "Poisson's ratio": 0.3, 'Yield stress plate': 355.0, 'Yield stress stiffener': 355.0, 'Axial stress': 40.0, 'Trans. stress 1': 90.0, 'Trans. stress 2': 90.0, 'Shear stress': 5.0, 'Pressure (fixed)': 0.436313, 'In-plane support': 'Int'}, 'line43': {'Identification': 'line43', 'Length of panel': 3199.999999999999, 'Stiffener spacing': 750.0, 'Plate thickness': 18.0, 'Number of primary stiffeners': 10, 'Stiffener type (L,T,F)': 'T', 'Stiffener boundary': 'C', 'Stiff. Height': 325.0, 'Web thick.': 12.0, 'Flange width': 150.0, 'Flange thick.': 16.0, 'Tilt angle': 0, 'Number of sec. 
stiffeners': 0, 'Modulus of elasticity': 210000.0, "Poisson's ratio": 0.3, 'Yield stress plate': 355.0, 'Yield stress stiffener': 355.0, 'Axial stress': 40.0, 'Trans. stress 1': 80.0, 'Trans. stress 2': 80.0, 'Shear stress': 5.0, 'Pressure (fixed)': 0.393657, 'In-plane support': 'Int'}, 'line44': {'Identification': 'line44', 'Length of panel': 3400.0000000000005, 'Stiffener spacing': 750.0, 'Plate thickness': 18.0, 'Number of primary stiffeners': 10, 'Stiffener type (L,T,F)': 'T', 'Stiffener boundary': 'C', 'Stiff. Height': 325.0, 'Web thick.': 12.0, 'Flange width': 150.0, 'Flange thick.': 16.0, 'Tilt angle': 0, 'Number of sec. stiffeners': 0, 'Modulus of elasticity': 210000.0, "Poisson's ratio": 0.3, 'Yield stress plate': 355.0, 'Yield stress stiffener': 355.0, 'Axial stress': 40.0, 'Trans. stress 1': 80.0, 'Trans. stress 2': 80.0, 'Shear stress': 5.0, 'Pressure (fixed)': 0.348157, 'In-plane support': 'Int'}, 'line45': {'Identification': 'line45', 'Length of panel': 3400.0000000000005, 'Stiffener spacing': 750.0, 'Plate thickness': 18.0, 'Number of primary stiffeners': 10, 'Stiffener type (L,T,F)': 'T', 'Stiffener boundary': 'C', 'Stiff. Height': 325.0, 'Web thick.': 12.0, 'Flange width': 150.0, 'Flange thick.': 16.0, 'Tilt angle': 0, 'Number of sec. stiffeners': 0, 'Modulus of elasticity': 210000.0, "Poisson's ratio": 0.3, 'Yield stress plate': 355.0, 'Yield stress stiffener': 355.0, 'Axial stress': 40.0, 'Trans. stress 1': 80.0, 'Trans. stress 2': 80.0, 'Shear stress': 5.0, 'Pressure (fixed)': 0.299813, 'In-plane support': 'Int'}, 'line46': {'Identification': 'line46', 'Length of panel': 3600.0000000000014, 'Stiffener spacing': 750.0, 'Plate thickness': 18.0, 'Number of primary stiffeners': 10, 'Stiffener type (L,T,F)': 'T', 'Stiffener boundary': 'C', 'Stiff. Height': 325.0, 'Web thick.': 12.0, 'Flange width': 150.0, 'Flange thick.': 16.0, 'Tilt angle': 0, 'Number of sec. 
stiffeners': 0, 'Modulus of elasticity': 210000.0, "Poisson's ratio": 0.3, 'Yield stress plate': 355.0, 'Yield stress stiffener': 355.0, 'Axial stress': 40.0, 'Trans. stress 1': 80.0, 'Trans. stress 2': 80.0, 'Shear stress': 5.0, 'Pressure (fixed)': 0.251469, 'In-plane support': 'Int'}, 'line47': {'Identification': 'line47', 'Length of panel': 3800.000000000001, 'Stiffener spacing': 750.0, 'Plate thickness': 18.0, 'Number of primary stiffeners': 10, 'Stiffener type (L,T,F)': 'T', 'Stiffener boundary': 'C', 'Stiff. Height': 325.0, 'Web thick.': 12.0, 'Flange width': 150.0, 'Flange thick.': 16.0, 'Tilt angle': 0, 'Number of sec. stiffeners': 0, 'Modulus of elasticity': 210000.0, "Poisson's ratio": 0.3, 'Yield stress plate': 355.0, 'Yield stress stiffener': 355.0, 'Axial stress': 40.0, 'Trans. stress 1': 80.0, 'Trans. stress 2': 80.0, 'Shear stress': 5.0, 'Pressure (fixed)': 0.200281, 'In-plane support': 'Int'}, 'line48': {'Identification': 'line48', 'Length of panel': 4000.0, 'Stiffener spacing': 750.0, 'Plate thickness': 18.0, 'Number of primary stiffeners': 10, 'Stiffener type (L,T,F)': 'T', 'Stiffener boundary': 'C', 'Stiff. Height': 325.0, 'Web thick.': 12.0, 'Flange width': 150.0, 'Flange thick.': 16.0, 'Tilt angle': 0, 'Number of sec. stiffeners': 0, 'Modulus of elasticity': 210000.0, "Poisson's ratio": 0.3, 'Yield stress plate': 355.0, 'Yield stress stiffener': 355.0, 'Axial stress': 40.0, 'Trans. stress 1': 80.0, 'Trans. stress 2': 80.0, 'Shear stress': 5.0, 'Pressure (fixed)': 0.14625, 'In-plane support': 'Int'}, 'line49': {'Identification': 'line49', 'Length of panel': 4000.0, 'Stiffener spacing': 750.0, 'Plate thickness': 18.0, 'Number of primary stiffeners': 10, 'Stiffener type (L,T,F)': 'T', 'Stiffener boundary': 'C', 'Stiff. Height': 325.0, 'Web thick.': 12.0, 'Flange width': 150.0, 'Flange thick.': 16.0, 'Tilt angle': 0, 'Number of sec. 
stiffeners': 0, 'Modulus of elasticity': 210000.0, "Poisson's ratio": 0.3, 'Yield stress plate': 355.0, 'Yield stress stiffener': 355.0, 'Axial stress': 40.0, 'Trans. stress 1': 80.0, 'Trans. stress 2': 80.0, 'Shear stress': 5.0, 'Pressure (fixed)': 0.089375, 'In-plane support': 'Int'}, 'line58': {'Identification': 'line58', 'Length of panel': 2500.0, 'Stiffener spacing': 700.0, 'Plate thickness': 14.0, 'Number of primary stiffeners': 10, 'Stiffener type (L,T,F)': 'F', 'Stiffener boundary': 'C', 'Stiff. Height': 250.0, 'Web thick.': 18.0, 'Flange width': 0.0, 'Flange thick.': 0.0, 'Tilt angle': 0, 'Number of sec. stiffeners': 0, 'Modulus of elasticity': 210000.0, "Poisson's ratio": 0.3, 'Yield stress plate': 355.0, 'Yield stress stiffener': 355.0, 'Axial stress': 60.0, 'Trans. stress 1': 70.0, 'Trans. stress 2': 70.0, 'Shear stress': 10.0, 'Pressure (fixed)': 0.0, 'In-plane support': 'Int'}, 'line59': {'Identification': 'line59', 'Length of panel': 2500.0, 'Stiffener spacing': 700.0, 'Plate thickness': 14.0, 'Number of primary stiffeners': 10, 'Stiffener type (L,T,F)': 'F', 'Stiffener boundary': 'C', 'Stiff. Height': 250.0, 'Web thick.': 18.0, 'Flange width': 0.0, 'Flange thick.': 0.0, 'Tilt angle': 0, 'Number of sec. stiffeners': 0, 'Modulus of elasticity': 210000.0, "Poisson's ratio": 0.3, 'Yield stress plate': 355.0, 'Yield stress stiffener': 355.0, 'Axial stress': 60.0, 'Trans. stress 1': 70.0, 'Trans. stress 2': 70.0, 'Shear stress': 10.0, 'Pressure (fixed)': 0.0, 'In-plane support': 'Int'}, 'line61': {'Identification': 'line61', 'Length of panel': 2500.0, 'Stiffener spacing': 700.0, 'Plate thickness': 14.0, 'Number of primary stiffeners': 10, 'Stiffener type (L,T,F)': 'F', 'Stiffener boundary': 'C', 'Stiff. Height': 250.0, 'Web thick.': 18.0, 'Flange width': 0.0, 'Flange thick.': 0.0, 'Tilt angle': 0, 'Number of sec. 
stiffeners': 0, 'Modulus of elasticity': 210000.0, "Poisson's ratio": 0.3, 'Yield stress plate': 355.0, 'Yield stress stiffener': 355.0, 'Axial stress': 60.0, 'Trans. stress 1': 70.0, 'Trans. stress 2': 70.0, 'Shear stress': 10.0, 'Pressure (fixed)': 0.0, 'In-plane support': 'Int'}, 'line62': {'Identification': 'line62', 'Length of panel': 2500.0, 'Stiffener spacing': 700.0, 'Plate thickness': 14.0, 'Number of primary stiffeners': 10, 'Stiffener type (L,T,F)': 'F', 'Stiffener boundary': 'C', 'Stiff. Height': 250.0, 'Web thick.': 18.0, 'Flange width': 0.0, 'Flange thick.': 0.0, 'Tilt angle': 0, 'Number of sec. stiffeners': 0, 'Modulus of elasticity': 210000.0, "Poisson's ratio": 0.3, 'Yield stress plate': 355.0, 'Yield stress stiffener': 355.0, 'Axial stress': 60.0, 'Trans. stress 1': 70.0, 'Trans. stress 2': 70.0, 'Shear stress': 10.0, 'Pressure (fixed)': 0.0, 'In-plane support': 'Int'}, 'line63': {'Identification': 'line63', 'Length of panel': 2500.0, 'Stiffener spacing': 700.0, 'Plate thickness': 14.0, 'Number of primary stiffeners': 10, 'Stiffener type (L,T,F)': 'F', 'Stiffener boundary': 'C', 'Stiff. Height': 250.0, 'Web thick.': 18.0, 'Flange width': 0.0, 'Flange thick.': 0.0, 'Tilt angle': 0, 'Number of sec. stiffeners': 0, 'Modulus of elasticity': 210000.0, "Poisson's ratio": 0.3, 'Yield stress plate': 355.0, 'Yield stress stiffener': 355.0, 'Axial stress': 60.0, 'Trans. stress 1': 70.0, 'Trans. stress 2': 70.0, 'Shear stress': 10.0, 'Pressure (fixed)': 0.0, 'In-plane support': 'Int'}, 'line64': {'Identification': 'line64', 'Length of panel': 2500.0, 'Stiffener spacing': 700.0, 'Plate thickness': 14.0, 'Number of primary stiffeners': 10, 'Stiffener type (L,T,F)': 'F', 'Stiffener boundary': 'C', 'Stiff. Height': 250.0, 'Web thick.': 18.0, 'Flange width': 0.0, 'Flange thick.': 0.0, 'Tilt angle': 0, 'Number of sec. 
stiffeners': 0, 'Modulus of elasticity': 210000.0, "Poisson's ratio": 0.3, 'Yield stress plate': 355.0, 'Yield stress stiffener': 355.0, 'Axial stress': 60.0, 'Trans. stress 1': 70.0, 'Trans. stress 2': 70.0, 'Shear stress': 10.0, 'Pressure (fixed)': 0.0, 'In-plane support': 'Int'}, 'line65': {'Identification': 'line65', 'Length of panel': 2500.0, 'Stiffener spacing': 700.0, 'Plate thickness': 14.0, 'Number of primary stiffeners': 10, 'Stiffener type (L,T,F)': 'F', 'Stiffener boundary': 'C', 'Stiff. Height': 250.0, 'Web thick.': 18.0, 'Flange width': 0.0, 'Flange thick.': 0.0, 'Tilt angle': 0, 'Number of sec. stiffeners': 0, 'Modulus of elasticity': 210000.0, "Poisson's ratio": 0.3, 'Yield stress plate': 355.0, 'Yield stress stiffener': 355.0, 'Axial stress': 60.0, 'Trans. stress 1': 70.0, 'Trans. stress 2': 70.0, 'Shear stress': 10.0, 'Pressure (fixed)': 0.0, 'In-plane support': 'Int'}, 'line66': {'Identification': 'line66', 'Length of panel': 2500.0, 'Stiffener spacing': 700.0, 'Plate thickness': 14.0, 'Number of primary stiffeners': 10, 'Stiffener type (L,T,F)': 'F', 'Stiffener boundary': 'C', 'Stiff. Height': 250.0, 'Web thick.': 18.0, 'Flange width': 0.0, 'Flange thick.': 0.0, 'Tilt angle': 0, 'Number of sec. stiffeners': 0, 'Modulus of elasticity': 210000.0, "Poisson's ratio": 0.3, 'Yield stress plate': 355.0, 'Yield stress stiffener': 355.0, 'Axial stress': 60.0, 'Trans. stress 1': 70.0, 'Trans. stress 2': 70.0, 'Shear stress': 10.0, 'Pressure (fixed)': 0.0, 'In-plane support': 'Int'}, 'line21': {'Identification': 'line21', 'Length of panel': 4000.0, 'Stiffener spacing': 700.0, 'Plate thickness': 14.0, 'Number of primary stiffeners': 10, 'Stiffener type (L,T,F)': 'F', 'Stiffener boundary': 'C', 'Stiff. Height': 250.0, 'Web thick.': 18.0, 'Flange width': 0.0, 'Flange thick.': 0.0, 'Tilt angle': 0, 'Number of sec. 
stiffeners': 0, 'Modulus of elasticity': 210000.0, "Poisson's ratio": 0.3, 'Yield stress plate': 355.0, 'Yield stress stiffener': 355.0, 'Axial stress': 60.0, 'Trans. stress 1': 70.0, 'Trans. stress 2': 70.0, 'Shear stress': 10.0, 'Pressure (fixed)': 0.0, 'In-plane support': 'Int'}, 'line42': {'Identification': 'line42', 'Length of panel': 3000.0, 'Stiffener spacing': 700.0, 'Plate thickness': 14.0, 'Number of primary stiffeners': 10, 'Stiffener type (L,T,F)': 'F', 'Stiffener boundary': 'C', 'Stiff. Height': 250.0, 'Web thick.': 18.0, 'Flange width': 0.0, 'Flange thick.': 0.0, 'Tilt angle': 0, 'Number of sec. stiffeners': 0, 'Modulus of elasticity': 210000.0, "Poisson's ratio": 0.3, 'Yield stress plate': 355.0, 'Yield stress stiffener': 355.0, 'Axial stress': 60.0, 'Trans. stress 1': 70.0, 'Trans. stress 2': 70.0, 'Shear stress': 10.0, 'Pressure (fixed)': 0.0, 'In-plane support': 'Int'}, 'line22': {'Identification': 'line22', 'Length of panel': 3000.0, 'Stiffener spacing': 700.0, 'Plate thickness': 14.0, 'Number of primary stiffeners': 10, 'Stiffener type (L,T,F)': 'F', 'Stiffener boundary': 'C', 'Stiff. Height': 250.0, 'Web thick.': 18.0, 'Flange width': 0.0, 'Flange thick.': 0.0, 'Tilt angle': 0, 'Number of sec. stiffeners': 0, 'Modulus of elasticity': 210000.0, "Poisson's ratio": 0.3, 'Yield stress plate': 355.0, 'Yield stress stiffener': 355.0, 'Axial stress': 60.0, 'Trans. stress 1': 70.0, 'Trans. stress 2': 70.0, 'Shear stress': 10.0, 'Pressure (fixed)': 0.0, 'In-plane support': 'Int'}, 'line67': {'Identification': 'line67', 'Length of panel': 3000.0, 'Stiffener spacing': 700.0, 'Plate thickness': 14.0, 'Number of primary stiffeners': 10, 'Stiffener type (L,T,F)': 'F', 'Stiffener boundary': 'C', 'Stiff. Height': 250.0, 'Web thick.': 18.0, 'Flange width': 0.0, 'Flange thick.': 0.0, 'Tilt angle': 0, 'Number of sec. 
stiffeners': 0, 'Modulus of elasticity': 210000.0, "Poisson's ratio": 0.3, 'Yield stress plate': 355.0, 'Yield stress stiffener': 355.0, 'Axial stress': 60.0, 'Trans. stress 1': 70.0, 'Trans. stress 2': 70.0, 'Shear stress': 10.0, 'Pressure (fixed)': 0.0, 'In-plane support': 'Int'}, 'line68': {'Identification': 'line68', 'Length of panel': 3000.0, 'Stiffener spacing': 700.0, 'Plate thickness': 14.0, 'Number of primary stiffeners': 10, 'Stiffener type (L,T,F)': 'F', 'Stiffener boundary': 'C', 'Stiff. Height': 250.0, 'Web thick.': 18.0, 'Flange width': 0.0, 'Flange thick.': 0.0, 'Tilt angle': 0, 'Number of sec. stiffeners': 0, 'Modulus of elasticity': 210000.0, "Poisson's ratio": 0.3, 'Yield stress plate': 355.0, 'Yield stress stiffener': 355.0, 'Axial stress': 60.0, 'Trans. stress 1': 70.0, 'Trans. stress 2': 70.0, 'Shear stress': 10.0, 'Pressure (fixed)': 0.0, 'In-plane support': 'Int'}, 'line69': {'Identification': 'line69', 'Length of panel': 3000.0, 'Stiffener spacing': 700.0, 'Plate thickness': 14.0, 'Number of primary stiffeners': 10, 'Stiffener type (L,T,F)': 'F', 'Stiffener boundary': 'C', 'Stiff. Height': 250.0, 'Web thick.': 18.0, 'Flange width': 0.0, 'Flange thick.': 0.0, 'Tilt angle': 0, 'Number of sec. stiffeners': 0, 'Modulus of elasticity': 210000.0, "Poisson's ratio": 0.3, 'Yield stress plate': 355.0, 'Yield stress stiffener': 355.0, 'Axial stress': 60.0, 'Trans. stress 1': 70.0, 'Trans. stress 2': 70.0, 'Shear stress': 10.0, 'Pressure (fixed)': 0.0, 'In-plane support': 'Int'}, 'line70': {'Identification': 'line70', 'Length of panel': 3000.0, 'Stiffener spacing': 700.0, 'Plate thickness': 14.0, 'Number of primary stiffeners': 10, 'Stiffener type (L,T,F)': 'F', 'Stiffener boundary': 'C', 'Stiff. Height': 250.0, 'Web thick.': 18.0, 'Flange width': 0.0, 'Flange thick.': 0.0, 'Tilt angle': 0, 'Number of sec. 
stiffeners': 0, 'Modulus of elasticity': 210000.0, "Poisson's ratio": 0.3, 'Yield stress plate': 355.0, 'Yield stress stiffener': 355.0, 'Axial stress': 60.0, 'Trans. stress 1': 70.0, 'Trans. stress 2': 70.0, 'Shear stress': 10.0, 'Pressure (fixed)': 0.0, 'In-plane support': 'Int'}, 'line71': {'Identification': 'line71', 'Length of panel': 3000.0, 'Stiffener spacing': 700.0, 'Plate thickness': 14.0, 'Number of primary stiffeners': 10, 'Stiffener type (L,T,F)': 'F', 'Stiffener boundary': 'C', 'Stiff. Height': 250.0, 'Web thick.': 18.0, 'Flange width': 0.0, 'Flange thick.': 0.0, 'Tilt angle': 0, 'Number of sec. stiffeners': 0, 'Modulus of elasticity': 210000.0, "Poisson's ratio": 0.3, 'Yield stress plate': 355.0, 'Yield stress stiffener': 355.0, 'Axial stress': 60.0, 'Trans. stress 1': 70.0, 'Trans. stress 2': 70.0, 'Shear stress': 10.0, 'Pressure (fixed)': 0.0, 'In-plane support': 'Int'}, 'line72': {'Identification': 'line72', 'Length of panel': 3000.0, 'Stiffener spacing': 700.0, 'Plate thickness': 14.0, 'Number of primary stiffeners': 10, 'Stiffener type (L,T,F)': 'F', 'Stiffener boundary': 'C', 'Stiff. Height': 250.0, 'Web thick.': 18.0, 'Flange width': 0.0, 'Flange thick.': 0.0, 'Tilt angle': 0, 'Number of sec. stiffeners': 0, 'Modulus of elasticity': 210000.0, "Poisson's ratio": 0.3, 'Yield stress plate': 355.0, 'Yield stress stiffener': 355.0, 'Axial stress': 60.0, 'Trans. stress 1': 70.0, 'Trans. stress 2': 70.0, 'Shear stress': 10.0, 'Pressure (fixed)': 0.0, 'In-plane support': 'Int'}, 'line73': {'Identification': 'line73', 'Length of panel': 3000.0, 'Stiffener spacing': 700.0, 'Plate thickness': 14.0, 'Number of primary stiffeners': 10, 'Stiffener type (L,T,F)': 'F', 'Stiffener boundary': 'C', 'Stiff. Height': 250.0, 'Web thick.': 18.0, 'Flange width': 0.0, 'Flange thick.': 0.0, 'Tilt angle': 0, 'Number of sec. 
stiffeners': 0, 'Modulus of elasticity': 210000.0, "Poisson's ratio": 0.3, 'Yield stress plate': 355.0, 'Yield stress stiffener': 355.0, 'Axial stress': 60.0, 'Trans. stress 1': 70.0, 'Trans. stress 2': 70.0, 'Shear stress': 10.0, 'Pressure (fixed)': 0.0, 'In-plane support': 'Int'}, 'line60': {'Identification': 'line60', 'Length of panel': 2500.0, 'Stiffener spacing': 700.0, 'Plate thickness': 14.0, 'Number of primary stiffeners': 10, 'Stiffener type (L,T,F)': 'F', 'Stiffener boundary': 'C', 'Stiff. Height': 250.0, 'Web thick.': 18.0, 'Flange width': 0.0, 'Flange thick.': 0.0, 'Tilt angle': 0, 'Number of sec. stiffeners': 0, 'Modulus of elasticity': 210000.0, "Poisson's ratio": 0.3, 'Yield stress plate': 355.0, 'Yield stress stiffener': 355.0, 'Axial stress': 60.0, 'Trans. stress 1': 70.0, 'Trans. stress 2': 70.0, 'Shear stress': 10.0, 'Pressure (fixed)': 0.0, 'In-plane support': 'Int'}, 'line1': {'Identification': 'line1', 'Length of panel': 2500.0, 'Stiffener spacing': 700.0, 'Plate thickness': 14.0, 'Number of primary stiffeners': 10, 'Stiffener type (L,T,F)': 'F', 'Stiffener boundary': 'C', 'Stiff. Height': 250.0, 'Web thick.': 18.0, 'Flange width': 150.0, 'Flange thick.': 20.0, 'Tilt angle': 0, 'Number of sec. stiffeners': 0, 'Modulus of elasticity': 210000.0, "Poisson's ratio": 0.3, 'Yield stress plate': 355.0, 'Yield stress stiffener': 355.0, 'Axial stress': 20.0, 'Trans. stress 1': 40.0, 'Trans. stress 2': 40.0, 'Shear stress': 5.0, 'Pressure (fixed)': 0.47186, 'In-plane support': 'Int'}, 'line2': {'Identification': 'line2', 'Length of panel': 3000.0, 'Stiffener spacing': 700.0, 'Plate thickness': 16.0, 'Number of primary stiffeners': 10, 'Stiffener type (L,T,F)': 'F', 'Stiffener boundary': 'C', 'Stiff. Height': 400.0, 'Web thick.': 18.0, 'Flange width': 150.0, 'Flange thick.': 20.0, 'Tilt angle': 0, 'Number of sec. 
stiffeners': 0, 'Modulus of elasticity': 210000.0, "Poisson's ratio": 0.3, 'Yield stress plate': 355.0, 'Yield stress stiffener': 355.0, 'Axial stress': 20.0, 'Trans. stress 1': 40.0, 'Trans. stress 2': 40.0, 'Shear stress': 5.0, 'Pressure (fixed)': 0.387068, 'In-plane support': 'Int'}, 'line23': {'Identification': 'line23', 'Length of panel': 3000.0, 'Stiffener spacing': 750.0, 'Plate thickness': 15.0, 'Number of primary stiffeners': 10, 'Stiffener type (L,T,F)': 'T', 'Stiffener boundary': 'C', 'Stiff. Height': 350.0, 'Web thick.': 12.0, 'Flange width': 150.0, 'Flange thick.': 20.0, 'Tilt angle': 0, 'Number of sec. stiffeners': 0, 'Modulus of elasticity': 210000.0, "Poisson's ratio": 0.3, 'Yield stress plate': 355.0, 'Yield stress stiffener': 355.0, 'Axial stress': 40.0, 'Trans. stress 1': 90.0, 'Trans. stress 2': 90.0, 'Shear stress': 5.0, 'Pressure (fixed)': 0.387068, 'In-plane support': 'Int'}, 'line24': {'Identification': 'line24', 'Length of panel': 3200.0, 'Stiffener spacing': 750.0, 'Plate thickness': 18.0, 'Number of primary stiffeners': 10, 'Stiffener type (L,T,F)': 'T', 'Stiffener boundary': 'C', 'Stiff. Height': 350.0, 'Web thick.': 12.0, 'Flange width': 150.0, 'Flange thick.': 20.0, 'Tilt angle': 0, 'Number of sec. stiffeners': 0, 'Modulus of elasticity': 210000.0, "Poisson's ratio": 0.3, 'Yield stress plate': 355.0, 'Yield stress stiffener': 355.0, 'Axial stress': 40.0, 'Trans. stress 1': 90.0, 'Trans. stress 2': 90.0, 'Shear stress': 5.0, 'Pressure (fixed)': 0.349613, 'In-plane support': 'Int'}, 'line25': {'Identification': 'line25', 'Length of panel': 3400.0, 'Stiffener spacing': 750.0, 'Plate thickness': 18.0, 'Number of primary stiffeners': 10, 'Stiffener type (L,T,F)': 'T', 'Stiffener boundary': 'C', 'Stiff. Height': 350.0, 'Web thick.': 12.0, 'Flange width': 150.0, 'Flange thick.': 20.0, 'Tilt angle': 0, 'Number of sec. 
stiffeners': 0, 'Modulus of elasticity': 210000.0, "Poisson's ratio": 0.3, 'Yield stress plate': 355.0, 'Yield stress stiffener': 355.0, 'Axial stress': 40.0, 'Trans. stress 1': 90.0, 'Trans. stress 2': 90.0, 'Shear stress': 5.0, 'Pressure (fixed)': 0.309662, 'In-plane support': 'Int'}, 'line26': {'Identification': 'line26', 'Length of panel': 3400.0, 'Stiffener spacing': 750.0, 'Plate thickness': 18.0, 'Number of primary stiffeners': 10, 'Stiffener type (L,T,F)': 'T', 'Stiffener boundary': 'C', 'Stiff. Height': 320.0, 'Web thick.': 12.0, 'Flange width': 150.0, 'Flange thick.': 20.0, 'Tilt angle': 0, 'Number of sec. stiffeners': 0, 'Modulus of elasticity': 210000.0, "Poisson's ratio": 0.3, 'Yield stress plate': 355.0, 'Yield stress stiffener': 355.0, 'Axial stress': 40.0, 'Trans. stress 1': 90.0, 'Trans. stress 2': 90.0, 'Shear stress': 5.0, 'Pressure (fixed)': 0.267214, 'In-plane support': 'Int'}, 'line27': {'Identification': 'line27', 'Length of panel': 3600.0000000000014, 'Stiffener spacing': 750.0, 'Plate thickness': 15.0, 'Number of primary stiffeners': 10, 'Stiffener type (L,T,F)': 'T', 'Stiffener boundary': 'C', 'Stiff. Height': 320.0, 'Web thick.': 12.0, 'Flange width': 150.0, 'Flange thick.': 20.0, 'Tilt angle': 0, 'Number of sec. stiffeners': 0, 'Modulus of elasticity': 210000.0, "Poisson's ratio": 0.3, 'Yield stress plate': 355.0, 'Yield stress stiffener': 355.0, 'Axial stress': 40.0, 'Trans. stress 1': 90.0, 'Trans. stress 2': 90.0, 'Shear stress': 5.0, 'Pressure (fixed)': 0.224765, 'In-plane support': 'Int'}, 'line28': {'Identification': 'line28', 'Length of panel': 3800.000000000001, 'Stiffener spacing': 750.0, 'Plate thickness': 15.0, 'Number of primary stiffeners': 10, 'Stiffener type (L,T,F)': 'T', 'Stiffener boundary': 'C', 'Stiff. Height': 320.0, 'Web thick.': 12.0, 'Flange width': 150.0, 'Flange thick.': 20.0, 'Tilt angle': 0, 'Number of sec. 
stiffeners': 0, 'Modulus of elasticity': 210000.0, "Poisson's ratio": 0.3, 'Yield stress plate': 355.0, 'Yield stress stiffener': 355.0, 'Axial stress': 40.0, 'Trans. stress 1': 90.0, 'Trans. stress 2': 90.0, 'Shear stress': 5.0, 'Pressure (fixed)': 0.17982, 'In-plane support': 'Int'}, 'line29': {'Identification': 'line29', 'Length of panel': 4000.0, 'Stiffener spacing': 750.0, 'Plate thickness': 15.0, 'Number of primary stiffeners': 10, 'Stiffener type (L,T,F)': 'T', 'Stiffener boundary': 'C', 'Stiff. Height': 300.0, 'Web thick.': 12.0, 'Flange width': 150.0, 'Flange thick.': 20.0, 'Tilt angle': 0, 'Number of sec. stiffeners': 0, 'Modulus of elasticity': 210000.0, "Poisson's ratio": 0.3, 'Yield stress plate': 355.0, 'Yield stress stiffener': 355.0, 'Axial stress': 40.0, 'Trans. stress 1': 90.0, 'Trans. stress 2': 90.0, 'Shear stress': 5.0, 'Pressure (fixed)': 0.132378, 'In-plane support': 'Int'}, 'line30': {'Identification': 'line30', 'Length of panel': 4000.0, 'Stiffener spacing': 750.0, 'Plate thickness': 15.0, 'Number of primary stiffeners': 10, 'Stiffener type (L,T,F)': 'T', 'Stiffener boundary': 'C', 'Stiff. Height': 300.0, 'Web thick.': 12.0, 'Flange width': 150.0, 'Flange thick.': 20.0, 'Tilt angle': 0, 'Number of sec. stiffeners': 0, 'Modulus of elasticity': 210000.0, "Poisson's ratio": 0.3, 'Yield stress plate': 355.0, 'Yield stress stiffener': 355.0, 'Axial stress': 40.0, 'Trans. stress 1': 90.0, 'Trans. stress 2': 90.0, 'Shear stress': 5.0, 'Pressure (fixed)': 0.082439, 'In-plane support': 'Int'}}
# Prescriptive buckling input for a single panel run ('line3' only).
# Units follow the rest of the file (lengths presumably mm, stresses MPa — TODO confirm).
_line3_data = {
    'Identification': 'line3',
    # panel geometry
    'Length of panel': 4000.0, 'Stiffener spacing': 700.0, 'Plate thickness': 18.0,
    'Number of primary stiffeners': 10,
    # stiffener scantlings
    'Stiffener type (L,T,F)': 'T', 'Stiffener boundary': 'C',
    'Stiff. Height': 400.0, 'Web thick.': 12.0,
    'Flange width': 200.0, 'Flange thick.': 20.0,
    'Tilt angle': 0, 'Number of sec. stiffeners': 0,
    # material
    'Modulus of elasticity': 210000.0, "Poisson's ratio": 0.3,
    'Yield stress plate': 355.0, 'Yield stress stiffener': 355.0,
    # loads
    'Axial stress': 101.7, 'Trans. stress 1': 100.0, 'Trans. stress 2': 100.0,
    'Shear stress': 5.0, 'Pressure (fixed)': 0.41261,
    'In-plane support': 'Int',
}
run_dict_one = {'line3': _line3_data}
# Geometry input for the cylindrical-shell example.
# Original values were entered in mm; stored here in metres.
# NOTE(review): the key 'eff. buckling lenght factor' is misspelled but is read
# elsewhere under exactly this spelling — do not rename in isolation.
shell_dict = {
    'plate_thk': [0.02, 'm'],
    'radius': [5.0, 'm'],
    'distance between rings, l': [0.7, 'm'],
    'length of shell, L': [5.0, 'm'],
    'tot cyl length, Lc': [5.0, 'm'],
    'eff. buckling lenght factor': [1, ''],
    'mat_yield': [355e6, 'Pa'],
}
# Design-load input for the cylindrical-shell example.
# Field meanings are inferred from the key names (sasd/smsd/tTsd/... look like
# DNV shell-buckling design stresses) — TODO confirm against the consumer class.
shell_main_dict = {
    'sasd': [-10e6, 'Pa'],
    'smsd': [-10e6, 'Pa'],
    'tTsd': [40e6, 'Pa'],
    'tQsd': [40e6, 'Pa'],
    'psd': [-0.1e6, 'Pa'],
    'shsd': [0, 'Pa'],
    'geometry': [3, '-'],
    'material factor': [1.15, ''],
    'delta0': [0.005, ''],
    'fab method ring stf': [1, ''],
    'fab method ring girder': [1, ''],
    'E-module': [2.1e11, 'Pa'],
    'poisson': [0.3, '-'],
    'mat_yield': [355e6, 'Pa'],
    'length between girders': [None, 'm'],
    'panel spacing, s': [2, 'm'],
    'ring stf excluded': [False, ''],
    'ring frame excluded': [True, ''],
    'end cap pressure': ['not included in axial stresses', ''],
    'ULS or ALS': ['ULS', ''],
}
# NOTE(review): dead code preserved as a bare module-level string literal (it is
# evaluated and discarded at import time). It appears to document which
# shell_main_dict fields a consumer object reads — verify nothing relies on this
# string before deleting it.
'''
self._length_between_girders = main_dict['length between girders'][0]
self._panel_spacing = main_dict['panel spacing, s'][0]
self.__ring_stiffener_excluded = main_dict['ring stf excluded'][0]
self.__ring_frame_excluded = main_dict['ring frame excluded'][0]'''
# Second design-load case for the cylindrical-shell example (geometry code 5).
# Stress values are kept as the original MPa-to-Pa products so the floats are
# bit-identical to the previous definition style.
shell_main_dict2 = {
    'sasd': [79.58 * 1e6, 'Pa'],
    'smsd': [31.89 * 1e6, 'Pa'],
    'tTsd': [12.73 * 1e6, 'Pa'],
    'tQsd': [4.77 * 1e6, 'Pa'],
    'psd': [-0.2 * 1e6, 'Pa'],
    'shsd': [0, 'Pa'],
    'geometry': [5, '-'],
    'material factor': [1.15, ''],
    'delta0': [0.005, ''],
    'fab method ring stf': [1, ''],
    'fab method ring girder': [1, ''],
    'E-module': [2.1e11, 'Pa'],
    'poisson': [0.3, '-'],
    'mat_yield': [355 * 1e6, 'Pa'],
    'length between girders': [None, 'm'],
    'panel spacing, s': [0.7, 'm'],
    'ring stf excluded': [False, ''],
    'ring frame excluded': [True, ''],
    'end cap pressure': ['not included in axial stresses', ''],
    'ULS or ALS': ['ULS', ''],
}
# Main settings for the prescriptive flat-plate buckling checks.
# NOTE(review): the key 'plate effective agains sigy' is misspelled but is read
# elsewhere under exactly this spelling — do not "fix" it in isolation.
prescriptive_main_dict = {
    'minimum pressure in adjacent spans': [None, ''],
    'material yield': [355e6, 'Pa'],
    'load factor on stresses': [1, ''],
    'load factor on pressure': [1, ''],
    'buckling method': ['ultimate', ''],
    'stiffener end support': ['Continuous', ''],
    'girder end support': ['Continuous', ''],
    'tension field': ['not allowed', ''],
    'plate effective agains sigy': [True, ''],
    'buckling length factor stf': [None, ''],
    'buckling length factor girder': [None, ''],
    'km3': [12, ''],
    'km2': [24, ''],
    'girder distance between lateral support': [None, ''],
    'stiffener distance between lateral support': [None, ''],
    'kgirder': [None, ''],
    'panel length, Lp': [None, ''],
    'pressure side': ['both sides', ''],  # 'stiffener', 'plate' or 'both sides'
    'fabrication method stiffener': ['welded', ''],
    'fabrication method girder': ['welded', ''],
    'calculation domain': ['Flat plate, stiffened', ''],
}
def get_slamming_pressure():
    """Return the default slamming pressure used by the examples (units presumably Pa — confirm)."""
    return 1_000_000
def get_fatigue_pressures():
    """Return the default fatigue pressures: external/internal values for loaded and ballast conditions."""
    external = {'loaded': 50000, 'ballast': 60000, 'part': 0}
    internal = {'loaded': 0, 'ballast': 20000, 'part': 0}
    return {'p_ext': external, 'p_int': internal}
def get_fatigue_pressures_problematic():
    """Return a fatigue-pressure dictionary known to be problematic (used for error-case testing)."""
    return {
        'p_ext': {'loaded': 192632, 'ballast': 198705.5, 'part': 0},
        'p_int': {'loaded': 0, 'ballast': 15118, 'part': 0},
    }
def get_loa_fls_load():
    """Return a load.Loads object for the loaded-condition FLS case (module-level dict ``loa_fls``)."""
    return load.Loads(loa_fls)
def get_loa_uls_load():
    """Return a load.Loads object for the loaded-condition ULS case (module-level dict ``loa_uls``)."""
    return load.Loads(loa_uls)
def get_bal_fls_load():
    """Return a load.Loads object for the ballast-condition FLS case (module-level dict ``bal_fls``)."""
    return load.Loads(bal_fls)
def get_bal_uls_load():
    """Return a load.Loads object for the ballast-condition ULS case (module-level dict ``bal_uls``)."""
    return load.Loads(bal_uls)
def get_object_dictionary():
    """Return the module-level default structure input dictionary ``obj_dict``."""
    return obj_dict
def get_structure_object(line=None):
    """Return a CalcScantlings object for the given line name.

    Frame lines ('line4', 'line11', 'line12', 'line13') use the frame
    input dictionary; every other line uses the default dictionary.
    """
    frame_lines = {'line4', 'line11', 'line12', 'line13'}
    chosen_dict = obj_dict_fr if line in frame_lines else obj_dict
    return calc_structure.CalcScantlings(chosen_dict)
def get_structure_calc_object(line=None, heavy=False):
    """Return a CalcScantlings object for the given line.

    Frame lines use the frame dictionary; otherwise the heavy or the
    default dictionary is selected via the ``heavy`` flag.
    """
    if line in ('line12', 'line13', 'line11', 'line4'):
        return calc_structure.CalcScantlings(obj_dict_fr)
    if heavy:
        return calc_structure.CalcScantlings(obj_dict_heavy)
    return calc_structure.CalcScantlings(obj_dict)
def get_fatigue_object():
    """Return a CalcFatigue object built from the default structure and fatigue dictionaries."""
    return calc_structure.CalcFatigue(obj_dict, fat_obj_dict)
def get_fatigue_object_problematic():
    """Return a CalcFatigue object for the known-problematic input dictionaries (error-case testing)."""
    return calc_structure.CalcFatigue(obj_dict_sec_error, fat_obj_dict_problematic)
def get_tank_object():
    """Return a load.Tanks object built from the module-level ballast tank dictionary."""
    return load.Tanks(tank_dict=tank_dict_ballast)
def get_line_to_struc(geo=False):
    """Build the line -> [AllStructure, None, None, [None], {}] mapping used by the examples.

    :param geo: unused; kept for API compatibility
    :return: dict keyed by line name
    """
    line_to_struc = {}
    for line_name in line_dict.keys():
        plate = get_structure_object(line_name)
        stiffener = get_structure_object(line_name)
        girder = None  # CalcScantlings(ex.obj_dict_heavy)
        all_structure = calc_structure.AllStructure(
            Plate=plate, Stiffener=stiffener, Girder=girder,
            main_dict=prescriptive_main_dict)
        line_to_struc[line_name] = [all_structure, None, None, [None], {}]
    return line_to_struc
def get_default_stresses():
    """Return the default design stress tuple for each structure type.

    Values are tuples of stress components (units presumably MPa — confirm
    against the consumer of this dictionary).
    NOTE(review): 'BBT' has only 4 entries while every other type has 5,
    and 'MD' contains a lone 4 where 40 looks plausible — possible data
    typos, verify against the original source of these defaults.
    """
    return {'BOTTOM':(100,100,50,50,5), 'BBS':(70,70,30,30,3), 'BBT':(80,80,30,3), 'HOPPER':(70,70,50,50,3),
            'SIDE_SHELL':(100,100,40,40,3),'INNER_SIDE':(80,80,40,40,5), 'FRAME':(70,70,60,0,10),
            'FRAME_WT':(70,70,60,0,10),'SSS':(100,100,50,50,20), 'MD':(70,70,4,40,3),
            'GENERAL_INTERNAL_WT':(90,90,40,40,5),'GENERAL_INTERNAL_NONWT':(70,70,30,30,3),
            'INTERNAL_1_MPA':(1,1,1,1,1), 'INTERNAL_LOW_STRESS_WT':(40,40,20,20,5)}
def get_opt_frames():
    """Return the module-level ``opt_frames`` together with the default optimisation corner points."""
    return opt_frames,['point1', 'point4', 'point8', 'point5']
def get_point_dict():
    """Return the module-level point dictionary (point name -> coordinates)."""
    return point_dict
def get_line_dict():
    """Return the module-level line dictionary (line name -> point numbers)."""
    return line_dict
def get_grid(origo,base_canvas_dim):
    """Return a grid.Grid sized from the canvas origin and canvas dimensions."""
    return grid.Grid(origo[1] + 1, base_canvas_dim[0] - origo[0] + 1)
def get_grid_no_inp(empty_grid=False):
    """Return a Grid for the default canvas; unless ``empty_grid``, mark every drawn line point as a barrier."""
    origo = (50, 670)
    base_canvas_dim = [1000, 720]
    canvas_grid = grid.Grid(origo[1] + 1, base_canvas_dim[0] - origo[0] + 1)
    if empty_grid:
        return canvas_grid
    # Every point lying on a drawable line becomes a barrier in the grid.
    for coords in get_to_draw().values():
        for point in canvas_grid.get_points_along_line(coords[0], coords[1]):
            canvas_grid.set_barrier(point[0], point[1])
    return canvas_grid
def get_grid_empty():
    """Return an empty grid.Grid sized for the default canvas (no barriers set)."""
    origo = (50,670)
    base_canvas_dim = [1000,720]
    grid_return = grid.Grid(origo[1] + 1, base_canvas_dim[0] - origo[0] + 1)
    return grid_return
def get_to_draw():
    """Return {line name: (grid_coord_p1, grid_coord_p2)} for every line in ``line_dict``."""
    drawable = {}
    for line_name in line_dict.keys():
        end_points = line_dict[line_name]
        start = get_grid_coord_from_points_coords(point_dict['point' + str(end_points[0])])
        end = get_grid_coord_from_points_coords(point_dict['point' + str(end_points[1])])
        drawable[line_name] = (start, end)
    return drawable
def get_geo_opt_presure():
    """Return the default pressure for each of the six geometry-optimisation panels.

    Note: the misspelled name ('presure') is kept for API compatibility.
    """
    return tuple(200 for _ in range(6))
def get_random_pressure():
    """Return a pseudo-random pressure uniformly distributed in [150, 250)."""
    base, spread = 150, 100
    return base + spread * random.random()
def get_random_color():
    """Return 'red' with probability 1/4, otherwise 'green'."""
    weighted_palette = ('red', 'green', 'green', 'green')
    return random.choice(weighted_palette)
def get_geo_opt_object():
    """Return six CalcScantlings objects for geometry optimisation.

    The original six input dictionaries were identical apart from
    'structure_type' (three 'BOTTOM' panels followed by three
    'GENERAL_INTERNAL_WT' panels); building them from a factory removes
    the duplicated literals while guaranteeing that every object still
    receives its own, independently mutable [value, unit] lists.

    :return: list of six calc_structure.CalcScantlings objects
    """
    def _geo_dict(stype):
        # Fresh dict with fresh lists on every call, so the CalcScantlings
        # instances cannot share mutable state (matches original behaviour).
        return {'mat_yield': [355000000.0, 'Pa'], 'span': [4.0, 'm'], 'spacing': [0.7, 'm'],
                'plate_thk': [0.015, 'm'], 'stf_web_height': [0.4, 'm'], 'stf_web_thk': [0.012, 'm'],
                'stf_flange_width': [0.15, 'm'], 'stf_flange_thk': [0.02, 'm'],
                'structure_type': [stype, ''], 'plate_kpp': [1, ''], 'stf_kps': [1, ''],
                'stf_km1': [12, ''], 'stf_km2': [24, ''], 'stf_km3': [12, ''],
                'sigma_y1': [80, 'MPa'], 'sigma_y2': [80, 'MPa'], 'sigma_x2': [80, 'MPa'],
                'sigma_x1': [80, 'MPa'], 'tau_xy': [5, 'MPa'], 'stf_type': ['T', ''],
                'structure_types': [structure_types, ''], 'zstar_optimization': [True, '']}
    panel_types = ['BOTTOM'] * 3 + ['GENERAL_INTERNAL_WT'] * 3
    return [calc_structure.CalcScantlings(_geo_dict(stype)) for stype in panel_types]
def get_geo_opt_fatigue():
    """Return one default CalcFatigue object per geometry-optimisation panel."""
    return [get_fatigue_object() for dummy in range(len(get_geo_opt_presure()))]
def get_geo_opt_fat_press():
    """Return one default fatigue-pressure dict per geometry-optimisation panel.

    Bug fix: this function was previously defined twice with identical
    bodies; the second definition silently shadowed the first. The
    duplicate has been removed.
    """
    return [get_fatigue_pressures() for dummy in range(len(get_geo_opt_presure()))]
def get_geo_opt_slamming_none():
    """Return a zero slamming pressure for every geometry-optimisation panel."""
    return [0] * len(get_geo_opt_presure())
def get_geo_opt_slamming():
    """Return the default slamming pressure for every geometry-optimisation panel."""
    return [get_slamming_pressure()] * len(get_geo_opt_presure())
def get_grid_coord_from_points_coords(point_coord):
    '''
    Converts coordinates to be used in the grid. Returns (row,col). This value will not change with slider.
    :param point_coord: (x, y) point coordinate in model units
    :return: (row, col) grid coordinate
    '''
    # Canvas origin: x = 50 (unused here), y = 670; 10 grid cells per model unit.
    canvas_origo_y = 670
    cells_per_unit = 10
    row = canvas_origo_y - point_coord[1] * cells_per_unit
    col = point_coord[0] * cells_per_unit
    return (row, col)
def get_section_list():
    ''' Returning a section list. '''
    # Imported locally to avoid a hard dependency on the GUI window module
    # at import time of this examples module.
    import pl_stf_window as plstf
    return [plstf.Section(obj_dict), plstf.Section(obj_dict2), plstf.Section(obj_dict_L)]
if __name__ == '__main__':
    print(get_random_color())
/GNN4LP-0.1.0-py3-none-any.whl/src/graph_att_gan/predict.py | import os
from configparser import ConfigParser
import numpy as np
import scipy.sparse as sp
import sys
sys.path.append(r'/home/shiyan/project/gnn4lp/')
from src.util.load_data import load_data_with_features, load_data_without_features
class Predict():
    """Reconstruct a link-prediction adjacency matrix from saved node embeddings."""
    def __init__(self):
        # Node embedding matrix loaded from disk via np.load in load_model_adj.
        self.hidden_emb = None
        # Observed adjacency matrix (scipy sparse) with the diagonal removed.
        self.adj_orig = None
    def load_model_adj(self, config_path):
        '''
        Load the saved embedding matrix and the original adjacency matrix.

        :param config_path: path to a file named exactly 'config.cfg';
                            its first section must define data_catalog,
                            node_cites_path, node_features_path, with_feats
                            and model_path
        :return: None
        :raises FileNotFoundError: if the config, model or data files are missing
        '''
        if os.path.exists(config_path) and (os.path.split(config_path)[1].split('.')[0] == 'config') and (os.path.splitext(config_path)[1].split('.')[1] == 'cfg'):
            # load config file
            config = ConfigParser()
            config.read(config_path)
            section = config.sections()[0]
            # data catalog path
            data_catalog = config.get(section, "data_catalog")
            # node cites path
            node_cites_path = config.get(section, "node_cites_path")
            node_cites_path = os.path.join(data_catalog, node_cites_path)
            # node features path
            node_features_path = config.get(section, 'node_features_path')
            node_features_path = os.path.join(data_catalog, node_features_path)
            # whether node features are available for this dataset
            with_feats = config.getboolean(section, 'with_feats')
            # model save/load path
            model_path = config.get(section, "model_path")
            if not os.path.exists(model_path):
                raise FileNotFoundError('Not found model file!')
            if not os.path.exists(node_cites_path):
                raise FileNotFoundError('Not found node_cites_file!')
            self.hidden_emb = np.load(model_path)
            if with_feats:
                if not os.path.exists(os.path.join(data_catalog, node_features_path)):
                    raise FileNotFoundError('Not found node_features_file!')
                adj, _ = load_data_with_features(node_cites_path, node_features_path)
            else:
                adj = load_data_without_features(node_cites_path)
            # remove the diagonal (self-loop) elements from the adjacency matrix
            self.adj_orig = adj - sp.dia_matrix((adj.diagonal()[np.newaxis, :], [0]), shape=adj.shape)
            self.adj_orig.eliminate_zeros()
        else:
            raise FileNotFoundError('File config.cfg not found : ' + config_path)
    def predict(self):
        """Return (adj_orig, adj_rec): edge probabilities from the sigmoid of the embedding inner products."""
        def sigmoid(x):
            return 1 / (1 + np.exp(-x))
        # inner product of the embeddings gives the raw link scores
        adj_rec = np.dot(self.hidden_emb, self.hidden_emb.T)
        adj_rec = sigmoid(adj_rec)
        return self.adj_orig, adj_rec
if __name__ == '__main__':
config_path = os.path.join(os.getcwd(), 'config.cfg')
predict = Predict()
predict.load_model_adj(config_path)
adj_orig, adj_rec = predict.predict()
adj_rec = (adj_rec > 0.5) + 0
    print('adj_orig: {}, \n adj_rec: {}'.format(adj_orig, adj_rec[0][:100]))
/Dans_Diffraction-3.0.0-py3-none-any.whl/Dans_Diffraction/functions_plotting.py | import sys, os
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.patches import FancyArrowPatch
from mpl_toolkits.mplot3d import proj3d
from . import functions_general as fg
from . import functions_crystallography as fc
# Module version string.
__version__ = '2.1'
# Default font family for plot text.
DEFAULT_FONT = 'Times New Roman'
# Default font size in points.
DEFAULT_FONTSIZE = 14
# Default figure size [width, height] in inches and figure resolution in dpi.
FIGURE_SIZE = [12, 8]
FIGURE_DPI = 80
# Section marker (bare string, no runtime effect).
'----------------------------Plot manipulation--------------------------'
def set_plot_defaults(rcdefaults=False):
    """
    Set custom matplotlib rcparams, or revert to matplotlib defaults
    These handle the default look of matplotlib plots
    See: https://matplotlib.org/stable/tutorials/introductory/customizing.html#the-default-matplotlibrc-file
    :param rcdefaults: False*/ True, if True, revert to matplotlib defaults
    :return: None
    """
    if rcdefaults:
        print('Return matplotlib rcparams to default settings.')
        plt.rcdefaults()
        return
    # Each plt.rc call updates a group of rcParams; later calls do not
    # overwrite earlier groups, so the order below is not significant
    # except within a group.
    plt.rc('figure', figsize=FIGURE_SIZE, dpi=FIGURE_DPI, autolayout=False)
    plt.rc('lines', marker='o', color='r', linewidth=2, markersize=6)
    plt.rc('errorbar', capsize=2)
    plt.rc('legend', loc='best', frameon=False, fontsize=DEFAULT_FONTSIZE)
    plt.rc('axes', linewidth=2, titleweight='bold', labelsize='large')
    plt.rc('xtick', labelsize='large')
    plt.rc('ytick', labelsize='large')
    # Scientific notation outside 10^-3..10^3, offset only for >= 6 digits.
    plt.rc('axes.formatter', limits=(-3, 3), offset_threshold=6)
    plt.rc('image', cmap='viridis') # default colourmap, see https://matplotlib.org/stable/gallery/color/colormap_reference.html
    # Note font values appear to only be set when plt.show is called
    plt.rc(
        'font',
        family='serif',
        style='normal',
        weight='bold',
        size=DEFAULT_FONTSIZE,
        serif=['Times New Roman', 'Times', 'DejaVu Serif']
    )
    # plt.rcParams["savefig.directory"] = os.path.dirname(__file__) # Default save directory for figures
def labels(ttl=None, xvar=None, yvar=None, zvar=None, legend=False, size='Normal', font='Times New Roman'):
    """
    Add formatted labels to current plot, also increases the tick size
    :param ttl: title
    :param xvar: x label
    :param yvar: y label
    :param zvar: z label (3D plots only)
    :param legend: False/ True, adds default legend to plot
    :param size: 'Normal' or 'Big'
    :param font: str font name, 'Times New Roman'
    :return: None
    """
    # Font sizes for ticks, title, axis labels and legend respectively.
    if size.lower() in ['big', 'large', 'xxl', 'xl']:
        tik = 30
        tit = 32
        lab = 35
        leg = 25
    else:
        # Normal
        tik = 18
        tit = 20
        lab = 22
        leg = 18
    plt.xticks(fontsize=tik, fontname=font)
    plt.yticks(fontsize=tik, fontname=font)
    plt.setp(plt.gca().spines.values(), linewidth=2)
    # ticklabel_format only applies to linear-scale, non-polar axes.
    if plt.gca().get_yaxis().get_scale() != 'log' and 'linear' in plt.gca().name:
        plt.ticklabel_format(useOffset=False)
        plt.ticklabel_format(style='sci', scilimits=(-3,3))
    if ttl is not None:
        plt.gca().set_title(ttl, fontsize=tit, fontweight='bold', fontname=font)
    if xvar is not None:
        plt.gca().set_xlabel(xvar, fontsize=lab, fontname=font)
    if yvar is not None:
        plt.gca().set_ylabel(yvar, fontsize=lab, fontname=font)
    if zvar is not None:
        # Don't think this works, use ax.set_zaxis
        plt.gca().set_zlabel(zvar, fontsize=lab, fontname=font)
        # NOTE(review): Tick.label was deprecated/removed in newer matplotlib
        # (use label1) — confirm against the supported matplotlib version.
        for t in plt.gca().zaxis.get_major_ticks():
            t.label.set_fontsize(tik)
            t.label.set_fontname(font)
    if legend:
        plt.legend(loc=0, frameon=False, prop={'size': leg, 'family': 'serif'})
def saveplot(name, dpi=None, figure=None):
    """
    Saves current figure as a png in the home directory

    :param name: filename, including or excluding directory and or extension
    :param dpi: image resolution, higher means larger image size, default=matplotlib default
    :param figure: figure number, default = plt.gcf()
    :return: None

    E.G.
    ---select figure to save by clicking on it---
    saveplot('test')
    E.G.
    saveplot('c:\\somedir\\apicture.jpg', dpi=600, figure=3)
    """
    if type(name) is int:
        name = str(name)
    if figure is None:
        gcf = plt.gcf()
    else:
        gcf = plt.figure(figure)
    # Bug fix: os.path.basename returns a single string, so the old
    # "file, ext = os.path.basename(name)" raised ValueError for any
    # filename not exactly two characters long. os.path.splitext is the
    # correct way to split the name and extension. Also renamed the local
    # 'dir' to avoid shadowing the builtin.
    save_dir = os.path.dirname(name)
    file, ext = os.path.splitext(os.path.basename(name))
    if len(save_dir) == 0:
        save_dir = os.path.expanduser('~')
    if len(ext) == 0:
        ext = '.png'
    savefile = os.path.join(save_dir, file + ext)
    gcf.savefig(savefile, dpi=dpi)
    print('Saved Figure {} as {}'.format(gcf.number, savefile))
def newplot(*args, **kwargs):
    """
    Shortcut to creating a simple plot
    E.G.
      x = np.arange(-5,5,0.1)
      y = x**2
      newplot(x,y,'r-',lw=2,label='Line')
    """
    # Bug fix: the old test ("'linewidth' and 'lw' not in kwargs") only
    # ever checked for 'lw', so an explicit linewidth= argument was
    # silently overwritten with 2.
    if 'linewidth' not in kwargs and 'lw' not in kwargs:
        kwargs['linewidth'] = 2
    plt.figure(figsize=FIGURE_SIZE, dpi=FIGURE_DPI)
    plt.plot(*args, **kwargs)
    plt.setp(plt.gca().spines.values(), linewidth=2)
    plt.xticks(fontsize=25, fontname='Times New Roman')
    plt.yticks(fontsize=25, fontname='Times New Roman')
    plt.ticklabel_format(useOffset=False)
    plt.ticklabel_format(style='sci', scilimits=(-3, 3))
def multiplot(xvals, yvals=None, datarange=None, cmap='jet', labels=None, marker=None):
    """
    Shortcut to creating a simple multiplot with either colorbar or legend
    E.G.
      x = np.arange(-5,5,0.1)
      ys = [x**2, 1+x**2, 2+x**2, 3+x**2, 4+x**2]
      datarange = [0,1,2,3,4]
      multiplot(x, ys, datarange, cmap='winter')
    OR:
      x = np.arange(-5,5,0.1)
      ys = [x**2, 1+x**2, 2+x**2, 3+x**2, 4+x**2]
      labels = ['x*x','2+x*x','3+x*x','4+x*x']
      multiplot(x, ys, labels=labels)
    """
    # Single positional argument means only y data was supplied.
    if yvals is None:
        yvals = xvals
        xvals = []
    yvals = np.asarray(yvals)
    xvals = np.asarray(xvals)
    if datarange is None:
        datarange = range(len(yvals))
    datarange = np.asarray(datarange, dtype=float)
    cm = plt.get_cmap(cmap)
    # Robustness fix: avoid division by zero (NaN colours) when all
    # datarange values are equal, e.g. a single dataset.
    span = datarange.max() - datarange.min()
    if span == 0:
        colrange = np.zeros_like(datarange)
    else:
        colrange = (datarange - datarange.min()) / span
    if marker is None:
        marker = ''
    linearg = '-' + marker
    plt.figure(figsize=FIGURE_SIZE, dpi=FIGURE_DPI)
    for n in range(len(datarange)):
        col = cm(colrange[n])
        if len(xvals) == 0:
            plt.plot(yvals[n], linearg, lw=2, color=col)
        elif len(xvals.shape) == 1:
            # shared 1D x axis for every dataset
            plt.plot(xvals, yvals[n], linearg, lw=2, color=col)
        else:
            plt.plot(xvals[n], yvals[n], linearg, lw=2, color=col)
    plt.setp(plt.gca().spines.values(), linewidth=2)
    plt.xticks(fontsize=25, fontname='Times New Roman')
    plt.yticks(fontsize=25, fontname='Times New Roman')
    plt.ticklabel_format(useOffset=False)
    plt.ticklabel_format(style='sci', scilimits=(-3, 3))
    if labels is None:
        # Add Colorbar
        sm = plt.cm.ScalarMappable(cmap=cm)
        sm.set_array(datarange)
        cbar = plt.colorbar(sm)
        #cbar.set_label('variation [unit]', fontsize=24, fontweight='bold', fontname='Times New Roman')
    else:
        # Add legend
        plt.legend(labels, loc=0, frameon=False, prop={'size':20,'family':'serif'})
def newplot3(*args, **kwargs):
    """
    Shortcut to creating a simple 3D plot
    Automatically tiles 1 dimensional x and y arrays to match 2D z array,
    assuming z.shape = (len(x),len(y))
    newplot3(x, y, z, ...)
    E.G.
      newplot3([1,2,3,4],[9,8,7],[[2,4,6],[8,10,12],[14,16,18],[20,22,24]],'-o')
    """
    # Bug fix: the old test ("'linewidth' and 'lw' not in kwargs") only
    # ever checked for 'lw', so an explicit linewidth= argument was
    # silently overwritten with 2.
    if 'linewidth' not in kwargs and 'lw' not in kwargs:
        kwargs['linewidth'] = 2
    fig = plt.figure(figsize=FIGURE_SIZE, dpi=FIGURE_DPI)
    ax = fig.add_subplot(111, projection='3d')
    x = np.asarray(args[0], dtype=float)
    y = np.asarray(args[1], dtype=float)
    z = np.asarray(args[2], dtype=float)
    if z.ndim == 2:
        # Tile 1D x/y arrays up to the 2D z shape so each row plots as a line.
        if x.ndim < 2:
            x = np.tile(x, z.shape[1]).reshape(z.T.shape).T
        if y.ndim < 2:
            y = np.tile(y, z.shape[0]).reshape(z.shape)
        # Plot each array independently
        for n in range(len(z)):
            ax.plot(x[n], y[n], z[n], *args[3:], **kwargs)
    else:
        ax.plot(*args, **kwargs)
def plot3darray(vec, *args, **kwargs):
    """
    Plot 3D vectors in 3D
    plt.plot(vec[:, 0], vec[:, 1], vec[:, 2], *args, **kwargs)
    :param vec: [n*3] array
    :param args: args to pass to plt.plot
    :param kwargs: kwargs to pass to plt.plot
    :return: matplotlib plot object
    """
    # Accept flat input; reshape to rows of (x, y, z).
    vec = np.reshape(vec, (-1, 3))
    return plt.plot(vec[:, 0], vec[:, 1], vec[:, 2], *args, **kwargs)
def sliderplot(YY, X=None, slidervals=None, *args, **kwargs):
    """
    Shortcut to creating a simple 2D plot with a slider to go through a third dimension
    YY = [nxm]: y axis data (initially plots Y[0,:])
     X = [n] or [nxm]:  x axis data (can be 1D or 2D, either same length or shape as Y)
    slidervals = None or [m]: Values to give in the slider
    E.G.
      sliderplot([1,2,3],[[2,4,6],[8,10,12],[14,16,18],[20,22,24]],slidervals=[3,6,9,12])
    """
    # Bug fix: matplotlib.pyplot has no Slider attribute — the widget lives
    # in matplotlib.widgets (imported locally to keep module imports unchanged).
    from matplotlib.widgets import Slider
    # Bug fix: the old kwargs test only ever checked for 'lw', so an
    # explicit linewidth= argument was silently overwritten.
    if 'linewidth' not in kwargs and 'lw' not in kwargs:
        kwargs['linewidth'] = 2
    fig = plt.figure(figsize=FIGURE_SIZE, dpi=FIGURE_DPI)
    X = np.asarray(X, dtype=float)
    Y = np.asarray(YY, dtype=float)
    if slidervals is None:
        slidervals = range(Y.shape[0])
    slidervals = np.asarray(slidervals, dtype=float)
    if X.ndim < 2:
        X = np.tile(X, Y.shape[0]).reshape(Y.shape)
    plotline, = plt.plot(X[0, :], Y[0, :], *args, **kwargs)
    plt.axis([X.min(), X.max(), Y.min(), Y.max()])
    plt.subplots_adjust(bottom=0.2)
    ax = plt.gca()
    " Create slider on plot"
    # Bug fix: the 'axisbg' keyword was removed in matplotlib 2.x;
    # 'facecolor' is the replacement.
    axsldr = plt.axes([0.15, 0.05, 0.65, 0.03], facecolor='lightgoldenrodyellow')
    sldr = Slider(axsldr, '', 0, len(slidervals) - 1)
    txt = axsldr.set_xlabel('{} [{}]'.format(slidervals[0], 0), fontsize=18)
    plt.sca(ax)
    " Slider update function"
    def update(val):
        "Update function for pilatus image"
        pno = int(np.floor(sldr.val))
        plotline.set_xdata(X[pno, :])
        plotline.set_ydata(Y[pno, :])
        txt.set_text('{} [{}]'.format(slidervals[pno], pno))
        plt.draw()
        plt.gcf().canvas.draw()
        # fig1.canvas.draw()
    sldr.on_changed(update)
def sliderplot2D(ZZZ, XX=None, YY=None, slidervals=None, *args, **kwargs):
    """
    Shortcut to creating an image plot with a slider to go through a third dimension
    ZZZ = [nxmxo]: z axis data
     XX = [nxm] or [n]: x axis data
     YY = [nxm] or [m]: y axis data
    slidervals = None or [o]: Values to give in the slider
    if XX and/or YY have a single dimension, the 2D values are generated via meshgrid
    E.G.
      sliderplot([1,2,3],[[2,4,6],[8,10,12],[14,16,18],[20,22,24]],slidervals=[3,6,9,12])
    """
    # Bug fix: matplotlib.pyplot has no Slider attribute — the widget lives
    # in matplotlib.widgets (imported locally to keep module imports unchanged).
    from matplotlib.widgets import Slider
    # Bug fix: the old kwargs test only ever checked for 'lw', so an
    # explicit linewidth= argument was silently overwritten.
    if 'linewidth' not in kwargs and 'lw' not in kwargs:
        kwargs['linewidth'] = 2
    fig = plt.figure(figsize=FIGURE_SIZE, dpi=FIGURE_DPI)
    ZZZ = np.asarray(ZZZ, dtype=float)
    if slidervals is None:
        slidervals = range(ZZZ.shape[2])
    slidervals = np.asarray(slidervals, dtype=float)
    if XX is None:
        XX = range(ZZZ.shape[1])
    if YY is None:
        YY = range(ZZZ.shape[0])
    XX = np.asarray(XX, dtype=float)
    YY = np.asarray(YY, dtype=float)
    if XX.ndim < 2:
        XX, YY = np.meshgrid(XX, YY)
    p = plt.pcolormesh(XX, YY, ZZZ[:, :, 0])
    # p.set_clim(cax)
    plt.subplots_adjust(bottom=0.2)
    ax = plt.gca()
    ax.set_aspect('equal')
    ax.autoscale(tight=True)
    " Create slider on plot"
    # Bug fix: the 'axisbg' keyword was removed in matplotlib 2.x;
    # 'facecolor' is the replacement.
    axsldr = plt.axes([0.15, 0.05, 0.65, 0.03], facecolor='lightgoldenrodyellow')
    sldr = Slider(axsldr, '', 0, len(slidervals) - 1)
    txt = axsldr.set_xlabel('{} [{}]'.format(slidervals[0], 0), fontsize=18)
    plt.sca(ax)
    " Slider update function"
    def update(val):
        "Update function for pilatus image"
        pno = int(np.round(sldr.val))
        p.set_array(ZZZ[:-1, :-1, pno].ravel())
        txt.set_text('{} [{}]'.format(slidervals[pno], pno))
        plt.draw()
        plt.gcf().canvas.draw()
        # fig1.canvas.draw()
    sldr.on_changed(update)
def plot_cell(cell_centre=(0, 0, 0), CELL=np.eye(3), color='k'):
    """
    Plot a box defined by a unit cell on the current plot
    :param cell_centre: [1x3] array : centre of cell, default (0,0,0)
    :param CELL: [3x3] array : unit cell vectors [A,B,C]
    :param color: line colour passed to plt.plot, default 'k'
    :return: None
    """
    # Best-practice fix: the mutable list default was replaced with a tuple
    # (backward compatible; the value is only indexed, never mutated).
    # Fractional corner path that traces every edge of the unit box.
    uvw = np.array([[0., 0, 0], [1, 0, 0], [1, 0, 1], [1, 1, 1], [1, 1, 0], [0, 1, 0], [0, 1, 1],
                    [0, 0, 1], [1, 0, 1], [1, 0, 0], [1, 1, 0], [1, 1, 1], [0, 1, 1], [0, 1, 0], [0, 0, 0], [0, 0, 1]])
    uvw = uvw - 0.5  # plot around box centre
    bpos = np.dot(uvw, CELL)
    bpos = bpos + cell_centre
    plt.plot(bpos[:, 0], bpos[:, 1], bpos[:, 2], c=color)  # cell box
def plot_circle(radius=1.0, centre=(0, 0), height=0, *args, **kwargs):
    """
    Generate circle on current plot
    :param radius: radius of the circle
    :param centre: (x,y) centre of the circle
    :param height: reduce the radius by increasing the height from a surface
                   (NOTE: height > radius gives a negative sqrt argument — NaN)
    :param args: plot commands
    :param kwargs: plot commands
    :return: none
    """
    # Best-practice fix: the mutable list default was replaced with a tuple
    # (backward compatible; the value is only indexed, never mutated).
    deg = np.linspace(0, 360, 361)
    rad = np.deg2rad(deg)
    # Effective radius of the circle cut at the given height above a sphere surface.
    x = centre[0] + np.sqrt((radius**2 - height**2)) * np.cos(rad)
    y = centre[1] + np.sqrt((radius**2 - height**2)) * np.sin(rad)
    plt.plot(x, y, *args, **kwargs)
def plot_arrow(x, y, z=None, col='r', width=2, arrow_size=40):
    """
    Plot arrow in 2D or 3D on current axes
    Usage 2D:
      plot_arrow([xi,xf],[yi,yf])
    Usage 3D:
      plot_arrow([xi,xf],[yi,yf],[zi,zf])
    Options:
      width = line width (Def. = 2)
      arrow_size = size of arrow head (Def. = 40)
      col = arrow color (Def. = red)
    """
    current_axes = plt.gca()
    # 3D arrow only when z data is given AND the current axes are 3D.
    if z is not None and hasattr(current_axes, 'get_zlim'):
        arrow = Arrow3D(x, y, z, mutation_scale=arrow_size, lw=width, arrowstyle="-|>", color=col)
        current_axes.add_artist(arrow)
        return
    # 2D arrow from (x[0], y[0]) to (x[1], y[1]).
    start_x, start_y = x[0], y[0]
    delta_x, delta_y = x[1] - x[0], y[1] - y[0]
    plt.arrow(start_x, start_y, delta_x, delta_y,
              width=arrow_size / 4000.0, color=col, length_includes_head=True)
class Arrow3D(FancyArrowPatch):
    """
    FancyArrow3D patch for 3D arrows, by CT Zhu
    http://stackoverflow.com/questions/22867620/putting-arrowheads-on-vectors-in-matplotlibs-3d-plot
    Usage:
      fig = plt.figure()
      ax = fig.add_subplot(111, projection='3d')
      ax.plot([0,1],[0,0],[0,0],'k-')
      ax.plot([0,0],[0,1],[0,0],'k-')
      ax.plot([0,0],[0,0],[0,1],'k-')
      v = Arrow3D([0,1],[0,1],[0,1], mutation_scale=20, lw=3, arrowstyle="-|>", color="r")
      ax.add_artist(v)
    """
    def __init__(self, xs, ys, zs, *args, **kwargs):
        # Provide sensible arrow defaults if the caller did not supply them.
        if 'arrowstyle' not in kwargs.keys():
            kwargs['arrowstyle'] = "-|>"
        if 'mutation_scale' not in kwargs.keys():
            kwargs['mutation_scale'] = 20
        # Initialise the 2D patch at a dummy position; the real screen
        # positions are computed in draw() by projecting the 3D vertices.
        FancyArrowPatch.__init__(self, (0, 0), (0, 0), *args, **kwargs)
        self._verts3d = xs, ys, zs
    def draw(self, renderer):
        xs3d, ys3d, zs3d = self._verts3d
        # NOTE(review): renderer.M and overriding draw() rely on older
        # matplotlib 3D internals (newer versions expect do_3d_projection
        # and a changed proj_transform signature) — confirm against the
        # supported matplotlib version.
        xs, ys, zs = proj3d.proj_transform(xs3d, ys3d, zs3d, renderer.M)
        self.set_positions((xs[0], ys[0]), (xs[1], ys[1]))
        FancyArrowPatch.draw(self, renderer)
'----------------------- Crystal Plotting Programs----------------------'
def vecplot(UV, mode='hk0', axis=None, *args, **kwargs):
    """
    Plot grid of a,b vectors on current axis
    :param UV: [a;b;c] array of unit vectors
    :param mode: definition of axis plane, 'hk0', 'h0l', '0kl', 'hhl'
    :param axis: axis to create lines on, if None, plt.gca is used
    :param args: arguments to pass to plot command, e.g. linewidth, alpha, color
    :return: None
    """
    # Transform the cell so the requested plane lies in the plot plane.
    # (Permutation matrices below; fg.rot3D semantics are defined elsewhere —
    # presumably a rotation about the c axis. 'hk0' needs no transform.)
    if mode == 'h0l':
        # h0l
        UV = np.dot(np.array([[1, 0, 0], [0, 0, 1], [0, 1, 0]]), fg.rot3D(UV, gamma=-90))
    elif mode == '0kl':
        # 0kl
        UV = np.dot(np.array([[0, 1, 0], [0, 0, 1], [1, 0, 0]]), UV)
    elif mode == 'hhl':
        # hhl ***untested
        UV = np.dot(np.array([[1, 1, 0], [0, 0, 1], [0, 1, 0]]), UV)
    if axis is None:
        axis = plt.gca()
    axsize = axis.axis()
    # Lattice points covering the visible axis range, then grid lines through them.
    latt = axis_lattice_points(UV[0], UV[1], axis=axsize)
    plot_lattice_lines(latt, UV[0], UV[1], axis=axis, *args, **kwargs)
def UV_arrows(UV, alabel='a', blabel='b', clabel='c'):
    """
    Plot arrows with a*,b* on current figure
    (On 3D axes all three cell vectors are drawn; on 2D axes only a and b.)
    """
    ax = plt.gca()
    if ax.name.lower() == '3d':
        # 3D plot: preserve the current limits and draw all three vectors
        # from the origin, in a, b, c order.
        saved_xlim = ax.get_xlim()
        saved_ylim = ax.get_ylim()
        saved_zlim = ax.get_zlim()
        for idx, vec_label in enumerate((alabel, blabel, clabel)):
            plot_arrow([0, UV[idx, 0]], [0, UV[idx, 1]], [0, UV[idx, 2]], arrow_size=40, col='k')
            ax.text(UV[idx, 0], UV[idx, 1], UV[idx, 2], vec_label,
                    fontname=DEFAULT_FONT, weight='bold', size=18)
        ax.set_xlim(saved_xlim)
        ax.set_ylim(saved_ylim)
        ax.set_zlim(saved_zlim)
        return
    # 2D plot: annotate only the a and b vectors, restoring the axis range.
    saved_axis = ax.axis()
    arrow_props = dict(arrowstyle="->")
    plt.annotate("", xy=(UV[0, 0], UV[0, 1]), xytext=(0.0, 0.0), arrowprops=arrow_props)
    plt.annotate("", xy=(UV[1, 0], UV[1, 1]), xytext=(0.0, 0.0), arrowprops=arrow_props)
    plt.annotate(alabel, (0.1 + UV[0, 0], UV[0, 1] - 0.2))
    plt.annotate(blabel, (UV[1, 0] - 0.2, 0.1 + UV[1, 1]))
    ax.axis(saved_axis)
def axis_lattice_points(vec_a=[1, 0, 0], vec_b=[0, 1, 0], axis=[-4, 4, -4, 4]):
    """
    Generate a 2D lattice of points generated by 2 vectors within a 2D axis
    :param vec_a: [1x3] array : a* vector
    :param vec_b: [1x3] array : b* vector
    :param axis: [1x4] axis array, plt.axis()
    :return: [nx3] array of lattice points
    """
    # Vectors
    A = np.asarray(vec_a, dtype=float).reshape([3])
    B = np.asarray(vec_b, dtype=float).reshape([3])
    # Generate a 3D cell to make use of indx function
    U = np.array([A, B, np.cross(A, B)])
    # The four corners of the visible axis area (z = 0 plane).
    corners = [[axis[1], axis[2], 0],
               [axis[1], axis[3], 0],
               [axis[0], axis[2], 0],
               [axis[0], axis[3], 0]]
    # Determine the coefficients required to generate lattice points of the 2 vectors at
    # all 4 corners of the axis
    idx = fc.indx(corners, U)
    min_x = np.floor(np.min(idx[:, 0]))
    max_x = np.ceil(np.max(idx[:, 0]))
    min_y = np.floor(np.min(idx[:, 1]))
    max_y = np.ceil(np.max(idx[:, 1]))
    # All integer (h, k, 0) combinations within the corner bounds -> real coordinates.
    hkl = fc.genHKL([min_x, max_x], [min_y, max_y], 0)
    latt = np.dot(hkl, U)
    return latt
def plot_lattice_points2D(Q, markersize=12, color='b', marker='o'):
    """
    Add lattice points to the current axis without changing its range
    :param Q: [nx2/3] array : lattice points to plot
    :param markersize: default 12
    :param color: default 'b'
    :param marker: default 'o'
    :return: None
    """
    current_axis = plt.gca()
    saved_limits = current_axis.axis()
    current_axis.plot(Q[:, 0], Q[:, 1], markersize=markersize, color=color, marker=marker)
    # Restore the axis range so adding points does not rescale the view.
    current_axis.axis(saved_limits)
def plot_lattice_points3D(Q, point_size=None, color=None, cmap=None):
    """
    Plot lattice points is 3D reciprocal space
    :param Q: [nx3] array of wavevector transfer positions in reciprocal space, units A^-1
    :param point_size: scalar or array of length n, determines each point size (for intensity), in pixels
    :param color: colour specifier, can be a list of values length n
    :param cmap: str name of colormap to use if color is a list of values
    :return:
    """
    fig = plt.figure(figsize=FIGURE_SIZE, dpi=FIGURE_DPI)
    ax = fig.add_subplot(111, projection='3d')
    ax.scatter(Q[:, 0], Q[:, 1], Q[:, 2], s=point_size, c=color, cmap=cmap)
    labels(None, 'Q$_x$', 'Q$_y$', 'Q$_z$')
    # NOTE(review): x and y limits are deliberately reversed ([4, -4]) —
    # presumably to match a viewing convention; confirm before changing.
    ax.set_xlim([4, -4])
    ax.set_ylim([4, -4])
    ax.set_zlim([-4, 4])
def plot_lattice_lines(latt, vec_a=(1, 0, 0), vec_b=(0, 1, 0), axis=None, *args, **kwargs):
    """
    Add lines defining the reciprocal lattice to the current plot
    Generates square or hexagonal lines where vertices are the lattice points within the image.
    :param latt: [nx2/3] array : points at which to generate lattice
    :param vec_a: [1x2/3] array : a* vector
    :param vec_b: [1x2/3] array : b* vector
    :param axis: axis to plot on (None for plt.gca)
    :param args: extra arguments passed to plot after the 'k-' format string
                 (NOTE: positional values here follow a format string, so
                 prefer keyword arguments such as linewidth=, alpha=)
    :return: None
    """
    if axis is None:
        axis = plt.gca()
    axsize = axis.axis()
    # vectors
    A = np.asarray(vec_a, dtype=float).reshape([3])
    B = np.asarray(vec_b, dtype=float).reshape([3])
    # Angle between vectors decides whether a third (hexagonal) line set is needed.
    angle = fg.ang(A, B)
    # Idiom fix: iterate lattice points directly instead of indexing by range(len(...)).
    for lp in latt:
        uv1_1 = lp - A
        uv1_2 = lp + A
        uv2_1 = lp - B
        uv2_2 = lp + B
        axis.plot([uv1_1[0], uv1_2[0]], [uv1_1[1], uv1_2[1]], 'k-', *args, **kwargs)
        axis.plot([uv2_1[0], uv2_2[0]], [uv2_1[1], uv2_2[1]], 'k-', *args, **kwargs)
        if abs(angle - np.pi / 3) < 0.01:  # 60Deg
            uv3_1 = lp + A - B
            uv3_2 = lp - A + B
            axis.plot([uv3_1[0], uv3_2[0]], [uv3_1[1], uv3_2[1]], 'k-', *args, **kwargs)
        elif abs(angle - 2 * np.pi / 3) < 0.01:  # 120 Deg
            uv3_1 = lp + A + B
            uv3_2 = lp - A - B
            axis.plot([uv3_1[0], uv3_2[0]], [uv3_1[1], uv3_2[1]], 'k-', *args, **kwargs)
    # Restore the axis range so the long lattice lines do not rescale the view.
    axis.axis(axsize)
def plot_vector_arrows(vec_a=[1, 0, 0], vec_b=[1, 0, 0], vec_a_lab=None, vec_b_lab=None,
                       arrow_size=40, color='b', fontsize=18, axis=None):
    """
    Plot vector arrows for Cell on current axis
    Will generate two arrows on the current axis, pointing from the origin to vec_a and vec_b, respectively.
    :param vec_a: [1x2/3] array : a* vector
    :param vec_b: [1x2/3] array : b* vector
    :param vec_a_lab: str : e.g. 'a*'
    :param vec_b_lab: str : e.g. 'b*'
    :param arrow_size: size of arrow, default 40
    :param color: arrow colour, default 'b'
    :param fontsize: text size, default 18
    :param axis: axes to plot on (None for plt.gca)
    :return: None
    """
    # Normalise inputs to 2D arrays so the first row can be indexed uniformly.
    vec_a = np.asarray(vec_a).reshape([-1, np.shape(vec_a)[-1]])
    vec_b = np.asarray(vec_b).reshape((-1, np.shape(vec_b)[-1]))
    if axis is None:
        axis = plt.gca()
    axsize = axis.axis()
    # Vector arrows and lattice point labels
    if vec_a_lab is None:
        vec_a_lab = 'a*'
    if vec_b_lab is None:
        vec_b_lab = 'b*'
    plt.sca(axis)
    plot_arrow([0, vec_a[0, 0]], [0, vec_a[0, 1]], arrow_size=arrow_size, col=color)
    plt.text(vec_a[0, 0], vec_a[0, 1], vec_a_lab, fontname=DEFAULT_FONT, weight='bold', size=fontsize)
    plot_arrow([0, vec_b[0, 0]], [0, vec_b[0, 1]], arrow_size=arrow_size, col=color)
    plt.text(vec_b[0, 0], vec_b[0, 1], vec_b_lab, fontname=DEFAULT_FONT, weight='bold', size=fontsize)
    # Restore the axis range so the arrows do not rescale the view.
    axis.axis(axsize)
def plot_ewald_coverage(energy_kev, color='k', linewidth=2):
    """
    Plot Ewald coverage of a single axis diffractometer on current plot in 2D
    Includes boundaries for theta=0, twotheta=180 and theta=twotheta
    :param energy_kev: float
    :param color: str
    :param linewidth: float
    :return: None
    """
    q_max = fc.calqmag(180, energy_kev)
    # calculate diffractometer angles
    angles = np.arange(0, 180, 0.1)
    Q1x, Q1y = fc.diffractometer_Q(angles, 180, energy_kev)  # delta=180
    Q2x, Q2y = fc.diffractometer_Q(angles, angles, energy_kev)  # eta=delta
    Q3x, Q3y = fc.diffractometer_Q(0, angles, energy_kev)  # eta=0
    # Bug fix: linewidth was previously passed positionally to plt.plot,
    # where the value after the colour/format string is interpreted as
    # data for a new line, not as a line width. It must be a keyword.
    plt.plot(Q1x, Q1y, color, linewidth=linewidth, label=r'2$\theta$=180')
    plt.plot(Q2x, Q2y, color, linewidth=linewidth, label=r'2$\theta$=$\theta$')
    plt.plot(Q3x, Q3y, color, linewidth=linewidth, label=r'$\theta$=0')
    plt.axis([-q_max, q_max, 0, q_max])
def plot_diffractometer_reciprocal_space(phi, chi, eta, mu, delta, gamma, uv, u, lab, energy_kev):
    """
    Plot crystal axes in lab frame of 6-circle diffractometer
    :param phi: sample phi rotation angle -- presumably degrees; TODO confirm units
    :param chi: sample chi rotation angle
    :param eta: sample eta rotation angle
    :param mu: sample mu rotation angle
    :param delta: detector delta angle
    :param gamma: detector gamma angle
    :param uv: unit-cell vectors, as consumed by fc.RcSp / fc.labwavevector
    :param u: orientation matrix passed to fc.labwavevector
    :param lab: lab-frame definition passed through to the fc.* helpers
    :param energy_kev: x-ray energy in keV
    :return: None (opens a 3D matplotlib figure)
    """
    uvstar = fc.RcSp(uv)
    # generate all hkl reflections up to the maximum index for this cell
    maxhkl = fc.maxHKL(2, uvstar)
    hkl = fc.genHKL(*maxhkl)
    # combined sample rotation from the four sample circles
    r = fc.diffractometer_rotation(phi, chi, eta, mu)
    # detector wavevector plus incident (ki) and final (kf) wavevectors
    qdet = fc.diff6circleq(delta, gamma, energy_kev, lab=lab)
    ki, kf = fc.diff6circlek(delta, gamma, energy_kev, lab=lab)
    # reciprocal-lattice points and the three reciprocal axes in the lab frame
    qlab = fc.labwavevector(hkl, uv, u, r, lab)
    astar = fc.labwavevector([1, 0, 0], uv, u, r, lab)
    bstar = fc.labwavevector([0, 1, 0], uv, u, r, lab)
    cstar = fc.labwavevector([0, 0, 1], uv, u, r, lab)
    fig = plt.figure(figsize=FIGURE_SIZE, dpi=FIGURE_DPI)
    ax = fig.add_subplot(111, projection='3d')
    def pltvec(vec, *args, **kwargs):
        # plot the (y, z, x) components, matching the 'Y', 'Z', 'X' axis
        # labels set below
        vec = np.reshape(vec, (-1, 3))
        return plt.plot(vec[:, 1], vec[:, 2], vec[:, 0], *args, **kwargs)
    pltvec(qlab, 'r+', ms=12, label='hkl')
    # incident beam (-ki), final beam (kf) and detector direction, all joined
    # at the origin
    pltvec([-ki, [0, 0, 0], kf, [0, 0, 0], qdet], 'k-', lw=5, label='q = kf - ki')
    pltvec([[0, 0, 0], qdet], 'm-', lw=5, label='q = kf - ki')
    pltvec([[0, 0, 0], astar], 'b-', lw=5, label='astar')
    pltvec([[0, 0, 0], bstar], 'g-', lw=5, label='bstar')
    pltvec([[0, 0, 0], cstar], 'y-', lw=5, label='cstar')
    labels(None, 'Y', 'Z', 'X', legend=True)
    # reversed x/y limits mirror those axes (equivalent to the commented
    # invert_* calls kept below)
    ax.set_xlim([2, -2])
    ax.set_ylim([2, -2])
    ax.set_zlim([-2, 2])
    #ax.invert_xaxis()
    #ax.invert_yaxis()
    plt.show()
def plot_xray_scattering_factor(elements, maxq=10):
    """
    Plot x-ray scattering factor for 1 or more elements
    :param elements: str or list of str : element symbol(s)
    :param maxq: float : maximum wavevector magnitude Q to plot, in inverse Angstroms
    :return: None
    """
    q = np.linspace(0, maxq, 200)
    xrf = fc.xray_scattering_factor(elements, q)

    newplot(q, xrf)
    plt.legend(np.asarray(elements).reshape(-1), loc=0, frameon=False, fontsize=18)
    # raw string so '\A' in the LaTeX macro is not parsed as an (invalid)
    # Python escape sequence
    labels('X-Ray Scattering Factor', r'Q [$\AA^{-1}$]')
def plot_magnetic_form_factor(elements, maxq=10):
    """
    Plot magnetic form factor for 1 or more elements
    :param elements: str or list of str : element symbol(s)
    :param maxq: float : maximum wavevector magnitude Q to plot, in inverse Angstroms
    :return: None
    """
    q = np.linspace(0, maxq, 200)
    mff = fc.magnetic_form_factor(elements, q)

    newplot(q, mff)
    plt.legend(np.asarray(elements).reshape(-1), loc=0, frameon=False, fontsize=18)
    # raw string so '\A' in the LaTeX macro is not parsed as an (invalid)
    # Python escape sequence
    labels('Magnetic Form Factor', r'Q [$\AA^{-1}$]')
def plot_xray_attenuation(elements, min_energy=0, max_energy=20):
    """
    Plot the x-ray mass attenuation coefficient (mu/rho) for 1 or more elements
    :param elements: str or list of str : element symbol(s)
    :param min_energy: float : lower limit of the energy axis, in keV
    :param max_energy: float : upper limit of the energy axis, in keV
    :return: None
    """
    Zarray = fc.atom_properties(elements, 'Z')
    ene = np.arange(min_energy, max_energy+0.01, 0.01)
    Aarray = fc.attenuation(Zarray, ene)
    newplot(ene, Aarray)
    # logarithmic y axis
    plt.yscale('log')
    plt.xlim([min_energy, max_energy])
    plt.legend(np.asarray(elements).reshape(-1), loc=0, frameon=False, fontsize=18)
    labels('X-Ray Attenuation', 'Energy [keV]', r'$\mu/\rho$ [cm$^2$/g]')
def plot_atomic_scattering_factor(element, min_energy=0.5, max_energy=20):
    """
    Plot the energy-dependent atomic scattering factors f1 and f2 of an element.
    :param element: str name of element to plot
    :param min_energy: float min energy in keV
    :param max_energy: float max energy in keV
    :return: None
    """
    energy = np.arange(min_energy, max_energy+0.01, 0.01)
    f1, f2 = fc.atomic_scattering_factor(element, energy)

    newplot(energy, f1, '-', lw=2, label='f1')
    plt.plot(energy, f2, '-', lw=2, label='f2')
    plt.xlim([min_energy, max_energy])
    ttl = 'X-Ray Scattering Factor\n%s' % element
    labels(ttl, 'Energy [keV]', None, legend=True)
def plot_xray_transmission(chemical_formula, density=8.9, energy_range=None, thickness_um=100):
    """
    Plot transmission of x-rays through a slab of material across a range of energies.
    Equivalent to https://henke.lbl.gov/optical_constants/filter2.html
    Based on formulas from: Henke, Gullikson, and Davis, Atomic Data and Nuclear Data Tables 54 no.2, 181-342 (July 1993)
    :param chemical_formula: str molecular formula
    :param density: float density in g/cm^3
    :param energy_range: array x-ray energy in keV, None for default range
    :param thickness_um: slab thickness in microns
    :return: None
    """
    # fall back to the standard 0.03-20 keV range when none is supplied
    energies = np.arange(0.03, 20, 0.01) if energy_range is None else energy_range
    transmission = fc.filter_transmission(chemical_formula, energies, density, thickness_um)

    newplot(energies, transmission)
    title = '%s Density=%5.3f, thickness=%3.3g μm' % (chemical_formula, density, thickness_um)
    labels(title, 'Energy [keV]', 'Transmission')
def plot_xray_attenuation_length(chemical_formula, density=8.9, energy_range=None, grazing_angle=90):
    """
    Plot the X-Ray attenuation length of a compound at a given incidence angle.
    Equivalent to: https://henke.lbl.gov/optical_constants/atten2.html
    Based on formulas from: Henke, Gullikson, and Davis, Atomic Data and Nuclear Data Tables 54 no.2, 181-342 (July 1993)
    :param chemical_formula: str molecular formula
    :param density: float density in g/cm^3
    :param energy_range: array x-ray energy in keV, None for default range
    :param grazing_angle: incidence angle relative to the surface, in degrees
    :return: None
    """
    # fall back to the standard 0.03-20 keV range when none is supplied
    energies = np.arange(0.03, 20, 0.01) if energy_range is None else energy_range
    atten_length = fc.molecular_attenuation_length(chemical_formula, energies, density, grazing_angle)

    newplot(energies, atten_length)
    title = '%s Density=%5.3f, Angle=%3.3g deg' % (chemical_formula, density, grazing_angle)
    labels(title, 'Energy [keV]', 'Atten Length [μm]')
def plot_xray_reflectivity(chemical_formula, density=8.9, energy_range=None, grazing_angle=2):
    """
    Plot the specular reflectivity of a material
    From: https://xdb.lbl.gov/Section4/Sec_4-2.html
    :param chemical_formula: str molecular formula
    :param density: float, density in g/cm^3
    :param energy_range: float or array, x-ray energy in keV
    :param grazing_angle: float, incidence angle relative to the surface, in degrees
    :return: None
    """
    if energy_range is None:
        energy_range = np.arange(0.03, 20, 0.01)
    reflectivity = fc.molecular_reflectivity(chemical_formula, energy_range, density, grazing_angle)

    ttl = '%s Density=%5.3f, Angle=%3.3g deg' % (chemical_formula, density, grazing_angle)
    newplot(energy_range, reflectivity)
    # y axis shows the (dimensionless) reflectivity -- the previous label,
    # 'Atten Length [um]', was copied from plot_xray_attenuation_length
    labels(ttl, 'Energy [keV]', 'Reflectivity')
def plot_xray_refractive_index(chemical_formula, density=8.9, energy_range=None):
    """
    Plot the complex index of refraction of a compound.
    n = 1 - (1/2pi)N*r0*lambda^2*(f1+if2) = 1 - Delta - iBeta
    Equivalent to: https://henke.lbl.gov/optical_constants/getdb2.html
    Based on formulas from: Henke, Gullikson, and Davis, Atomic Data and Nuclear Data Tables 54 no.2, 181-342 (July 1993)
    :param chemical_formula: str molecular formula
    :param density: float density in g/cm^3
    :param energy_range: array x-ray energy in keV, None for default range
    :return: None
    """
    # fall back to the standard 0.03-20 keV range when none is supplied
    energies = np.arange(0.03, 20, 0.01) if energy_range is None else energy_range
    n, delta, beta = fc.molecular_refractive_index(chemical_formula, energies, density)

    title = '%s Density=%5.3f\nIndex of Refraction = 1 - δ - iβ' % (chemical_formula, density)
    newplot(energies, delta, 'r-', lw=2, label='δ')
    plt.plot(energies, beta, 'b-', lw=2, label='β')
    labels(title, 'Energy [keV]', None, legend=True)
    plt.xscale('log')
    plt.yscale('log')
/OctoBot-0.4.54.tar.gz/OctoBot-0.4.54/octobot/community/errors_upload/errors_uploader.py | import asyncio
import aiohttp
import octobot_commons.logging
class ErrorsUploader:
    """
    ErrorsUploader manages errors posts to the error url.

    Errors are queued via :meth:`schedule_error_upload`; equivalent errors
    are merged together and the whole batch is POSTed to ``upload_url``
    after a short delay, so bursts of identical errors result in one upload.
    """
    def __init__(self, upload_url):
        # URL the batched error reports are POSTed to
        self.upload_url = upload_url
        # event loop used to schedule uploads (resolved lazily)
        self.loop = None
        # seconds to wait before flushing queued errors (allows batching)
        self.upload_delay = 5
        # errors waiting to be uploaded
        self._to_upload_errors = []
        # currently scheduled upload task, if any
        self._upload_task = None
        self.logger = octobot_commons.logging.get_logger(self.__class__.__name__)

    def schedule_error_upload(self, error):
        """
        Called to schedule an error upload
        :param error: the octobot_commons.logging.error_model.Error to upload
        """
        self._add_error(error)
        self._ensure_upload_task()

    def _add_error(self, error):
        """Queue *error*, merging it into an equivalent queued error if any."""
        for existing_error in self._to_upload_errors:
            # first check if error is equivalent to an existing one
            if existing_error.is_equivalent(error):
                existing_error.merge_equivalent(error)
                return
        self._to_upload_errors.append(error)

    def _ensure_upload_task(self):
        """Schedule an upload task unless one is already pending."""
        try:
            if self._ensure_event_loop() and (self._upload_task is None or self._upload_task.done()):
                self._schedule_upload()
        except Exception as err:
            # skip_post_callback avoids re-entering this uploader while
            # reporting its own failure
            self.logger.exception(
                err,
                True,
                f"Error when uploading exception: {err}",
                skip_post_callback=True,
            )

    async def _upload_errors(self, session, errors):
        """POST the serialized *errors* with *session*; log non-200 replies."""
        async with session.post(self.upload_url, json=self._get_formatted_errors(errors)) as resp:
            if resp.status != 200:
                self.logger.error(
                    f"Impossible to upload error : status code: {resp.status}, text: {await resp.text()}",
                    skip_post_callback=True
                )

    @staticmethod
    def _get_formatted_errors(errors):
        """Return the JSON-serializable form of each queued error."""
        return [error.to_dict() for error in errors]

    def _schedule_upload(self):
        """Create the delayed upload task on the resolved event loop."""
        self._upload_task = self.loop.create_task(
            self._upload_soon()
        )

    async def _upload_soon(self):
        """Wait ``upload_delay`` seconds then flush the queued errors."""
        try:
            await asyncio.sleep(self.upload_delay)
            if self._to_upload_errors:
                async with aiohttp.ClientSession() as session:
                    # swap the queue out first so errors arriving during the
                    # upload are collected into the next batch
                    errors = self._to_upload_errors
                    self._to_upload_errors = []
                    await self._upload_errors(session, errors)
                    self.logger.debug(f"Uploaded {len(errors)} errors")
        except Exception as err:
            self.logger.exception(
                err, True, f"Error when uploading exception: {err}", skip_post_callback=True
            )
        finally:
            if self._to_upload_errors:
                # reschedule if new errors arrived during upload
                self._schedule_upload()

    def _ensure_event_loop(self):
        """Return True when a usable event loop is available (and cached)."""
        if self.loop is not None:
            if self.loop.is_running():
                return True
        # otherwise, use the current loop
        # NOTE(review): asyncio.get_event_loop() is deprecated outside a
        # running loop since Python 3.10 -- consider get_running_loop()
        try:
            self.loop = asyncio.get_event_loop()
            return True
        except RuntimeError:
            return False
/CatLearn-0.6.2.tar.gz/CatLearn-0.6.2/catlearn/regression/gpfunctions/io.py | import pickle
import h5py
import numpy as np
from catlearn.regression import GaussianProcess
def write(filename, model, ext='pkl'):
    """Function to write a model object to disk.

    Parameters
    ----------
    filename : str
        The name of the save file (extension is appended automatically).
    model : obj
        Python GaussianProcess object.
    ext : str
        Format to save GP, can be pkl or hdf5. Default is pkl.

    Raises
    ------
    NotImplementedError
        If *ext* is not a supported format.
    """
    # Compare strings with '==' -- the original 'ext is "pkl"' tested object
    # identity and only worked by accident of CPython string interning.
    if ext == 'pkl':
        with open('{}.pkl'.format(filename), 'wb') as outfile:
            pickle.dump(model, outfile, pickle.HIGHEST_PROTOCOL)
    elif ext == 'hdf5':
        train_features = model.train_fp
        train_targets = model.train_target
        regularization = model.regularization
        kernel_list = model.kernel_list
        write_train_data(
            filename, train_features, train_targets, regularization,
            kernel_list)
    else:
        raise NotImplementedError('{} file extension not implemented.'.format(
            ext))
def read(filename, ext='pkl'):
    """Function to read a model object from disk.

    Parameters
    ----------
    filename : str
        The name of the save file (extension is appended automatically).
    ext : str
        Format the GP was saved in, can be pkl or hdf5. Default is pkl.

    Returns
    -------
    model : obj
        Python GaussianProcess object.

    Raises
    ------
    NotImplementedError
        If *ext* is not a supported format.
    """
    # Compare strings with '==' -- the original 'ext is "pkl"' tested object
    # identity and only worked by accident of CPython string interning.
    if ext == 'pkl':
        with open('{}.pkl'.format(filename), 'rb') as infile:
            return pickle.load(infile)
    elif ext == 'hdf5':
        train_features, train_targets, regularization, kernel_list = \
            read_train_data(filename)
        gp = GaussianProcess(
            train_fp=train_features, train_target=train_targets,
            kernel_list=kernel_list, regularization=regularization,
            optimize_hyperparameters=False)
        return gp
    else:
        raise NotImplementedError('{} file extension not implemented.'.format(
            ext))
def write_train_data(filename, train_features, train_targets, regularization,
                     kernel_list):
    """Save raw training data to an hdf5 file.

    Parameters
    ----------
    filename : str
        The name of the save file (``.hdf5`` is appended).
    train_features : arr
        Array of the training features.
    train_targets : list
        A list of the training targets.
    regularization : float
        The regularization parameter.
    kernel_list : list
        The list containing dictionaries for the kernels.
    """
    h5f = h5py.File('{}.hdf5'.format(filename), 'w')
    # feature and target arrays are stored with maximum gzip compression
    for name, data in (('train_features', train_features),
                       ('train_targets', train_targets)):
        h5f.create_dataset(name, data=data, compression='gzip',
                           compression_opts=9)
    h5f.create_dataset('regularization', data=regularization)
    # kernel dictionaries are written as nested hdf5 groups
    _kernel_list_to_group(h5f, '/', kernel_list)
def read_train_data(filename):
    """Load raw training data back from an hdf5 file.

    Parameters
    ----------
    filename : str
        The name of the save file (``.hdf5`` is appended).

    Returns
    -------
    train_features : arr
        Array of the training features.
    train_targets : arr
        Array of the training targets.
    regularization : float
        The regularization parameter.
    kernel_list : list
        The list of dictionaries containing parameters for the kernels.
    """
    h5f = h5py.File('{}.hdf5'.format(filename), 'r')
    train_features = np.asarray(h5f['train_features'])
    train_targets = np.asarray(h5f['train_targets'])
    regularization = float(np.asarray(h5f['regularization']))
    kernel_list = _load_kernel_list_from_group(h5f)
    return train_features, train_targets, regularization, kernel_list
def _kernel_list_to_group(h5file, path, klist):
    """Store each kernel dictionary under ``/kernel_list/<index>/``.

    Parameters
    ----------
    h5file : hdf5
        An open hdf5 file object.
    path : str
        Present for interface symmetry with the other helpers; the kernels
        are always written under ``/kernel_list/``.
    klist : list
        List of kernel dictionaries to save in hdf5 format.
    """
    for index, kernel_dict in enumerate(klist):
        _dict_to_group(h5file, '/kernel_list/' + str(index) + '/', kernel_dict)
def _load_kernel_list_from_group(h5file):
    """Read the list of kernel dictionaries back out of an hdf5 file.

    Parameters
    ----------
    h5file : hdf5
        An open hdf5 file object.

    Returns
    -------
    kernel_list : list
        List of dictionaries for all the kernels.
    """
    # NOTE(review): hdf5 group members iterate in alphanumeric name order, so
    # with more than ten kernels they come back as '0', '1', '10', '2', ... --
    # verify callers do not rely on numeric ordering.
    kernel_list = []
    # (removed a stray no-op `h5file.keys()` call that discarded its result)
    for key in h5file['/kernel_list/']:
        kernel_list.append(_load_dict_from_group(h5file,
                                                 '/kernel_list/' + key + '/'))
    return kernel_list
def _dict_to_group(h5file, path, sdict):
    """Recursively store a dictionary as hdf5 datasets and sub-groups.

    Parameters
    ----------
    h5file : hdf5
        An open hdf5 file object.
    path : str
        The path to write data in the hdf5 file object.
    sdict : dict
        Dictionary to save in hdf5 format.

    Raises
    ------
    ValueError
        If a value has a type that cannot be serialized.
    """
    storable_types = (np.ndarray, np.int64, np.float64, str, float, list)
    for key, item in sdict.items():
        if isinstance(item, storable_types):
            # leaf value: written directly as a dataset
            h5file[path + key] = item
        elif isinstance(item, dict):
            # nested dictionary: recurse into a sub-group
            _dict_to_group(h5file, path + key + '/', item)
        else:
            raise ValueError('Cannot save %s type' % type(item))
def _load_dict_from_group(h5file, path):
    """Convert group format back to dictionary format.

    Parameters
    ----------
    h5file : hdf5
        An open hdf5 file object.
    path : str
        The path to load data from the hdf5 file object.

    Returns
    -------
    rdict : dict
        The resulting dictionary.
    """
    rdict = {}
    for key, item in h5file[path].items():
        # skip the top-level training-data entries; only kernel data is
        # reconstructed here
        if key != 'train_features' and key != 'train_targets' and \
                key != 'regularization':
            # use the public h5py.Dataset / h5py.Group aliases instead of the
            # private h5py._hl.* module paths
            if isinstance(item, h5py.Dataset):
                # Dataset.value was deprecated and removed in h5py 3.0;
                # item[()] reads the full dataset in both h5py 2.x and 3.x
                rdict[key] = item[()]
            elif isinstance(item, h5py.Group):
                rdict[key] = _load_dict_from_group(h5file, path + key + '/')
    return rdict
/BIO-PEPPA-1.2.1.tar.gz/BIO-PEPPA-1.2.1/modules/clust.py | import argparse, tempfile, glob, os, subprocess, sys, shutil
try:
from configure import externals, uopen, xrange, logger, transeq
except :
from .configure import externals, uopen, xrange, logger, transeq
def readFasta(fasta):
    """Parse a FASTA file into ``[name, sequence]`` pairs.

    The file is opened with ``uopen`` (project helper -- presumably handles
    compressed input as well; confirm against configure.uopen).
    Sequence names are the first whitespace-delimited token after '>';
    sequences are upper-cased and lines starting with '#' are ignored.
    """
    records = []
    with uopen(fasta) as handle:
        for line in handle:
            if line.startswith('>'):
                header = line[1:].strip().split()[0]
                records.append([header, []])
            elif len(line) > 0 and not line.startswith('#'):
                records[-1][1].extend(line.strip().split())
    for record in records:
        record[1] = ''.join(record[1]).upper()
    return records
def clust(argv):
    """Command-line front-end: parse options, run getClust on the input
    FASTA file and log where the outputs were written.

    :param argv: list of command-line tokens (excluding the program name)
    :return: tuple of (exemplar FASTA path, cluster table path)
    """
    parser = argparse.ArgumentParser(description='Get clusters and exemplars of clusters from gene sequences using mmseqs linclust.')
    parser.add_argument('-i', '--input', help='[INPUT; REQUIRED] name of the file containing gene sequneces in FASTA format.', required=True)
    parser.add_argument('-p', '--prefix', help='[OUTPUT; REQUIRED] prefix of the outputs.', required=True)
    parser.add_argument('-d', '--identity', help='[PARAM; DEFAULT: 0.9] minimum intra-cluster identity.', default=0.9, type=float)
    parser.add_argument('-c', '--coverage', help='[PARAM; DEFAULT: 0.9] minimum intra-cluster coverage.', default=0.9, type=float)
    parser.add_argument('-t', '--n_thread', help='[PARAM; DEFAULT: 8] number of threads to use.', default=8, type=int)
    parser.add_argument('-a', '--translate', help='[PARAM; DEFAULT: False] activate to cluster in translated sequence.', default=False, action='store_true')
    options = parser.parse_args(argv)

    exemplar_file, cluster_file = getClust(options.prefix, options.input, options.__dict__)
    logger('Exemplar sequences in {0}'.format(exemplar_file))
    logger('Clusters in {0}'.format(cluster_file))
    return exemplar_file, cluster_file
def getClust(prefix, genes, params) :
    """Cluster the gene sequences in *genes* with mmseqs linclust.

    Runs up to three rounds of linclust, each round re-clustering the
    exemplar sequences of the previous round, and stops early when the
    number of exemplars no longer shrinks.

    :param prefix: str : prefix for the two output files
    :param genes: str : FASTA file of gene sequences
    :param params: dict : options (identity, coverage, n_thread, translate, ...)
    :return: (``<prefix>.clust.exemplar``, ``<prefix>.clust.tab``)
    """
    # maps each gene name to the name of its cluster exemplar
    groups = {}
    dirPath = tempfile.mkdtemp(prefix='NS_', dir='.')
    try:
        if not params['translate'] :
            geneFile = genes
        else :
            # cluster in amino-acid space: translate first and write the
            # protein sequences to a temporary FASTA file
            na_seqs = readFasta(genes)
            aa_seqs = transeq(na_seqs, frame='1', transl_table='starts')
            with open(os.path.join(dirPath, 'seq.aa'), 'w') as fout :
                for n, s in aa_seqs :
                    fout.write('>{0}\n{1}\n'.format(n, s[0]))
            geneFile = os.path.join(dirPath, 'seq.aa')
        # scratch paths used by mmseqs
        seqDb = os.path.join(dirPath, 'seq.db')
        tmpDb = os.path.join(dirPath, 'tmp')
        lcDb = os.path.join(dirPath, 'seq.lc')
        tabFile = os.path.join(dirPath, 'clust.tab')
        refFile = os.path.join(dirPath, 'seq.ref')
        # exemplar count from the previous round; starts effectively infinite
        nRef = 999999999999999
        for ite in xrange(3) :
            # start each round with clean mmseqs databases
            if os.path.isdir(tmpDb) :
                shutil.rmtree(tmpDb)
            os.makedirs(tmpDb)
            if os.path.isfile(seqDb) :
                list(map(os.unlink, glob.glob(seqDb + '*')))
            if os.path.isfile(lcDb) :
                list(map(os.unlink, glob.glob(lcDb + '*')))
            # createdb -> linclust -> createtsv pipeline
            subprocess.Popen('{0} createdb {2} {1} -v 0'.format(externals['mmseqs'], seqDb, geneFile).split()).communicate()
            subprocess.Popen('{0} linclust {1} {2} {3} --min-seq-id {4} -c {5} --threads {6} -v 0'.format( \
                externals['mmseqs'], seqDb, lcDb, tmpDb, params['identity'], params['coverage'], params['n_thread']).split(), stdout=subprocess.PIPE).communicate()
            subprocess.Popen('{0} createtsv {1} {1} {2} {3}'.format(\
                externals['mmseqs'], seqDb, lcDb, tabFile).split(), stdout = subprocess.PIPE).communicate()
            # tsv rows are "<exemplar> <member>": record member -> exemplar
            with open(tabFile) as fin :
                for line in fin :
                    part = line.strip().split()
                    groups[part[1]] = part[0]
            tmp = []
            # keep one representative record per cluster from this round
            with open(geneFile) as fin :
                toWrite, used_grps = False, {None:1}
                for line in fin :
                    if line.startswith('>') :
                        name = line[1:].strip().split()[0]
                        grp = groups.get(name, None)
                        toWrite = False if grp in used_grps else True
                        if toWrite :
                            used_grps[grp] = name
                    if toWrite :
                        tmp.append(line)
            # repoint members of used clusters at the retained representative
            for gene, grp in groups.items() :
                if grp in used_grps :
                    groups[gene] = used_grps[grp]
            with open(refFile, 'w') as fout :
                for line in tmp :
                    fout.write(line)
            # stop when the exemplar set is no longer shrinking
            if nRef <= len(used_grps) :
                break
            nRef = len(used_grps)
            geneFile = refFile
        if not params['translate'] :
            shutil.copy2(refFile, '{0}.clust.exemplar'.format(prefix))
        else :
            # exemplars were clustered as proteins: write their original
            # nucleotide sequences to the output
            rSeq = readFasta(refFile)
            na_seqs = dict(na_seqs)
            with open('{0}.clust.exemplar'.format(prefix), 'w') as fout :
                for n, s in rSeq:
                    fout.write('>{0}\n{1}\n'.format(n, na_seqs[n]))
    finally :
        # always remove the scratch directory
        shutil.rmtree(dirPath)
    # flatten exemplar chains (a -> b -> c becomes a -> c) and write the table
    with open('{0}.clust.tab'.format(prefix), 'w') as fout :
        for gene, grp in sorted(groups.items()) :
            g = gene
            while g != grp :
                g, grp = grp, groups[grp]
            groups[gene] = grp
            fout.write('{0}\t{1}\n'.format(gene, grp))
    return '{0}.clust.exemplar'.format(prefix), '{0}.clust.tab'.format(prefix)
# CLI entry point: forward the command-line arguments (minus the program name)
if __name__ == '__main__' :
    clust(sys.argv[1:])
/Flickr_Mirror_Ngoc_Dang-1.0.3-py3-none-any.whl/mirroring_flickr/parser.py | """Modules"""
import argparse
import mirroring_flickr.constants as constants
import os
import logging
import json
from mirroring_flickr.cache_strategy import CachingStrategy
import stat
import getpass
import requests
# Waypoint 8
def path(string):
    """
    Resolve a command-line string into an absolute directory path,
    creating the directory (and any missing parents) if needed.
    Supports '~'-relative and relative paths.
    :param string(str): a string representing a path
    :return (str): the resolved absolute path of the directory
    """
    # Expand '~' first and resolve relative parts/symlinks so that the
    # directory we create is also the path we return.  (The previous
    # implementation returned realpath(string) without expanduser, so a
    # '~/...' argument created one directory but returned a different,
    # wrong path.)
    resolved = os.path.realpath(os.path.expanduser(string))
    # makedirs with exist_ok avoids the exists()/mkdir() race and also
    # creates missing intermediate directories.
    os.makedirs(resolved, exist_ok=True)
    return resolved
def get_arguments():
    """
    Build the command-line interface and resolve the Flickr API credentials.

    Parses the mirroring options, then loads the cached API key/secret from
    ``<cache-path>/<CACHE_FILE>``; when no cache exists it interactively
    prompts for them, validates them against the Flickr API (network call),
    and optionally saves them for later runs.

    :return (Namespace): a Namespace object holding the resolved options
        (cache_path, info_level, username, consumer key/secret,
        cache_directory_depth, cache_strategy, ...)
    """
    # create ArgumentParser object
    parser = argparse.ArgumentParser(description="Flickr Mirroring")
    # optional arguments
    parser.add_argument(
        "--cache-path",
        help="specify the absolute path where the photos downloaded\
            from Flickr need to be cached",
        type=path,  # the path converter also creates the directory
        default=os.path.realpath(os.path.expanduser('~/.flickr/')))
    parser.add_argument(
        "--info-level",
        help="specify the level of information of a photo to fetch\
            (value between 0 and 2)",
        choices=range(3),  # only 3 levels allowed
        type=int,
        metavar="LEVEL",
        default=0)
    parser.add_argument(
        "--save-api-keys",
        help="specify whether to save the Flickr API keys for\
            further usage",
        action="store_true")
    parser.add_argument(
        "--cache-directory-depth",
        help="depth of directory to save",
        type=int,
        metavar="",
        default=4)
    # only 1 download method is selected
    group_download_method = parser.add_mutually_exclusive_group()
    group_download_method.add_argument(
        "--fifo",
        help="specify the First-In First-Out method to mirror the\
            user's photostream, from the oldest uploaded photo to\
            the earliest",
        action='store_true')
    group_download_method.add_argument(
        "--lifo",
        help="specify the Last-In First-Out method to mirror the\
            user's photostream, from the earliest uploaded photo\
            to the oldest (default option)",
        action='store_true')
    # only 1 download data option is selected
    group_download_data = parser.add_mutually_exclusive_group()
    group_download_data.add_argument(
        "--image-only",
        help="specify whether the script must only download photos\
            images",
        action="store_true")
    group_download_data.add_argument(
        "--info-only",
        help="specify whether the script must only download photos'\
            information",
        action="store_true")
    # required
    parser.add_argument(
        "--username",
        help="username of the account of a user on Flickr to mirror\
            their photostream",
        required=True)
    args = parser.parse_args()
    # get the path of the json file where the credentials are cached
    path_save = args.cache_path + constants.CACHE_FILE
    try:
        # read cached file
        with open(path_save, 'r') as file_save:
            # get the previously used key
            data = json.loads(file_save.read())
            consumer_secret = data['consumer_secret']
            consumer_key = data['consumer_key']
    # no cached credentials: prompt and validate interactively
    except FileNotFoundError:
        while True:
            # Waypoint9
            # prompt user to input api key and secret
            consumer_key = \
                getpass.getpass("Enter your Flickr API key:")
            consumer_secret = \
                getpass.getpass("Enter your Flickr API secret:")
            # validate the key with a cheap echo call to the Flickr API
            payload = {
                "method": "flickr.test.echo",
                "api_key": consumer_key,
                "format": "json",
                "nojsoncallback": "1"
            }
            test_key = requests.get(constants.END_POINT,
                                    params=payload)
            if json.loads(test_key.text)["stat"] == "fail":
                logging.warning(json.loads(test_key.text)['message'])
            else:
                break
        # if save-api-keys option is selected
        if args.save_api_keys is True:
            if not os.path.exists(constants.DEFAULT_PATH):
                os.mkdir(constants.DEFAULT_PATH)
            # set up file path and data to write
            data_save = {
                "consumer_secret": consumer_secret,
                "consumer_key": consumer_key
            }
            json_object = json.dumps(data_save, indent=4)
            # create file if not existed with w+ mode
            with open(path_save, "w+") as file_save:
                # write data into the file
                file_save.write(json_object)
            # only user can read/write the credentials file
            os.chmod(path_save,
                     stat.S_IRUSR |
                     stat.S_IWUSR)
    # set the download strategy
    if args.lifo:
        # lifo if lifo is selected
        strategy = CachingStrategy.LIFO
    elif args.fifo:
        # fifo if fifo is selected
        strategy = CachingStrategy.FIFO
    else:
        # default is lifo
        strategy = CachingStrategy.LIFO
    # --image-only cannot be combined with a non-zero --info-level
    # NOTE(review): parser.error() exits the process itself, so the
    # surrounding `raise` never actually raises -- confirm intended.
    if args.image_only is True and args.info_level != 0:
        raise parser.error(
            "--image-only is not allowed to have --info-level")
    # return Namespace object
    return argparse.Namespace(cache_path=args.cache_path,
                              image_only=args.image_only,
                              info_level=args.info_level,
                              info_only=args.info_only,
                              username=args.username,
                              consumer_key=consumer_key,
                              consumer_secret=consumer_secret,
                              cache_directory_depth=args.cache_directory_depth,
                              cache_strategy=strategy)
/Aglyph-3.0.0.tar.gz/Aglyph-3.0.0/test/__init__.py |
# Copyright (c) 2006, 2011, 2013-2017 Matthew Zipay.
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
"""Utilities and common setup for all unit test modules."""
__author__ = "Matthew Zipay <mattz@ninthtest.info>"
import codecs
from inspect import getsourcefile
import logging
import logging.config
import os
import unittest

from aglyph._compat import DataType, is_python_3

# always force tracing when running test suite
# (set before the `autologging` import below -- the placement suggests the
#  variables must be in the environment when autologging is first imported;
#  confirm against autologging's configuration docs)
os.environ["AUTOLOGGING_TRACED_NOOP"] = ""
os.environ["AGLYPH_TRACED"] = "1"

from autologging import TRACE

# helpers re-exported to the individual test modules
__all__ = [
    "assertRaisesWithMessage",
    "find_resource",
    "read_resource",
    "suite",
]
#PYVER: can use TestCase.assertRaises as a context manager in 3.1+
def assertRaisesWithMessage(
        test_case, e_expected, callable_, *args, **keywords):
    """Assert that *callable_* raises an exception whose type and message
    both match those of *e_expected*.
    """
    expected_type = type(e_expected)
    try:
        callable_(*args, **keywords)
    except expected_type as e_actual:
        # same type raised -- now the messages must match too
        test_case.assertEqual(str(e_expected), str(e_actual))
        return
    # reached only when callable_ returned without raising
    test_case.fail("did not raise %r" % e_expected)
def find_resource(relname):
    """Return the absolute filename of *relname* resolved relative to the
    ``test`` package directory, or ``None`` if no such file exists.
    """
    package_dir = os.path.dirname(getsourcefile(find_resource))
    candidate = os.path.join(package_dir, relname)
    return candidate if os.path.isfile(candidate) else None
def read_resource(relname, from_encoding="utf-8", to_encoding=None):
    """Return the contents of the resource *relname* (relative to the
    ``test`` package) as unicode text, or as bytes when *to_encoding* is
    given.

    Return ``None`` if *relname* is not found.
    """
    resource_filename = find_resource(relname)
    if resource_filename is None:
        return None
    with codecs.open(resource_filename, encoding=from_encoding) as f:
        resource = f.read()
    if to_encoding is None:
        return resource
    return resource.encode(to_encoding)
def suite():
    """Assemble the full Aglyph regression test suite, keeping the
    original module ordering (aglyph, _compat, component, context,
    assembler)."""
    from test import (
        test_format_dotted_name,
        test_identify,
        test_importable,
        test_resolve_dotted_name,
        test_compat,
        test_is_string,
        test_name_of,
        test_new_instance,
        test_DoctypeTreeBuilder,
        test_CLRXMLParser,
        test_AglyphDefaultXMLParser,
        test_Reference,
        test_InitializationSupport,
        test_Evaluator,
        test_DependencySupport,
        test_Template,
        test_Component,
        test_CreationBuilderMixin,
        test_InjectionBuilderMixin,
        test_LifecycleBuilderMixin,
        test_RegistrationMixin,
        test_TemplateBuilder,
        test_ComponentBuilder,
        test_ContextBuilder,
        test_Context,
        test_XMLContext,
        test_ReentrantMutexCache,
        test_Assembler,
    )
    ordered_modules = (
        # aglyph
        test_importable,
        test_format_dotted_name,
        test_resolve_dotted_name,
        test_identify,
        # aglyph._compat
        test_compat,
        test_is_string,
        test_name_of,
        test_new_instance,
        test_DoctypeTreeBuilder,
        test_CLRXMLParser,
        test_AglyphDefaultXMLParser,
        # aglyph.component
        test_Reference,
        test_InitializationSupport,
        test_Evaluator,
        test_DependencySupport,
        test_Template,
        test_Component,
        # aglyph.context
        test_CreationBuilderMixin,
        test_InjectionBuilderMixin,
        test_LifecycleBuilderMixin,
        test_RegistrationMixin,
        test_TemplateBuilder,
        test_ComponentBuilder,
        test_ContextBuilder,
        test_Context,
        test_XMLContext,
        # aglyph.assembler
        test_ReentrantMutexCache,
        test_Assembler,
    )
    full_suite = unittest.TestSuite()
    for module in ordered_modules:
        full_suite.addTest(module.suite())
    return full_suite
# Route the "test" and "aglyph" loggers to one shared log file placed one
# directory above this package (next to the project sources).
logging.config.dictConfig({
    "version": 1,
    "formatters": {
        "with-thread-id": {
            "format":
                "[%(levelname)-9s %(thread)08x %(name)s %(funcName)s]\n"
                "%(message)s",
        },
    },
    "handlers": {
        "combined-file": {
            "class": "logging.FileHandler",
            "formatter": "with-thread-id",
            # resolved relative to this module's own source file
            "filename": os.path.normpath(
                os.path.join(
                    os.path.dirname(suite.__code__.co_filename), "..",
                    "test.log")),
            # truncate the log on each test run
            "mode": 'w'
        },
    },
    "loggers": {
        "test": {
            "level": logging.DEBUG,
            "propagate": False,
            "handlers": ["combined-file"],
        },
        "aglyph": {
            # TRACE (from autologging) is more verbose than DEBUG
            "level": TRACE,
            "propagate": False,
            "handlers": ["combined-file"],
        }
    },
})

# don't use __name__ here; can be run as "__main__"
_log = logging.getLogger("test")

# all the way down here so that the logging configuration is in place before
# anything from the "aglyph" namespace is imported
from aglyph import __version__
/GelReportModels-7.8.0.tar.gz/GelReportModels-7.8.0/protocols/reports_5_0_0.py | from protocols.protocol import ProtocolElement
from protocols.protocol import SearchRequest
from protocols.protocol import SearchResponse
from protocols.protocol import avro_parse
import avro.schema
# Schema release version exposed by this generated module (matches the
# reports_5_0_0 module name).
version = '5.0.0'
class ACMGClassification(object):
    """
    ACMG variant classification categories (generated from the Avro schema).
    Each symbol is exposed as a class attribute whose value is its own name.
    """
    pathogenic_variant = "pathogenic_variant"
    likely_pathogenic_variant = "likely_pathogenic_variant"
    variant_of_unknown_clinical_significance = "variant_of_unknown_clinical_significance"
    likely_benign_variant = "likely_benign_variant"
    benign_variant = "benign_variant"
    not_assessed = "not_assessed"

    def __hash__(self):
        # hash on the string form of the instance
        return hash(str(self))
class Action(ProtocolElement):
    """
    A clinical action
    """
    _schemaSource = """
{"type": "record", "name": "Action", "namespace": "org.gel.models.report.avro", "doc": "", "fields":
[{"name": "actionType", "type": ["null", {"type": "enum", "name": "ActionType", "doc": "",
"symbols": ["therapy", "therapeutic", "prognosis", "diagnosis"]}], "doc": ""}, {"name":
"references", "type": ["null", {"type": "array", "items": "string"}], "doc": ""}, {"name": "status",
"type": ["null", {"type": "enum", "name": "ActionStatus", "doc": "", "symbols": ["clinical",
"pre_clinical"]}], "doc": ""}, {"name": "variantActionable", "type": "boolean", "doc": ""}, {"name":
"url", "type": ["null", "string"], "doc": ""}, {"name": "evidenceType", "type": ["null", "string"],
"doc": ""}, {"name": "source", "type": "string", "doc": ""}]}
"""
    schema = avro_parse(_schemaSource)
    requiredFields = {
        "actionType",
        "evidenceType",
        "references",
        "source",
        "status",
        "url",
        "variantActionable",
    }

    @classmethod
    def isEmbeddedType(cls, fieldName):
        # Action has no embedded (record-typed) fields
        return fieldName in {}

    @classmethod
    def getEmbeddedType(cls, fieldName):
        # no embedded fields: any lookup raises KeyError
        return {}[fieldName]

    __slots__ = [
        'actionType', 'evidenceType', 'references', 'source',
        'status', 'url', 'variantActionable'
    ]

    def __init__(self, **kwargs):
        # every attribute defaults to None when not supplied
        for attribute in self.__slots__:
            setattr(self, attribute, kwargs.get(attribute, None))
class ActionStatus(object):
    """
    Enumeration of the clinical status of an action: in clinical use,
    or still pre-clinical. Values are plain strings.
    """

    clinical = "clinical"
    pre_clinical = "pre_clinical"

    def __hash__(self):
        # Hash by the printable representation of the instance.
        return hash(str(self))
class ActionType(object):
    """
    Enumeration of the kinds of clinical action that can be attached
    to a variant (therapy/therapeutic, prognosis, diagnosis).
    """

    therapy = "therapy"
    therapeutic = "therapeutic"
    prognosis = "prognosis"
    diagnosis = "diagnosis"

    def __hash__(self):
        # Hash by the printable representation of the instance.
        return hash(str(self))
class Actionability(object):
    """
    Enumeration of actionability answers: yes, no, not yet, or
    not applicable (na).
    """

    yes = "yes"
    no = "no"
    not_yet = "not_yet"
    na = "na"

    def __hash__(self):
        # Hash by the printable representation of the instance.
        return hash(str(self))
class AdditionalAnalysisPanel(ProtocolElement):
    """
    A panel of genes and the specific disease that it assesses.

    Auto-generated Avro record (namespace ``org.gel.models.report.avro``);
    the ``panel`` field embeds a ``GenePanel`` record.
    """
    # Raw Avro schema JSON; parsed once at class-definition time into `schema`.
    _schemaSource = """
{"type": "record", "name": "AdditionalAnalysisPanel", "namespace": "org.gel.models.report.avro",
"doc": "", "fields": [{"name": "specificDisease", "type": "string"}, {"name": "panel", "type":
{"type": "record", "name": "GenePanel", "doc": "", "fields": [{"name": "panelName", "type":
"string", "doc": ""}, {"name": "panelVersion", "type": ["null", "string"], "doc": ""}]}}]}
"""
    schema = avro_parse(_schemaSource)
    # Every declared field name (enforcement lives in ProtocolElement).
    requiredFields = {
        "panel",
        "specificDisease",
    }
    @classmethod
    def isEmbeddedType(cls, fieldName):
        # Only `panel` holds another ProtocolElement (GenePanel).
        embeddedTypes = {
            'panel': GenePanel,
        }
        return fieldName in embeddedTypes
    @classmethod
    def getEmbeddedType(cls, fieldName):
        # Raises KeyError for any field other than `panel`.
        embeddedTypes = {
            'panel': GenePanel,
        }
        return embeddedTypes[fieldName]
    __slots__ = [
        'panel', 'specificDisease'
    ]
    def __init__(self, **kwargs):
        # `panel` defaults to an empty GenePanel; `specificDisease` to None.
        self.panel = kwargs.get(
            'panel', GenePanel())
        self.specificDisease = kwargs.get(
            'specificDisease', None)
class AdditionalVariantsQuestions(ProtocolElement):
    """
    Exit-questionnaire answers about an additional (other actionable)
    variant: its details, actionability, usability, testing status and
    the validation assay used.

    Auto-generated Avro record (namespace ``org.gel.models.report.avro``).
    """
    # Raw Avro schema JSON; parsed once at class-definition time into `schema`.
    _schemaSource = """
{"type": "record", "name": "AdditionalVariantsQuestions", "namespace": "org.gel.models.report.avro",
"fields": [{"name": "variantDetails", "type": "string", "doc": ""}, {"name": "variantActionability",
"type": {"type": "array", "items": {"type": "enum", "name": "CancerActionability", "doc": "",
"symbols": ["germline_susceptibility", "predicts_therapeutic_response", "prognostic",
"defines_diagnosis_group", "eligibility_for_trial", "other"]}}, "doc": ""}, {"name":
"otherVariantActionability", "type": ["null", "string"]}, {"name": "variantUsability", "type":
{"type": "enum", "name": "CancerUsabilitySomatic", "doc": "", "symbols": ["already_actioned",
"actioned_result_of_this_wga", "not_yet_actioned"]}, "doc": ""}, {"name": "variantTested", "type":
{"type": "enum", "name": "CancerTestedAdditional", "doc": "", "symbols":
["not_indicated_for_patient_care", "no_orthologous_test_available", "test_performed_prior_to_wga",
"technical_validation_following_wga", "na"]}, "doc": ""}, {"name": "validationAssayType", "type":
"string", "doc": ""}]}
"""
    schema = avro_parse(_schemaSource)
    # Every declared field name (enforcement lives in ProtocolElement).
    requiredFields = {
        "otherVariantActionability",
        "validationAssayType",
        "variantActionability",
        "variantDetails",
        "variantTested",
        "variantUsability",
    }
    @classmethod
    def isEmbeddedType(cls, fieldName):
        # All fields are strings/enums; no embedded ProtocolElement records.
        embeddedTypes = {}
        return fieldName in embeddedTypes
    @classmethod
    def getEmbeddedType(cls, fieldName):
        # No embedded types, so any lookup raises KeyError by design.
        embeddedTypes = {}
        return embeddedTypes[fieldName]
    __slots__ = [
        'otherVariantActionability', 'validationAssayType',
        'variantActionability', 'variantDetails', 'variantTested',
        'variantUsability'
    ]
    def __init__(self, **kwargs):
        # All fields are keyword-only and default to None when omitted.
        self.otherVariantActionability = kwargs.get(
            'otherVariantActionability', None)
        self.validationAssayType = kwargs.get(
            'validationAssayType', None)
        self.variantActionability = kwargs.get(
            'variantActionability', None)
        self.variantDetails = kwargs.get(
            'variantDetails', None)
        self.variantTested = kwargs.get(
            'variantTested', None)
        self.variantUsability = kwargs.get(
            'variantUsability', None)
class AdoptedStatus(object):
    """
    Enumeration of adoption status. ``adoptedin`` means adopted into
    the family; ``adoptedout`` means the child belonged to the family
    and was adopted out.
    """

    notadopted = "notadopted"
    adoptedin = "adoptedin"
    adoptedout = "adoptedout"

    def __hash__(self):
        # Hash by the printable representation of the instance.
        return hash(str(self))
class AffectionStatus(object):
    """
    Enumeration of a participant's affection status: unaffected,
    affected, or uncertain.
    """

    UNAFFECTED = "UNAFFECTED"
    AFFECTED = "AFFECTED"
    UNCERTAIN = "UNCERTAIN"

    def __hash__(self):
        # Hash by the printable representation of the instance.
        return hash(str(self))
class AgeOfOnset(object):
    """
    Enumeration of age-of-onset categories, from embryonal through
    late/middle-age onset.
    """

    EMBRYONAL_ONSET = "EMBRYONAL_ONSET"
    FETAL_ONSET = "FETAL_ONSET"
    NEONATAL_ONSET = "NEONATAL_ONSET"
    INFANTILE_ONSET = "INFANTILE_ONSET"
    CHILDHOOD_ONSET = "CHILDHOOD_ONSET"
    JUVENILE_ONSET = "JUVENILE_ONSET"
    YOUNG_ADULT_ONSET = "YOUNG_ADULT_ONSET"
    LATE_ONSET = "LATE_ONSET"
    MIDDLE_AGE_ONSET = "MIDDLE_AGE_ONSET"

    def __hash__(self):
        # Hash by the printable representation of the instance.
        return hash(str(self))
class AlleleFrequency(ProtocolElement):
    """
    The population allele frequency of a given variant in a given
    study and optionally population.

    Auto-generated Avro record (namespace ``org.gel.models.report.avro``).
    """
    # Raw Avro schema JSON; parsed once at class-definition time into `schema`.
    _schemaSource = """
{"type": "record", "name": "AlleleFrequency", "namespace": "org.gel.models.report.avro", "doc": "",
"fields": [{"name": "study", "type": "string", "doc": ""}, {"name": "population", "type": "string",
"doc": ""}, {"name": "alternateFrequency", "type": "float", "doc": ""}]}
"""
    schema = avro_parse(_schemaSource)
    # Every declared field name (enforcement lives in ProtocolElement).
    requiredFields = {
        "alternateFrequency",
        "population",
        "study",
    }
    @classmethod
    def isEmbeddedType(cls, fieldName):
        # All fields are primitives; no embedded ProtocolElement records.
        embeddedTypes = {}
        return fieldName in embeddedTypes
    @classmethod
    def getEmbeddedType(cls, fieldName):
        # No embedded types, so any lookup raises KeyError by design.
        embeddedTypes = {}
        return embeddedTypes[fieldName]
    __slots__ = [
        'alternateFrequency', 'population', 'study'
    ]
    def __init__(self, **kwargs):
        # All fields are keyword-only and default to None when omitted.
        self.alternateFrequency = kwargs.get(
            'alternateFrequency', None)
        self.population = kwargs.get(
            'population', None)
        self.study = kwargs.get(
            'study', None)
class AlleleOrigin(object):
    """
    Enumeration of allele origin, following Sequence Ontology terms:

    * ``SO_0001781`` de novo variant
    * ``SO_0001778`` germline variant
    * ``SO_0001775`` maternal variant
    * ``SO_0001776`` paternal variant
    * ``SO_0001779`` pedigree specific variant
    * ``SO_0001780`` population specific variant
    * ``SO_0001777`` somatic variant

    Term definitions live under http://purl.obolibrary.org/obo/.
    """

    de_novo_variant = "de_novo_variant"
    germline_variant = "germline_variant"
    maternal_variant = "maternal_variant"
    paternal_variant = "paternal_variant"
    pedigree_specific_variant = "pedigree_specific_variant"
    population_specific_variant = "population_specific_variant"
    somatic_variant = "somatic_variant"

    def __hash__(self):
        # Hash by the printable representation of the instance.
        return hash(str(self))
class AnalysisPanel(ProtocolElement):
    """
    An analysis panel: the specific disease, panel name/version, review
    outcome and whether multiple genetic origins apply.

    Auto-generated Avro record (namespace ``org.gel.models.participant.avro``).
    """
    # Raw Avro schema JSON; parsed once at class-definition time into `schema`.
    _schemaSource = """
{"type": "record", "name": "AnalysisPanel", "namespace": "org.gel.models.participant.avro", "doc":
"", "fields": [{"name": "specificDisease", "type": "string", "doc": ""}, {"name": "panelName",
"type": "string", "doc": ""}, {"name": "panelVersion", "type": ["null", "string"], "doc": ""},
{"name": "reviewOutcome", "type": "string", "doc": ""}, {"name": "multipleGeneticOrigins", "type":
"string", "doc": ""}]}
"""
    schema = avro_parse(_schemaSource)
    # Every declared field name (enforcement lives in ProtocolElement).
    requiredFields = {
        "multipleGeneticOrigins",
        "panelName",
        "panelVersion",
        "reviewOutcome",
        "specificDisease",
    }
    @classmethod
    def isEmbeddedType(cls, fieldName):
        # All fields are strings; no embedded ProtocolElement records.
        embeddedTypes = {}
        return fieldName in embeddedTypes
    @classmethod
    def getEmbeddedType(cls, fieldName):
        # No embedded types, so any lookup raises KeyError by design.
        embeddedTypes = {}
        return embeddedTypes[fieldName]
    __slots__ = [
        'multipleGeneticOrigins', 'panelName', 'panelVersion',
        'reviewOutcome', 'specificDisease'
    ]
    def __init__(self, **kwargs):
        # All fields are keyword-only and default to None when omitted.
        self.multipleGeneticOrigins = kwargs.get(
            'multipleGeneticOrigins', None)
        self.panelName = kwargs.get(
            'panelName', None)
        self.panelVersion = kwargs.get(
            'panelVersion', None)
        self.reviewOutcome = kwargs.get(
            'reviewOutcome', None)
        self.specificDisease = kwargs.get(
            'specificDisease', None)
class Ancestries(ProtocolElement):
    """
    Ancestries, defined as ethnic category(ies) for each parent plus
    chi-square test results against 1000 Genomes phase 3 populations.

    Auto-generated Avro record (namespace ``org.gel.models.participant.avro``).
    """
    # Raw Avro schema JSON; parsed once at class-definition time into `schema`.
    _schemaSource = """
{"type": "record", "name": "Ancestries", "namespace": "org.gel.models.participant.avro", "doc": "",
"fields": [{"name": "mothersEthnicOrigin", "type": ["null", {"type": "enum", "name":
"EthnicCategory", "doc": "", "symbols": ["D", "E", "F", "G", "A", "B", "C", "L", "M", "N", "H", "J",
"K", "P", "S", "R", "Z"]}], "doc": ""}, {"name": "mothersOtherRelevantAncestry", "type": ["null",
"string"], "doc": ""}, {"name": "fathersEthnicOrigin", "type": ["null", "EthnicCategory"], "doc":
""}, {"name": "fathersOtherRelevantAncestry", "type": ["null", "string"], "doc": ""}, {"name":
"chiSquare1KGenomesPhase3Pop", "type": ["null", {"type": "array", "items": {"type": "record",
"name": "ChiSquare1KGenomesPhase3Pop", "doc": "", "fields": [{"name": "kgSuperPopCategory", "type":
{"type": "enum", "name": "KgSuperPopCategory", "doc": "", "symbols": ["AFR", "AMR", "EAS", "EUR",
"SAS"]}, "doc": ""}, {"name": "kgPopCategory", "type": ["null", {"type": "enum", "name":
"KgPopCategory", "doc": "", "symbols": ["ACB", "ASW", "BEB", "CDX", "CEU", "CHB", "CHS", "CLM",
"ESN", "FIN", "GBR", "GIH", "GWD", "IBS", "ITU", "JPT", "KHV", "LWK", "MSL", "MXL", "PEL", "PJL",
"PUR", "STU", "TSI", "YRI"]}], "doc": ""}, {"name": "chiSquare", "type": "double", "doc": ""}]}}],
"doc": ""}]}
"""
    schema = avro_parse(_schemaSource)
    # Every declared field name (enforcement lives in ProtocolElement).
    requiredFields = {
        "chiSquare1KGenomesPhase3Pop",
        "fathersEthnicOrigin",
        "fathersOtherRelevantAncestry",
        "mothersEthnicOrigin",
        "mothersOtherRelevantAncestry",
    }
    @classmethod
    def isEmbeddedType(cls, fieldName):
        # Only the chi-square array embeds another ProtocolElement record.
        embeddedTypes = {
            'chiSquare1KGenomesPhase3Pop': ChiSquare1KGenomesPhase3Pop,
        }
        return fieldName in embeddedTypes
    @classmethod
    def getEmbeddedType(cls, fieldName):
        # Raises KeyError for any field other than chiSquare1KGenomesPhase3Pop.
        embeddedTypes = {
            'chiSquare1KGenomesPhase3Pop': ChiSquare1KGenomesPhase3Pop,
        }
        return embeddedTypes[fieldName]
    __slots__ = [
        'chiSquare1KGenomesPhase3Pop', 'fathersEthnicOrigin',
        'fathersOtherRelevantAncestry', 'mothersEthnicOrigin',
        'mothersOtherRelevantAncestry'
    ]
    def __init__(self, **kwargs):
        # All fields are keyword-only and default to None when omitted.
        self.chiSquare1KGenomesPhase3Pop = kwargs.get(
            'chiSquare1KGenomesPhase3Pop', None)
        self.fathersEthnicOrigin = kwargs.get(
            'fathersEthnicOrigin', None)
        self.fathersOtherRelevantAncestry = kwargs.get(
            'fathersOtherRelevantAncestry', None)
        self.mothersEthnicOrigin = kwargs.get(
            'mothersEthnicOrigin', None)
        self.mothersOtherRelevantAncestry = kwargs.get(
            'mothersOtherRelevantAncestry', None)
class Assembly(object):
    """
    Enumeration of the reference genome assembly (GRCh38 or GRCh37).
    """

    GRCh38 = "GRCh38"
    GRCh37 = "GRCh37"

    def __hash__(self):
        # Hash by the printable representation of the instance.
        return hash(str(self))
class AuditLog(ProtocolElement):
    """
    Audit log entry for an interpretation request: its id/version, a
    code, case-sharing and supporting-evidence changes, and the sets of
    modified, added and removed reported variants.

    Auto-generated Avro record (namespace ``org.gel.models.report.avro``);
    the (very large) inline schema below also declares the nested
    ReportedVariant / ModifiedVariant / CaseShared / SupportingEvidences
    record types.
    """
    # Raw Avro schema JSON; parsed once at class-definition time into `schema`.
    _schemaSource = """
{"type": "record", "name": "AuditLog", "namespace": "org.gel.models.report.avro", "fields":
[{"name": "interpretationRequestId", "type": "string", "doc": ""}, {"name":
"interpretationRequestVersion", "type": "string", "doc": ""}, {"name": "code", "type": {"type":
"enum", "name": "Code", "doc": "", "symbols": ["C0", "C1", "C2", "C3", "C4", "C5", "C6", "C7"]}},
{"name": "caseShared", "type": ["null", {"type": "record", "name": "CaseShared", "fields": [{"name":
"previousGroups", "type": {"type": "array", "items": "string"}}, {"name": "modifiedGroups", "type":
{"type": "array", "items": "string"}}]}]}, {"name": "supportingEvidences", "type": ["null", {"type":
"record", "name": "SupportingEvidences", "fields": [{"name": "previousSupportingEvidences", "type":
{"type": "array", "items": "string"}}, {"name": "modifiedSupportingEvidences", "type": {"type":
"array", "items": "string"}}]}]}, {"name": "modifiedVariants", "type": ["null", {"type": "array",
"items": {"type": "record", "name": "ModifiedVariant", "fields": [{"name": "previousVariant",
"type": {"type": "record", "name": "ReportedVariant", "doc": "", "fields": [{"name":
"variantCoordinates", "type": {"type": "record", "name": "VariantCoordinates", "doc": "", "fields":
[{"name": "chromosome", "type": "string", "doc": ""}, {"name": "position", "type": "int", "doc":
""}, {"name": "reference", "type": "string", "doc": ""}, {"name": "alternate", "type": "string",
"doc": ""}, {"name": "assembly", "type": {"type": "enum", "name": "Assembly", "doc": "", "symbols":
["GRCh38", "GRCh37"]}, "doc": ""}]}, "doc": ""}, {"name": "dbSnpId", "type": ["null", "string"],
"doc": ""}, {"name": "cosmicIds", "type": ["null", {"type": "array", "items": "string"}], "doc":
""}, {"name": "clinVarIds", "type": ["null", {"type": "array", "items": "string"}], "doc": ""},
{"name": "genomicChanges", "type": ["null", {"type": "array", "items": "string"}], "doc": ""},
{"name": "cdnaChanges", "type": ["null", {"type": "array", "items": "string"}], "doc": ""}, {"name":
"proteinChanges", "type": ["null", {"type": "array", "items": "string"}], "doc": ""}, {"name":
"variantCalls", "type": {"type": "array", "items": {"type": "record", "name": "VariantCall", "doc":
"", "fields": [{"name": "participantId", "type": "string", "doc": ""}, {"name": "sampleId", "type":
"string", "doc": ""}, {"name": "zygosity", "type": {"type": "enum", "name": "Zygosity", "doc": "",
"symbols": ["reference_homozygous", "heterozygous", "alternate_homozygous", "missing",
"half_missing_reference", "half_missing_alternate", "alternate_hemizigous", "reference_hemizigous",
"unk", "na"]}, "doc": ""}, {"name": "phaseSet", "type": ["null", "int"], "doc": ""}, {"name": "vaf",
"type": ["null", "double"], "doc": ""}, {"name": "depthReference", "type": ["null", "int"], "doc":
""}, {"name": "depthAlternate", "type": ["null", "int"], "doc": ""}, {"name": "alleleOrigins",
"type": {"type": "array", "items": {"type": "enum", "name": "AlleleOrigin", "doc": "", "symbols":
["de_novo_variant", "germline_variant", "maternal_variant", "paternal_variant",
"pedigree_specific_variant", "population_specific_variant", "somatic_variant"]}}, "doc": ""}]}},
"doc": ""}, {"name": "reportEvents", "type": {"type": "array", "items": {"type": "record", "name":
"ReportEvent", "doc": "", "fields": [{"name": "reportEventId", "type": "string", "doc": ""},
{"name": "phenotypes", "type": {"type": "array", "items": "string"}, "doc": ""}, {"name":
"variantConsequences", "type": {"type": "array", "items": {"type": "record", "name":
"VariantConsequence", "doc": "", "fields": [{"name": "id", "type": "string", "doc": ""}, {"name":
"name", "type": ["null", "string"], "doc": ""}]}}, "doc": ""}, {"name": "genePanel", "type":
["null", {"type": "record", "name": "GenePanel", "doc": "", "fields": [{"name": "panelName", "type":
"string", "doc": ""}, {"name": "panelVersion", "type": ["null", "string"], "doc": ""}]}], "doc":
""}, {"name": "modeOfInheritance", "type": {"type": "enum", "name": "ReportedModeOfInheritance",
"doc": "", "symbols": ["monoallelic", "monoallelic_not_imprinted",
"monoallelic_maternally_imprinted", "monoallelic_paternally_imprinted", "biallelic",
"monoallelic_and_biallelic", "monoallelic_and_more_severe_biallelic", "xlinked_biallelic",
"xlinked_monoallelic", "mitochondrial", "unknown"]}, "doc": ""}, {"name": "genomicEntities", "type":
{"type": "array", "items": {"type": "record", "name": "GenomicEntity", "doc": "", "fields":
[{"name": "type", "type": {"type": "enum", "name": "GenomicEntityType", "doc": "", "symbols":
["regulatory_region", "gene", "transcript", "intergenic"]}, "doc": ""}, {"name": "ensemblId",
"type": "string", "doc": ""}, {"name": "geneSymbol", "type": ["null", "string"], "doc": ""},
{"name": "otherIds", "type": ["null", {"type": "map", "values": "string"}], "doc": ""}]}}, "doc":
""}, {"name": "penetrance", "type": ["null", {"type": "enum", "name": "Penetrance", "namespace":
"org.gel.models.participant.avro", "doc": "", "symbols": ["complete", "incomplete"]}], "doc": ""},
{"name": "score", "type": ["null", "float"], "doc": ""}, {"name": "vendorSpecificScores", "type":
["null", {"type": "map", "values": "float"}], "doc": ""}, {"name": "variantClassification", "type":
["null", {"type": "record", "name": "VariantClassification", "doc": "", "fields": [{"name":
"clinicalSignificance", "type": ["null", {"type": "enum", "name": "ClinicalSignificance", "symbols":
["benign", "likely_benign", "VUS", "likely_pathogenic", "pathogenic", "uncertain_significance"]}],
"doc": ""}, {"name": "drugResponseClassification", "type": ["null", {"type": "enum", "name":
"DrugResponseClassification", "symbols": ["responsive", "resistant", "toxicity", "indication",
"contraindication", "dosing", "increased_monitoring", "efficacy"]}], "doc": ""}, {"name":
"traitAssociation", "type": ["null", {"type": "enum", "name": "TraitAssociation", "symbols":
["established_risk_allele", "likely_risk_allele", "uncertain_risk_allele", "protective"]}], "doc":
""}, {"name": "tumorigenesisClassification", "type": ["null", {"type": "enum", "name":
"TumorigenesisClassification", "symbols": ["driver", "passenger", "modifier"]}], "doc": ""},
{"name": "functionalEffect", "type": ["null", {"type": "enum", "name": "VariantFunctionalEffect",
"symbols": ["dominant_negative_variant", "gain_of_function_variant", "lethal_variant",
"loss_of_function_variant", "loss_of_heterozygosity", "null_variant"]}], "doc": ""}]}], "doc": ""},
{"name": "fullyExplainsPhenotype", "type": ["null", "boolean"], "doc": ""}, {"name":
"groupOfVariants", "type": ["null", "int"], "doc": ""}, {"name": "eventJustification", "type":
["null", "string"], "doc": ""}, {"name": "tier", "type": ["null", {"type": "enum", "name": "Tier",
"doc": "", "symbols": ["NONE", "TIER1", "TIER2", "TIER3", "TIER4", "TIER5"]}], "doc": ""}]}}, "doc":
""}, {"name": "additionalTextualVariantAnnotations", "type": ["null", {"type": "map", "values":
"string"}], "doc": ""}, {"name": "references", "type": ["null", {"type": "map", "values":
"string"}], "doc": ""}, {"name": "variantAttributes", "type": ["null", {"type": "record", "name":
"VariantAttributes", "doc": "", "fields": [{"name": "ihp", "type": ["null", "int"], "doc": ""},
{"name": "recurrentlyReported", "type": ["null", "boolean"], "doc": ""}, {"name": "fdp50", "type":
["null", "string"], "doc": ""}, {"name": "others", "type": ["null", {"type": "map", "values":
"string"}], "doc": ""}]}], "doc": ""}, {"name": "alleleFrequencies", "type": ["null", {"type":
"array", "items": {"type": "record", "name": "AlleleFrequency", "doc": "", "fields": [{"name":
"study", "type": "string", "doc": ""}, {"name": "population", "type": "string", "doc": ""}, {"name":
"alternateFrequency", "type": "float", "doc": ""}]}}], "doc": ""}, {"name":
"additionalNumericVariantAnnotations", "type": ["null", {"type": "map", "values": "float"}], "doc":
""}, {"name": "comments", "type": ["null", {"type": "array", "items": "string"}], "doc": ""},
{"name": "alleleOrigins", "type": {"type": "array", "items": "AlleleOrigin"}, "doc": ""}]}},
{"name": "modifiedVariant", "type": "ReportedVariant"}]}}]}, {"name": "addedVariants", "type":
["null", {"type": "array", "items": "ReportedVariant"}]}, {"name": "removedVariants", "type":
["null", {"type": "array", "items": "ReportedVariant"}]}]}
"""
    schema = avro_parse(_schemaSource)
    # Every declared field name (enforcement lives in ProtocolElement).
    requiredFields = {
        "addedVariants",
        "caseShared",
        "code",
        "interpretationRequestId",
        "interpretationRequestVersion",
        "modifiedVariants",
        "removedVariants",
        "supportingEvidences",
    }
    @classmethod
    def isEmbeddedType(cls, fieldName):
        # Five fields hold (arrays of) other ProtocolElement records.
        embeddedTypes = {
            'addedVariants': ReportedVariant,
            'caseShared': CaseShared,
            'modifiedVariants': ModifiedVariant,
            'removedVariants': ReportedVariant,
            'supportingEvidences': SupportingEvidences,
        }
        return fieldName in embeddedTypes
    @classmethod
    def getEmbeddedType(cls, fieldName):
        # Raises KeyError for non-embedded fields by design.
        embeddedTypes = {
            'addedVariants': ReportedVariant,
            'caseShared': CaseShared,
            'modifiedVariants': ModifiedVariant,
            'removedVariants': ReportedVariant,
            'supportingEvidences': SupportingEvidences,
        }
        return embeddedTypes[fieldName]
    __slots__ = [
        'addedVariants', 'caseShared', 'code',
        'interpretationRequestId', 'interpretationRequestVersion',
        'modifiedVariants', 'removedVariants', 'supportingEvidences'
    ]
    def __init__(self, **kwargs):
        # All fields are keyword-only and default to None when omitted.
        self.addedVariants = kwargs.get(
            'addedVariants', None)
        self.caseShared = kwargs.get(
            'caseShared', None)
        self.code = kwargs.get(
            'code', None)
        self.interpretationRequestId = kwargs.get(
            'interpretationRequestId', None)
        self.interpretationRequestVersion = kwargs.get(
            'interpretationRequestVersion', None)
        self.modifiedVariants = kwargs.get(
            'modifiedVariants', None)
        self.removedVariants = kwargs.get(
            'removedVariants', None)
        self.supportingEvidences = kwargs.get(
            'supportingEvidences', None)
class CancerActionability(object):
    """
    Enumeration of variant actionability categories (germline form):

    * ``germline_susceptibility``: germline susceptibility
    * ``predicts_therapeutic_response``: predicts therapeutic response
    * ``prognostic``: prognostic
    * ``defines_diagnosis_group``: defines diagnosis group
    * ``eligibility_for_trial``: eligibility for trial
    * ``other``: other (please specify)
    """

    germline_susceptibility = "germline_susceptibility"
    predicts_therapeutic_response = "predicts_therapeutic_response"
    prognostic = "prognostic"
    defines_diagnosis_group = "defines_diagnosis_group"
    eligibility_for_trial = "eligibility_for_trial"
    other = "other"

    def __hash__(self):
        # Hash by the printable representation of the instance.
        return hash(str(self))
class CancerActionabilitySomatic(object):
    """
    Enumeration of variant actionability categories (somatic form):

    * ``predicts_therapeutic_response``: predicts therapeutic response
    * ``prognostic``: prognostic
    * ``defines_diagnosis_group``: defines diagnosis group
    * ``eligibility_for_trial``: eligibility for trial
    * ``other``: other (please specify)
    """

    predicts_therapeutic_response = "predicts_therapeutic_response"
    prognostic = "prognostic"
    defines_diagnosis_group = "defines_diagnosis_group"
    eligibility_for_trial = "eligibility_for_trial"
    other = "other"

    def __hash__(self):
        # Hash by the printable representation of the instance.
        return hash(str(self))
class CancerActionableVariants(object):
    """
    Enumeration answering "are the variants actionable?": yes or no.
    """

    yes = "yes"
    no = "no"

    def __hash__(self):
        # Hash by the printable representation of the instance.
        return hash(str(self))
class CancerCaseLevelQuestions(ProtocolElement):
    """
    The questions for the cancer program exit questionnaire at case
    level: review/MDT/validation/reporting timings plus which parts
    were reviewed and whether actionable variants were found.

    Auto-generated Avro record (namespace ``org.gel.models.report.avro``).
    """
    # Raw Avro schema JSON; parsed once at class-definition time into `schema`.
    _schemaSource = """
{"type": "record", "name": "CancerCaseLevelQuestions", "namespace": "org.gel.models.report.avro",
"doc": "", "fields": [{"name": "total_review_time", "type": "double", "doc": ""}, {"name":
"mdt1_time", "type": "double", "doc": ""}, {"name": "mdt2_time", "type": ["null", "double"], "doc":
""}, {"name": "validation_assay_time", "type": ["null", "double"], "doc": ""}, {"name":
"wet_validation_time", "type": ["null", "double"], "doc": ""}, {"name":
"analytical_validation_time", "type": ["null", "double"], "doc": ""}, {"name":
"primary_reporting_time", "type": "double", "doc": ""}, {"name": "primary_authorisation_time",
"type": "double", "doc": ""}, {"name": "report_distribution_time", "type": "double", "doc": ""},
{"name": "total_time", "type": "double", "doc": ""}, {"name": "reviewedInMdtWga", "type": {"type":
"enum", "name": "ReviewedParts", "doc": "", "symbols": ["domain_1", "domain_1_and_2",
"domain_1_2_and_suplementary"]}, "doc": ""}, {"name": "actionableVariants", "type": {"type": "enum",
"name": "CancerActionableVariants", "doc": "", "symbols": ["yes", "no"]}, "doc": ""}]}
"""
    schema = avro_parse(_schemaSource)
    # Every declared field name (enforcement lives in ProtocolElement).
    requiredFields = {
        "actionableVariants",
        "analytical_validation_time",
        "mdt1_time",
        "mdt2_time",
        "primary_authorisation_time",
        "primary_reporting_time",
        "report_distribution_time",
        "reviewedInMdtWga",
        "total_review_time",
        "total_time",
        "validation_assay_time",
        "wet_validation_time",
    }
    @classmethod
    def isEmbeddedType(cls, fieldName):
        # All fields are doubles/enums; no embedded ProtocolElement records.
        embeddedTypes = {}
        return fieldName in embeddedTypes
    @classmethod
    def getEmbeddedType(cls, fieldName):
        # No embedded types, so any lookup raises KeyError by design.
        embeddedTypes = {}
        return embeddedTypes[fieldName]
    __slots__ = [
        'actionableVariants', 'analytical_validation_time',
        'mdt1_time', 'mdt2_time', 'primary_authorisation_time',
        'primary_reporting_time', 'report_distribution_time',
        'reviewedInMdtWga', 'total_review_time', 'total_time',
        'validation_assay_time', 'wet_validation_time'
    ]
    def __init__(self, **kwargs):
        # All fields are keyword-only and default to None when omitted.
        self.actionableVariants = kwargs.get(
            'actionableVariants', None)
        self.analytical_validation_time = kwargs.get(
            'analytical_validation_time', None)
        self.mdt1_time = kwargs.get(
            'mdt1_time', None)
        self.mdt2_time = kwargs.get(
            'mdt2_time', None)
        self.primary_authorisation_time = kwargs.get(
            'primary_authorisation_time', None)
        self.primary_reporting_time = kwargs.get(
            'primary_reporting_time', None)
        self.report_distribution_time = kwargs.get(
            'report_distribution_time', None)
        self.reviewedInMdtWga = kwargs.get(
            'reviewedInMdtWga', None)
        self.total_review_time = kwargs.get(
            'total_review_time', None)
        self.total_time = kwargs.get(
            'total_time', None)
        self.validation_assay_time = kwargs.get(
            'validation_assay_time', None)
        self.wet_validation_time = kwargs.get(
            'wet_validation_time', None)
class CancerExitQuestionnaire(ProtocolElement):
    """
    The cancer program exit questionnaire: event date, reporter,
    case-level questions, optional somatic/germline variant-level
    question lists, free-text comments and other actionable variants.

    Auto-generated Avro record (namespace ``org.gel.models.report.avro``).
    """
    # Raw Avro schema JSON; parsed once at class-definition time into `schema`.
    _schemaSource = """
{"type": "record", "name": "CancerExitQuestionnaire", "namespace": "org.gel.models.report.avro",
"doc": "", "fields": [{"name": "eventDate", "type": "string", "doc": ""}, {"name": "reporter",
"type": "string", "doc": ""}, {"name": "caseLevelQuestions", "type": {"type": "record", "name":
"CancerCaseLevelQuestions", "doc": "", "fields": [{"name": "total_review_time", "type": "double",
"doc": ""}, {"name": "mdt1_time", "type": "double", "doc": ""}, {"name": "mdt2_time", "type":
["null", "double"], "doc": ""}, {"name": "validation_assay_time", "type": ["null", "double"], "doc":
""}, {"name": "wet_validation_time", "type": ["null", "double"], "doc": ""}, {"name":
"analytical_validation_time", "type": ["null", "double"], "doc": ""}, {"name":
"primary_reporting_time", "type": "double", "doc": ""}, {"name": "primary_authorisation_time",
"type": "double", "doc": ""}, {"name": "report_distribution_time", "type": "double", "doc": ""},
{"name": "total_time", "type": "double", "doc": ""}, {"name": "reviewedInMdtWga", "type": {"type":
"enum", "name": "ReviewedParts", "doc": "", "symbols": ["domain_1", "domain_1_and_2",
"domain_1_2_and_suplementary"]}, "doc": ""}, {"name": "actionableVariants", "type": {"type": "enum",
"name": "CancerActionableVariants", "doc": "", "symbols": ["yes", "no"]}, "doc": ""}]}, "doc": ""},
{"name": "somaticVariantLevelQuestions", "type": ["null", {"type": "array", "items": {"type":
"record", "name": "CancerSomaticVariantLevelQuestions", "doc": "", "fields": [{"name":
"variantDetails", "type": "string", "doc": ""}, {"name": "variantActionability", "type": {"type":
"array", "items": {"type": "enum", "name": "CancerActionabilitySomatic", "doc": "", "symbols":
["predicts_therapeutic_response", "prognostic", "defines_diagnosis_group", "eligibility_for_trial",
"other"]}}, "doc": ""}, {"name": "otherVariantActionability", "type": ["null", "string"], "doc":
""}, {"name": "variantUsability", "type": {"type": "enum", "name": "CancerUsabilitySomatic", "doc":
"", "symbols": ["already_actioned", "actioned_result_of_this_wga", "not_yet_actioned"]}, "doc": ""},
{"name": "variantTested", "type": {"type": "enum", "name": "CancerTested", "doc": "", "symbols":
["not_indicated_for_patient_care", "no_orthologous_test_available", "test_performed_prior_to_wga",
"technical_validation_following_wga"]}, "doc": ""}, {"name": "validationAssayType", "type":
"string", "doc": ""}]}}], "doc": ""}, {"name": "germlineVariantLevelQuestions", "type": ["null",
{"type": "array", "items": {"type": "record", "name": "CancerGermlineVariantLevelQuestions", "doc":
"", "fields": [{"name": "variantDetails", "type": "string", "doc": ""}, {"name":
"variantActionability", "type": {"type": "array", "items": {"type": "enum", "name":
"CancerActionability", "doc": "", "symbols": ["germline_susceptibility",
"predicts_therapeutic_response", "prognostic", "defines_diagnosis_group", "eligibility_for_trial",
"other"]}}, "doc": ""}, {"name": "otherVariantActionability", "type": ["null", "string"]}, {"name":
"variantUsability", "type": {"type": "enum", "name": "CancerUsabilityGermline", "doc": "",
"symbols": ["already_actioned", "actioned_result_of_this_wga"]}, "doc": ""}, {"name":
"variantTested", "type": "CancerTested", "doc": ""}, {"name": "validationAssayType", "type":
"string", "doc": ""}]}}], "doc": ""}, {"name": "additionalComments", "type": ["null", "string"],
"doc": ""}, {"name": "otherActionableVariants", "type": ["null", {"type": "array", "items": {"type":
"record", "name": "AdditionalVariantsQuestions", "fields": [{"name": "variantDetails", "type":
"string", "doc": ""}, {"name": "variantActionability", "type": {"type": "array", "items":
"CancerActionability"}, "doc": ""}, {"name": "otherVariantActionability", "type": ["null",
"string"]}, {"name": "variantUsability", "type": "CancerUsabilitySomatic", "doc": ""}, {"name":
"variantTested", "type": {"type": "enum", "name": "CancerTestedAdditional", "doc": "", "symbols":
["not_indicated_for_patient_care", "no_orthologous_test_available", "test_performed_prior_to_wga",
"technical_validation_following_wga", "na"]}, "doc": ""}, {"name": "validationAssayType", "type":
"string", "doc": ""}]}}], "doc": ""}]}
"""
    schema = avro_parse(_schemaSource)
    # Every declared field name (enforcement lives in ProtocolElement).
    requiredFields = {
        "additionalComments",
        "caseLevelQuestions",
        "eventDate",
        "germlineVariantLevelQuestions",
        "otherActionableVariants",
        "reporter",
        "somaticVariantLevelQuestions",
    }
    @classmethod
    def isEmbeddedType(cls, fieldName):
        # Four fields hold (arrays of) other ProtocolElement records.
        embeddedTypes = {
            'caseLevelQuestions': CancerCaseLevelQuestions,
            'germlineVariantLevelQuestions': CancerGermlineVariantLevelQuestions,
            'otherActionableVariants': AdditionalVariantsQuestions,
            'somaticVariantLevelQuestions': CancerSomaticVariantLevelQuestions,
        }
        return fieldName in embeddedTypes
    @classmethod
    def getEmbeddedType(cls, fieldName):
        # Raises KeyError for non-embedded fields by design.
        embeddedTypes = {
            'caseLevelQuestions': CancerCaseLevelQuestions,
            'germlineVariantLevelQuestions': CancerGermlineVariantLevelQuestions,
            'otherActionableVariants': AdditionalVariantsQuestions,
            'somaticVariantLevelQuestions': CancerSomaticVariantLevelQuestions,
        }
        return embeddedTypes[fieldName]
    __slots__ = [
        'additionalComments', 'caseLevelQuestions', 'eventDate',
        'germlineVariantLevelQuestions', 'otherActionableVariants',
        'reporter', 'somaticVariantLevelQuestions'
    ]
    def __init__(self, **kwargs):
        # `caseLevelQuestions` defaults to an empty record; all other
        # fields default to None when omitted.
        self.additionalComments = kwargs.get(
            'additionalComments', None)
        self.caseLevelQuestions = kwargs.get(
            'caseLevelQuestions', CancerCaseLevelQuestions())
        self.eventDate = kwargs.get(
            'eventDate', None)
        self.germlineVariantLevelQuestions = kwargs.get(
            'germlineVariantLevelQuestions', None)
        self.otherActionableVariants = kwargs.get(
            'otherActionableVariants', None)
        self.reporter = kwargs.get(
            'reporter', None)
        self.somaticVariantLevelQuestions = kwargs.get(
            'somaticVariantLevelQuestions', None)
class CancerGermlineVariantLevelQuestions(ProtocolElement):
    """
    The questions for the cancer program exit questionnaire for
    germline variants: details, actionability, usability, testing
    status and validation assay.

    Auto-generated Avro record (namespace ``org.gel.models.report.avro``).
    """
    # Raw Avro schema JSON; parsed once at class-definition time into `schema`.
    _schemaSource = """
{"type": "record", "name": "CancerGermlineVariantLevelQuestions", "namespace":
"org.gel.models.report.avro", "doc": "", "fields": [{"name": "variantDetails", "type": "string",
"doc": ""}, {"name": "variantActionability", "type": {"type": "array", "items": {"type": "enum",
"name": "CancerActionability", "doc": "", "symbols": ["germline_susceptibility",
"predicts_therapeutic_response", "prognostic", "defines_diagnosis_group", "eligibility_for_trial",
"other"]}}, "doc": ""}, {"name": "otherVariantActionability", "type": ["null", "string"]}, {"name":
"variantUsability", "type": {"type": "enum", "name": "CancerUsabilityGermline", "doc": "",
"symbols": ["already_actioned", "actioned_result_of_this_wga"]}, "doc": ""}, {"name":
"variantTested", "type": {"type": "enum", "name": "CancerTested", "doc": "", "symbols":
["not_indicated_for_patient_care", "no_orthologous_test_available", "test_performed_prior_to_wga",
"technical_validation_following_wga"]}, "doc": ""}, {"name": "validationAssayType", "type":
"string", "doc": ""}]}
"""
    schema = avro_parse(_schemaSource)
    # Every declared field name (enforcement lives in ProtocolElement).
    requiredFields = {
        "otherVariantActionability",
        "validationAssayType",
        "variantActionability",
        "variantDetails",
        "variantTested",
        "variantUsability",
    }
    @classmethod
    def isEmbeddedType(cls, fieldName):
        # All fields are strings/enums; no embedded ProtocolElement records.
        embeddedTypes = {}
        return fieldName in embeddedTypes
    @classmethod
    def getEmbeddedType(cls, fieldName):
        # No embedded types, so any lookup raises KeyError by design.
        embeddedTypes = {}
        return embeddedTypes[fieldName]
    __slots__ = [
        'otherVariantActionability', 'validationAssayType',
        'variantActionability', 'variantDetails', 'variantTested',
        'variantUsability'
    ]
    def __init__(self, **kwargs):
        # All fields are keyword-only and default to None when omitted.
        self.otherVariantActionability = kwargs.get(
            'otherVariantActionability', None)
        self.validationAssayType = kwargs.get(
            'validationAssayType', None)
        self.variantActionability = kwargs.get(
            'variantActionability', None)
        self.variantDetails = kwargs.get(
            'variantDetails', None)
        self.variantTested = kwargs.get(
            'variantTested', None)
        self.variantUsability = kwargs.get(
            'variantUsability', None)
class CancerInterpretationRequest(ProtocolElement):
    """
    This record represents basic information for this report.

    Carries the inputs of a cancer interpretation request: request
    identifiers and version, genome assembly, workspaces, supporting
    files (BAMs, VCFs, bigWigs, annotation and other files), the
    cancer participant record, family history, gene panel coverage,
    interpretation flags and free-form additional info — all mirrored
    from the Avro record schema in ``_schemaSource`` below.
    """
    # Verbatim JSON (Avro) record schema this class mirrors.
    _schemaSource = """
{"type": "record", "name": "CancerInterpretationRequest", "namespace": "org.gel.models.report.avro",
"doc": "", "fields": [{"name": "versionControl", "type": {"type": "record", "name":
"ReportVersionControl", "fields": [{"name": "gitVersionControl", "type": "string", "doc": "",
"default": "5.0.0"}]}, "doc": ""}, {"name": "interpretationRequestId", "type": "string", "doc": ""},
{"name": "interpretationRequestVersion", "type": "int", "doc": ""}, {"name": "internalStudyId",
"type": "string", "doc": ""}, {"name": "genomeAssembly", "type": {"type": "enum", "name":
"Assembly", "doc": "", "symbols": ["GRCh38", "GRCh37"]}, "doc": ""}, {"name": "workspace", "type":
{"type": "array", "items": "string"}, "doc": ""}, {"name": "bams", "type": ["null", {"type":
"array", "items": {"type": "record", "name": "File", "doc": "", "fields": [{"name": "sampleId",
"type": ["null", {"type": "array", "items": "string"}], "doc": ""}, {"name": "uriFile", "type":
"string", "doc": ""}, {"name": "fileType", "type": {"type": "enum", "name": "FileType", "symbols":
["BAM", "gVCF", "VCF_small", "VCF_somatic_small", "VCF_CNV", "VCF_somatic_CNV", "VCF_SV",
"VCF_somatic_SV", "VCF_SV_CNV", "SVG", "ANN", "BigWig", "MD5Sum", "ROH", "OTHER", "PARTITION",
"VARIANT_FREQUENCIES", "COVERAGE"]}, "doc": ""}, {"name": "md5Sum", "type": ["null", "string"],
"doc": ""}]}}], "doc": ""}, {"name": "vcfs", "type": ["null", {"type": "array", "items": "File"}],
"doc": ""}, {"name": "bigWigs", "type": ["null", {"type": "array", "items": "File"}], "doc": ""},
{"name": "annotationFile", "type": ["null", "File"], "doc": ""}, {"name": "otherFiles", "type":
["null", {"type": "map", "values": "File"}], "doc": ""}, {"name": "cancerParticipant", "type":
["null", {"type": "record", "name": "CancerParticipant", "namespace":
"org.gel.models.participant.avro", "doc": "", "fields": [{"name": "yearOfBirth", "type": ["null",
"int"], "doc": ""}, {"name": "morphology", "type": ["null", {"type": "array", "items": "string"}],
"doc": ""}, {"name": "readyForAnalysis", "type": "boolean", "doc": ""}, {"name": "consentStatus",
"type": ["null", {"type": "record", "name": "ConsentStatus", "doc": "", "fields": [{"name":
"programmeConsent", "type": "boolean", "doc": "", "default": false}, {"name":
"primaryFindingConsent", "type": "boolean", "doc": "", "default": false}, {"name":
"secondaryFindingConsent", "type": "boolean", "doc": "", "default": false}, {"name":
"carrierStatusConsent", "type": "boolean", "doc": "", "default": false}]}], "doc": ""}, {"name":
"center", "type": ["null", "string"], "doc": ""}, {"name": "individualId", "type": "string", "doc":
""}, {"name": "primaryDiagnosisDisease", "type": ["null", {"type": "array", "items": "string"}],
"doc": ""}, {"name": "primaryDiagnosisSubDisease", "type": ["null", {"type": "array", "items":
"string"}], "doc": ""}, {"name": "sex", "type": {"type": "enum", "name": "Sex", "doc": "",
"symbols": ["MALE", "FEMALE", "UNKNOWN"]}, "doc": ""}, {"name": "additionalInformation", "type":
["null", {"type": "map", "values": "string"}], "doc": ""}, {"name": "assignedICD10", "type":
["null", {"type": "array", "items": "string"}], "doc": ""}, {"name": "tumourSamples", "type":
{"type": "array", "items": {"type": "record", "name": "TumourSample", "doc": "", "fields": [{"name":
"sampleId", "type": "string", "doc": ""}, {"name": "labSampleId", "type": "int", "doc": ""},
{"name": "LDPCode", "type": "string", "doc": ""}, {"name": "tumourId", "type": "string", "doc": ""},
{"name": "programmePhase", "type": ["null", {"type": "enum", "name": "ProgrammePhase", "symbols":
["CRUK", "OXFORD", "CLL", "IIP", "MAIN", "EXPT"]}], "doc": ""}, {"name": "diseaseType", "type":
["null", {"type": "enum", "name": "diseaseType", "symbols": ["ADULT_GLIOMA", "BLADDER", "BREAST",
"CARCINOMA_OF_UNKNOWN_PRIMARY", "CHILDHOOD", "COLORECTAL", "ENDOMETRIAL_CARCINOMA", "HAEMONC",
"HEPATOPANCREATOBILIARY", "LUNG", "MALIGNANT_MELANOMA", "NASOPHARYNGEAL", "ORAL_OROPHARYNGEAL",
"OVARIAN", "PROSTATE", "RENAL", "SARCOMA", "SINONASAL", "TESTICULAR_GERM_CELL_TUMOURS",
"UPPER_GASTROINTESTINAL", "NON_HODGKINS_B_CELL_LYMPHOMA_LOW_MOD_GRADE", "CLASSICAL_HODGKINS",
"NODULAR_LYMPHOCYTE_PREDOMINANT_HODGKINS", "T_CELL_LYMPHOMA"]}], "doc": ""}, {"name":
"diseaseSubType", "type": ["null", "string"], "doc": ""}, {"name": "clinicalSampleDateTime", "type":
["null", "string"], "doc": ""}, {"name": "tumourType", "type": ["null", {"type": "enum", "name":
"TumourType", "symbols": ["PRIMARY", "METASTATIC_RECURRENCE", "RECURRENCE_OF_PRIMARY_TUMOUR",
"METASTASES"]}], "doc": ""}, {"name": "tumourContent", "type": ["null", {"type": "enum", "name":
"TumourContent", "symbols": ["High", "Medium", "Low"]}], "doc": ""}, {"name": "source", "type":
["null", {"type": "enum", "name": "SampleSource", "doc": "", "symbols": ["TUMOUR",
"BONE_MARROW_ASPIRATE_TUMOUR_SORTED_CELLS", "BONE_MARROW_ASPIRATE_TUMOUR_CELLS", "BLOOD", "SALIVA",
"FIBROBLAST", "TISSUE"]}], "doc": ""}, {"name": "preparationMethod", "type": ["null", {"type":
"enum", "name": "PreparationMethod", "symbols": ["EDTA", "ORAGENE", "FF", "FFPE",
"CD128_SORTED_CELLS", "ASPIRATE"]}], "doc": ""}, {"name": "tissueSource", "type": ["null", {"type":
"enum", "name": "TissueSource", "symbols": ["BMA_TUMOUR_SORTED_CELLS", "CT_GUIDED_BIOPSY",
"ENDOSCOPIC_BIOPSY", "ENDOSCOPIC_ULTRASOUND_GUIDED_BIOPSY", "ENDOSCOPIC_ULTRASOUND_GUIDED_FNA",
"LAPAROSCOPIC_BIOPSY", "LAPAROSCOPIC_EXCISION", "MRI_GUIDED_BIOPSY", "NON_GUIDED_BIOPSY",
"SURGICAL_RESECTION", "STEREOTACTICALLY_GUIDED_BIOPSY", "USS_GUIDED_BIOPSY",
"NON_STANDARD_BIOPSY"]}], "doc": ""}, {"name": "product", "type": ["null", {"type": "enum", "name":
"Product", "symbols": ["DNA", "RNA"]}], "doc": ""}, {"name": "morphologyICD", "type": ["null",
"string"], "doc": ""}, {"name": "morphologySnomedCT", "type": ["null", "string"], "doc": ""},
{"name": "morphologySnomedRT", "type": ["null", "string"], "doc": ""}, {"name": "topographyICD",
"type": ["null", "string"], "doc": ""}, {"name": "topographySnomedCT", "type": ["null", "string"],
"doc": ""}, {"name": "topographySnomedRT", "type": ["null", "string"], "doc": ""}]}}, "doc": ""},
{"name": "germlineSamples", "type": {"type": "array", "items": {"type": "record", "name":
"GermlineSample", "doc": "", "fields": [{"name": "sampleId", "type": "string", "doc": ""}, {"name":
"labSampleId", "type": "int", "doc": ""}, {"name": "LDPCode", "type": "string", "doc": ""}, {"name":
"source", "type": ["null", "SampleSource"], "doc": ""}, {"name": "product", "type": ["null",
"Product"], "doc": ""}, {"name": "preparationMethod", "type": ["null", "PreparationMethod"], "doc":
""}, {"name": "programmePhase", "type": ["null", "ProgrammePhase"], "doc": ""}, {"name":
"clinicalSampleDateTime", "type": ["null", "string"], "doc": ""}]}}, "doc": ""}, {"name":
"matchedSamples", "type": {"type": "array", "items": {"type": "record", "name": "MatchedSamples",
"doc": "", "fields": [{"name": "germlineSampleId", "type": ["null", "string"], "doc": ""}, {"name":
"tumourSampleId", "type": ["null", "string"], "doc": ""}]}}, "doc": ""}, {"name": "versionControl",
"type": ["null", {"type": "record", "name": "VersionControl", "fields": [{"name":
"GitVersionControl", "type": "string", "doc": "", "default": "1.1.0"}]}], "doc": ""}]}], "doc": ""},
{"name": "otherFamilyHistory", "type": ["null", {"type": "record", "name": "OtherFamilyHistory",
"doc": "", "fields": [{"name": "maternalFamilyHistory", "type": ["null", {"type": "array", "items":
"string"}], "doc": ""}, {"name": "paternalFamilyHistory", "type": ["null", {"type": "array",
"items": "string"}], "doc": ""}]}], "doc": ""}, {"name": "genePanelsCoverage", "type": ["null",
{"type": "map", "values": {"type": "map", "values": {"type": "map", "values": "float"}}}], "doc":
""}, {"name": "interpretationFlags", "type": ["null", {"type": "array", "items": {"type": "record",
"name": "InterpretationFlag", "doc": "", "fields": [{"name": "interpretationFlag", "type": {"type":
"enum", "name": "InterpretationFlags", "doc": "", "symbols": ["mixed_chemistries",
"mixedLab_preparation", "low_tumour_purity", "uniparental_isodisomy", "uniparental_heterodisomy",
"unusual_karyotype", "high_cnv_count", "high_estimate_human_contamination_fraction",
"mixed_recruiting_gmc", "suspected_mosaicism", "low_quality_sample", "ffpe_tumour_sample",
"ff_nano_tumour_sample", "missing_values_for_proband_in_reported_variant", "reissued",
"supplementary_report_errors", "internal_use_only", "high_priority", "other"]}, "doc": ""}, {"name":
"additionalDescription", "type": ["null", "string"], "doc": ""}]}}], "doc": ""}, {"name":
"additionalInfo", "type": ["null", {"type": "map", "values": "string"}], "doc": ""}]}
"""
    # Parsed once at class-definition time (avro_parse is defined
    # elsewhere in this module).
    schema = avro_parse(_schemaSource)
    # All field names declared in the schema above; the generator lists
    # nullable fields here as well.
    requiredFields = {
        "additionalInfo",
        "annotationFile",
        "bams",
        "bigWigs",
        "cancerParticipant",
        "genePanelsCoverage",
        "genomeAssembly",
        "internalStudyId",
        "interpretationFlags",
        "interpretationRequestId",
        "interpretationRequestVersion",
        "otherFamilyHistory",
        "otherFiles",
        "vcfs",
        "versionControl",
        "workspace",
    }

    @classmethod
    def isEmbeddedType(cls, fieldName):
        # Fields whose values are themselves ProtocolElement records
        # (arrays/maps of File, the participant, flags, etc.).
        embeddedTypes = {
            'annotationFile': File,
            'bams': File,
            'bigWigs': File,
            'cancerParticipant': CancerParticipant,
            'interpretationFlags': InterpretationFlag,
            'otherFamilyHistory': OtherFamilyHistory,
            'otherFiles': File,
            'vcfs': File,
            'versionControl': ReportVersionControl,
        }
        return fieldName in embeddedTypes

    @classmethod
    def getEmbeddedType(cls, fieldName):
        # Same mapping as isEmbeddedType; raises KeyError for
        # non-embedded field names.
        embeddedTypes = {
            'annotationFile': File,
            'bams': File,
            'bigWigs': File,
            'cancerParticipant': CancerParticipant,
            'interpretationFlags': InterpretationFlag,
            'otherFamilyHistory': OtherFamilyHistory,
            'otherFiles': File,
            'vcfs': File,
            'versionControl': ReportVersionControl,
        }
        return embeddedTypes[fieldName]

    __slots__ = [
        'additionalInfo', 'annotationFile', 'bams', 'bigWigs',
        'cancerParticipant', 'genePanelsCoverage', 'genomeAssembly',
        'internalStudyId', 'interpretationFlags',
        'interpretationRequestId', 'interpretationRequestVersion',
        'otherFamilyHistory', 'otherFiles', 'vcfs', 'versionControl',
        'workspace'
    ]

    def __init__(self, **kwargs):
        # All fields are keyword-only; unspecified fields default to
        # None, except versionControl which defaults to a fresh
        # ReportVersionControl instance (created per call).
        self.additionalInfo = kwargs.get(
            'additionalInfo', None)
        self.annotationFile = kwargs.get(
            'annotationFile', None)
        self.bams = kwargs.get(
            'bams', None)
        self.bigWigs = kwargs.get(
            'bigWigs', None)
        self.cancerParticipant = kwargs.get(
            'cancerParticipant', None)
        self.genePanelsCoverage = kwargs.get(
            'genePanelsCoverage', None)
        self.genomeAssembly = kwargs.get(
            'genomeAssembly', None)
        self.internalStudyId = kwargs.get(
            'internalStudyId', None)
        self.interpretationFlags = kwargs.get(
            'interpretationFlags', None)
        self.interpretationRequestId = kwargs.get(
            'interpretationRequestId', None)
        self.interpretationRequestVersion = kwargs.get(
            'interpretationRequestVersion', None)
        self.otherFamilyHistory = kwargs.get(
            'otherFamilyHistory', None)
        self.otherFiles = kwargs.get(
            'otherFiles', None)
        self.vcfs = kwargs.get(
            'vcfs', None)
        self.versionControl = kwargs.get(
            'versionControl', ReportVersionControl())
        self.workspace = kwargs.get(
            'workspace', None)
class CancerInterpretedGenome(ProtocolElement):
    """
    A interpreted genome for the cancer program. This holds the list
    of candidate variants reported by an interpretation service
    together with all the relevant information that identify the case
    and how these conclusions were reached.

    Mirrors the Avro record schema in ``_schemaSource`` below: request
    identifiers, the interpretation service, reported variants,
    reference-database and software versions, and optional comments.
    """
    # Verbatim JSON (Avro) record schema this class mirrors.
    _schemaSource = """
{"type": "record", "name": "CancerInterpretedGenome", "namespace": "org.gel.models.report.avro",
"doc": "", "fields": [{"name": "versionControl", "type": {"type": "record", "name":
"ReportVersionControl", "fields": [{"name": "gitVersionControl", "type": "string", "doc": "",
"default": "5.0.0"}]}, "doc": ""}, {"name": "interpretationRequestId", "type": "string", "doc": ""},
{"name": "interpretationRequestVersion", "type": "int", "doc": ""}, {"name":
"interpretationService", "type": "string", "doc": ""}, {"name": "reportUrl", "type": ["null",
"string"], "doc": ""}, {"name": "variants", "type": {"type": "array", "items": {"type": "record",
"name": "ReportedVariantCancer", "doc": "", "fields": [{"name": "variantCoordinates", "type":
{"type": "record", "name": "VariantCoordinates", "doc": "", "fields": [{"name": "chromosome",
"type": "string", "doc": ""}, {"name": "position", "type": "int", "doc": ""}, {"name": "reference",
"type": "string", "doc": ""}, {"name": "alternate", "type": "string", "doc": ""}, {"name":
"assembly", "type": {"type": "enum", "name": "Assembly", "doc": "", "symbols": ["GRCh38",
"GRCh37"]}, "doc": ""}]}, "doc": ""}, {"name": "dbSnpId", "type": ["null", "string"], "doc": ""},
{"name": "cosmicIds", "type": ["null", {"type": "array", "items": "string"}], "doc": ""}, {"name":
"clinVarIds", "type": ["null", {"type": "array", "items": "string"}], "doc": ""}, {"name":
"genomicChanges", "type": ["null", {"type": "array", "items": "string"}], "doc": ""}, {"name":
"cdnaChanges", "type": ["null", {"type": "array", "items": "string"}], "doc": ""}, {"name":
"proteinChanges", "type": ["null", {"type": "array", "items": "string"}], "doc": ""}, {"name":
"variantCalls", "type": {"type": "array", "items": {"type": "record", "name": "VariantCall", "doc":
"", "fields": [{"name": "participantId", "type": "string", "doc": ""}, {"name": "sampleId", "type":
"string", "doc": ""}, {"name": "zygosity", "type": {"type": "enum", "name": "Zygosity", "doc": "",
"symbols": ["reference_homozygous", "heterozygous", "alternate_homozygous", "missing",
"half_missing_reference", "half_missing_alternate", "alternate_hemizigous", "reference_hemizigous",
"unk", "na"]}, "doc": ""}, {"name": "phaseSet", "type": ["null", "int"], "doc": ""}, {"name": "vaf",
"type": ["null", "double"], "doc": ""}, {"name": "depthReference", "type": ["null", "int"], "doc":
""}, {"name": "depthAlternate", "type": ["null", "int"], "doc": ""}, {"name": "alleleOrigins",
"type": {"type": "array", "items": {"type": "enum", "name": "AlleleOrigin", "doc": "", "symbols":
["de_novo_variant", "germline_variant", "maternal_variant", "paternal_variant",
"pedigree_specific_variant", "population_specific_variant", "somatic_variant"]}}, "doc": ""}]}},
"doc": ""}, {"name": "reportEvents", "type": {"type": "array", "items": {"type": "record", "name":
"ReportEventCancer", "doc": "", "fields": [{"name": "reportEventId", "type": "string", "doc": ""},
{"name": "genomicEntities", "type": {"type": "array", "items": {"type": "record", "name":
"GenomicEntity", "doc": "", "fields": [{"name": "type", "type": {"type": "enum", "name":
"GenomicEntityType", "doc": "", "symbols": ["regulatory_region", "gene", "transcript",
"intergenic"]}, "doc": ""}, {"name": "ensemblId", "type": "string", "doc": ""}, {"name":
"geneSymbol", "type": ["null", "string"], "doc": ""}, {"name": "otherIds", "type": ["null", {"type":
"map", "values": "string"}], "doc": ""}]}}, "doc": ""}, {"name": "variantConsequences", "type":
{"type": "array", "items": {"type": "record", "name": "VariantConsequence", "doc": "", "fields":
[{"name": "id", "type": "string", "doc": ""}, {"name": "name", "type": ["null", "string"], "doc":
""}]}}, "doc": ""}, {"name": "actions", "type": ["null", {"type": "array", "items": {"type":
"record", "name": "Action", "doc": "", "fields": [{"name": "actionType", "type": ["null", {"type":
"enum", "name": "ActionType", "doc": "", "symbols": ["therapy", "therapeutic", "prognosis",
"diagnosis"]}], "doc": ""}, {"name": "references", "type": ["null", {"type": "array", "items":
"string"}], "doc": ""}, {"name": "status", "type": ["null", {"type": "enum", "name": "ActionStatus",
"doc": "", "symbols": ["clinical", "pre_clinical"]}], "doc": ""}, {"name": "variantActionable",
"type": "boolean", "doc": ""}, {"name": "url", "type": ["null", "string"], "doc": ""}, {"name":
"evidenceType", "type": ["null", "string"], "doc": ""}, {"name": "source", "type": "string", "doc":
""}]}}], "doc": ""}, {"name": "groupOfVariants", "type": ["null", "int"], "doc": ""}, {"name":
"eventJustification", "type": ["null", "string"], "doc": ""}, {"name": "score", "type": ["null",
"float"], "doc": ""}, {"name": "vendorSpecificScores", "type": ["null", {"type": "map", "values":
"float"}], "doc": ""}, {"name": "variantClassification", "type": ["null", {"type": "record", "name":
"VariantClassification", "doc": "", "fields": [{"name": "clinicalSignificance", "type": ["null",
{"type": "enum", "name": "ClinicalSignificance", "symbols": ["benign", "likely_benign", "VUS",
"likely_pathogenic", "pathogenic", "uncertain_significance"]}], "doc": ""}, {"name":
"drugResponseClassification", "type": ["null", {"type": "enum", "name":
"DrugResponseClassification", "symbols": ["responsive", "resistant", "toxicity", "indication",
"contraindication", "dosing", "increased_monitoring", "efficacy"]}], "doc": ""}, {"name":
"traitAssociation", "type": ["null", {"type": "enum", "name": "TraitAssociation", "symbols":
["established_risk_allele", "likely_risk_allele", "uncertain_risk_allele", "protective"]}], "doc":
""}, {"name": "tumorigenesisClassification", "type": ["null", {"type": "enum", "name":
"TumorigenesisClassification", "symbols": ["driver", "passenger", "modifier"]}], "doc": ""},
{"name": "functionalEffect", "type": ["null", {"type": "enum", "name": "VariantFunctionalEffect",
"symbols": ["dominant_negative_variant", "gain_of_function_variant", "lethal_variant",
"loss_of_function_variant", "loss_of_heterozygosity", "null_variant"]}], "doc": ""}]}], "doc": ""},
{"name": "roleInCancer", "type": ["null", {"type": "array", "items": {"type": "enum", "name":
"RoleInCancer", "doc": "", "symbols": ["oncogene", "tumor_suppressor_gene", "both"]}}], "doc": ""},
{"name": "tier", "type": ["null", {"type": "enum", "name": "Tier", "doc": "", "symbols": ["NONE",
"TIER1", "TIER2", "TIER3", "TIER4", "TIER5"]}], "doc": ""}]}}, "doc": ""}, {"name":
"additionalTextualVariantAnnotations", "type": ["null", {"type": "map", "values": "string"}], "doc":
""}, {"name": "references", "type": ["null", {"type": "map", "values": "string"}], "doc": ""},
{"name": "variantAttributes", "type": ["null", {"type": "record", "name": "VariantAttributes",
"doc": "", "fields": [{"name": "ihp", "type": ["null", "int"], "doc": ""}, {"name":
"recurrentlyReported", "type": ["null", "boolean"], "doc": ""}, {"name": "fdp50", "type": ["null",
"string"], "doc": ""}, {"name": "others", "type": ["null", {"type": "map", "values": "string"}],
"doc": ""}]}], "doc": ""}, {"name": "alleleFrequencies", "type": ["null", {"type": "array", "items":
{"type": "record", "name": "AlleleFrequency", "doc": "", "fields": [{"name": "study", "type":
"string", "doc": ""}, {"name": "population", "type": "string", "doc": ""}, {"name":
"alternateFrequency", "type": "float", "doc": ""}]}}], "doc": ""}, {"name":
"additionalNumericVariantAnnotations", "type": ["null", {"type": "map", "values": "float"}], "doc":
""}, {"name": "comments", "type": ["null", {"type": "array", "items": "string"}], "doc": ""},
{"name": "alleleOrigins", "type": {"type": "array", "items": "AlleleOrigin"}, "doc": ""}]}}, "doc":
""}, {"name": "referenceDatabasesVersions", "type": {"type": "map", "values": "string"}, "doc": ""},
{"name": "softwareVersions", "type": {"type": "map", "values": "string"}, "doc": ""}, {"name":
"comments", "type": ["null", {"type": "array", "items": "string"}], "doc": ""}]}
"""
    # Parsed once at class-definition time (avro_parse is defined
    # elsewhere in this module).
    schema = avro_parse(_schemaSource)
    # All field names declared in the schema above; the generator lists
    # nullable fields (e.g. comments, reportUrl) here as well.
    requiredFields = {
        "comments",
        "interpretationRequestId",
        "interpretationRequestVersion",
        "interpretationService",
        "referenceDatabasesVersions",
        "reportUrl",
        "softwareVersions",
        "variants",
        "versionControl",
    }

    @classmethod
    def isEmbeddedType(cls, fieldName):
        # Fields whose values are themselves ProtocolElement records.
        embeddedTypes = {
            'variants': ReportedVariantCancer,
            'versionControl': ReportVersionControl,
        }
        return fieldName in embeddedTypes

    @classmethod
    def getEmbeddedType(cls, fieldName):
        # Same mapping as isEmbeddedType; raises KeyError for
        # non-embedded field names.
        embeddedTypes = {
            'variants': ReportedVariantCancer,
            'versionControl': ReportVersionControl,
        }
        return embeddedTypes[fieldName]

    __slots__ = [
        'comments', 'interpretationRequestId',
        'interpretationRequestVersion', 'interpretationService',
        'referenceDatabasesVersions', 'reportUrl', 'softwareVersions',
        'variants', 'versionControl'
    ]

    def __init__(self, **kwargs):
        # All fields are keyword-only; unspecified fields default to
        # None, except versionControl which defaults to a fresh
        # ReportVersionControl instance (created per call).
        self.comments = kwargs.get(
            'comments', None)
        self.interpretationRequestId = kwargs.get(
            'interpretationRequestId', None)
        self.interpretationRequestVersion = kwargs.get(
            'interpretationRequestVersion', None)
        self.interpretationService = kwargs.get(
            'interpretationService', None)
        self.referenceDatabasesVersions = kwargs.get(
            'referenceDatabasesVersions', None)
        self.reportUrl = kwargs.get(
            'reportUrl', None)
        self.softwareVersions = kwargs.get(
            'softwareVersions', None)
        self.variants = kwargs.get(
            'variants', None)
        self.versionControl = kwargs.get(
            'versionControl', ReportVersionControl())
class CancerParticipant(ProtocolElement):
    """
    This defines a Cancer Participant.

    Mirrors the Avro record schema in ``_schemaSource`` below:
    demographics (yearOfBirth, sex), consent status, diagnosis fields,
    tumour/germline/matched samples and version control.
    """
    # Verbatim JSON (Avro) record schema this class mirrors (namespace
    # "org.gel.models.participant.avro", unlike the report records).
    _schemaSource = """
{"type": "record", "name": "CancerParticipant", "namespace": "org.gel.models.participant.avro",
"doc": "", "fields": [{"name": "yearOfBirth", "type": ["null", "int"], "doc": ""}, {"name":
"morphology", "type": ["null", {"type": "array", "items": "string"}], "doc": ""}, {"name":
"readyForAnalysis", "type": "boolean", "doc": ""}, {"name": "consentStatus", "type": ["null",
{"type": "record", "name": "ConsentStatus", "doc": "", "fields": [{"name": "programmeConsent",
"type": "boolean", "doc": "", "default": false}, {"name": "primaryFindingConsent", "type":
"boolean", "doc": "", "default": false}, {"name": "secondaryFindingConsent", "type": "boolean",
"doc": "", "default": false}, {"name": "carrierStatusConsent", "type": "boolean", "doc": "",
"default": false}]}], "doc": ""}, {"name": "center", "type": ["null", "string"], "doc": ""},
{"name": "individualId", "type": "string", "doc": ""}, {"name": "primaryDiagnosisDisease", "type":
["null", {"type": "array", "items": "string"}], "doc": ""}, {"name": "primaryDiagnosisSubDisease",
"type": ["null", {"type": "array", "items": "string"}], "doc": ""}, {"name": "sex", "type": {"type":
"enum", "name": "Sex", "doc": "", "symbols": ["MALE", "FEMALE", "UNKNOWN"]}, "doc": ""}, {"name":
"additionalInformation", "type": ["null", {"type": "map", "values": "string"}], "doc": ""}, {"name":
"assignedICD10", "type": ["null", {"type": "array", "items": "string"}], "doc": ""}, {"name":
"tumourSamples", "type": {"type": "array", "items": {"type": "record", "name": "TumourSample",
"doc": "", "fields": [{"name": "sampleId", "type": "string", "doc": ""}, {"name": "labSampleId",
"type": "int", "doc": ""}, {"name": "LDPCode", "type": "string", "doc": ""}, {"name": "tumourId",
"type": "string", "doc": ""}, {"name": "programmePhase", "type": ["null", {"type": "enum", "name":
"ProgrammePhase", "symbols": ["CRUK", "OXFORD", "CLL", "IIP", "MAIN", "EXPT"]}], "doc": ""},
{"name": "diseaseType", "type": ["null", {"type": "enum", "name": "diseaseType", "symbols":
["ADULT_GLIOMA", "BLADDER", "BREAST", "CARCINOMA_OF_UNKNOWN_PRIMARY", "CHILDHOOD", "COLORECTAL",
"ENDOMETRIAL_CARCINOMA", "HAEMONC", "HEPATOPANCREATOBILIARY", "LUNG", "MALIGNANT_MELANOMA",
"NASOPHARYNGEAL", "ORAL_OROPHARYNGEAL", "OVARIAN", "PROSTATE", "RENAL", "SARCOMA", "SINONASAL",
"TESTICULAR_GERM_CELL_TUMOURS", "UPPER_GASTROINTESTINAL",
"NON_HODGKINS_B_CELL_LYMPHOMA_LOW_MOD_GRADE", "CLASSICAL_HODGKINS",
"NODULAR_LYMPHOCYTE_PREDOMINANT_HODGKINS", "T_CELL_LYMPHOMA"]}], "doc": ""}, {"name":
"diseaseSubType", "type": ["null", "string"], "doc": ""}, {"name": "clinicalSampleDateTime", "type":
["null", "string"], "doc": ""}, {"name": "tumourType", "type": ["null", {"type": "enum", "name":
"TumourType", "symbols": ["PRIMARY", "METASTATIC_RECURRENCE", "RECURRENCE_OF_PRIMARY_TUMOUR",
"METASTASES"]}], "doc": ""}, {"name": "tumourContent", "type": ["null", {"type": "enum", "name":
"TumourContent", "symbols": ["High", "Medium", "Low"]}], "doc": ""}, {"name": "source", "type":
["null", {"type": "enum", "name": "SampleSource", "doc": "", "symbols": ["TUMOUR",
"BONE_MARROW_ASPIRATE_TUMOUR_SORTED_CELLS", "BONE_MARROW_ASPIRATE_TUMOUR_CELLS", "BLOOD", "SALIVA",
"FIBROBLAST", "TISSUE"]}], "doc": ""}, {"name": "preparationMethod", "type": ["null", {"type":
"enum", "name": "PreparationMethod", "symbols": ["EDTA", "ORAGENE", "FF", "FFPE",
"CD128_SORTED_CELLS", "ASPIRATE"]}], "doc": ""}, {"name": "tissueSource", "type": ["null", {"type":
"enum", "name": "TissueSource", "symbols": ["BMA_TUMOUR_SORTED_CELLS", "CT_GUIDED_BIOPSY",
"ENDOSCOPIC_BIOPSY", "ENDOSCOPIC_ULTRASOUND_GUIDED_BIOPSY", "ENDOSCOPIC_ULTRASOUND_GUIDED_FNA",
"LAPAROSCOPIC_BIOPSY", "LAPAROSCOPIC_EXCISION", "MRI_GUIDED_BIOPSY", "NON_GUIDED_BIOPSY",
"SURGICAL_RESECTION", "STEREOTACTICALLY_GUIDED_BIOPSY", "USS_GUIDED_BIOPSY",
"NON_STANDARD_BIOPSY"]}], "doc": ""}, {"name": "product", "type": ["null", {"type": "enum", "name":
"Product", "symbols": ["DNA", "RNA"]}], "doc": ""}, {"name": "morphologyICD", "type": ["null",
"string"], "doc": ""}, {"name": "morphologySnomedCT", "type": ["null", "string"], "doc": ""},
{"name": "morphologySnomedRT", "type": ["null", "string"], "doc": ""}, {"name": "topographyICD",
"type": ["null", "string"], "doc": ""}, {"name": "topographySnomedCT", "type": ["null", "string"],
"doc": ""}, {"name": "topographySnomedRT", "type": ["null", "string"], "doc": ""}]}}, "doc": ""},
{"name": "germlineSamples", "type": {"type": "array", "items": {"type": "record", "name":
"GermlineSample", "doc": "", "fields": [{"name": "sampleId", "type": "string", "doc": ""}, {"name":
"labSampleId", "type": "int", "doc": ""}, {"name": "LDPCode", "type": "string", "doc": ""}, {"name":
"source", "type": ["null", "SampleSource"], "doc": ""}, {"name": "product", "type": ["null",
"Product"], "doc": ""}, {"name": "preparationMethod", "type": ["null", "PreparationMethod"], "doc":
""}, {"name": "programmePhase", "type": ["null", "ProgrammePhase"], "doc": ""}, {"name":
"clinicalSampleDateTime", "type": ["null", "string"], "doc": ""}]}}, "doc": ""}, {"name":
"matchedSamples", "type": {"type": "array", "items": {"type": "record", "name": "MatchedSamples",
"doc": "", "fields": [{"name": "germlineSampleId", "type": ["null", "string"], "doc": ""}, {"name":
"tumourSampleId", "type": ["null", "string"], "doc": ""}]}}, "doc": ""}, {"name": "versionControl",
"type": ["null", {"type": "record", "name": "VersionControl", "fields": [{"name":
"GitVersionControl", "type": "string", "doc": "", "default": "1.1.0"}]}], "doc": ""}]}
"""
    # Parsed once at class-definition time (avro_parse is defined
    # elsewhere in this module).
    schema = avro_parse(_schemaSource)
    # All field names declared in the schema above; the generator lists
    # nullable fields here as well.
    requiredFields = {
        "additionalInformation",
        "assignedICD10",
        "center",
        "consentStatus",
        "germlineSamples",
        "individualId",
        "matchedSamples",
        "morphology",
        "primaryDiagnosisDisease",
        "primaryDiagnosisSubDisease",
        "readyForAnalysis",
        "sex",
        "tumourSamples",
        "versionControl",
        "yearOfBirth",
    }

    @classmethod
    def isEmbeddedType(cls, fieldName):
        # Fields whose values are themselves ProtocolElement records.
        # Note versionControl maps to VersionControl here (participant
        # model), not ReportVersionControl as in the report records.
        embeddedTypes = {
            'consentStatus': ConsentStatus,
            'germlineSamples': GermlineSample,
            'matchedSamples': MatchedSamples,
            'tumourSamples': TumourSample,
            'versionControl': VersionControl,
        }
        return fieldName in embeddedTypes

    @classmethod
    def getEmbeddedType(cls, fieldName):
        # Same mapping as isEmbeddedType; raises KeyError for
        # non-embedded field names.
        embeddedTypes = {
            'consentStatus': ConsentStatus,
            'germlineSamples': GermlineSample,
            'matchedSamples': MatchedSamples,
            'tumourSamples': TumourSample,
            'versionControl': VersionControl,
        }
        return embeddedTypes[fieldName]

    __slots__ = [
        'additionalInformation', 'assignedICD10', 'center',
        'consentStatus', 'germlineSamples', 'individualId',
        'matchedSamples', 'morphology', 'primaryDiagnosisDisease',
        'primaryDiagnosisSubDisease', 'readyForAnalysis', 'sex',
        'tumourSamples', 'versionControl', 'yearOfBirth'
    ]

    def __init__(self, **kwargs):
        # All fields are keyword-only and default to None when absent.
        self.additionalInformation = kwargs.get(
            'additionalInformation', None)
        self.assignedICD10 = kwargs.get(
            'assignedICD10', None)
        self.center = kwargs.get(
            'center', None)
        self.consentStatus = kwargs.get(
            'consentStatus', None)
        self.germlineSamples = kwargs.get(
            'germlineSamples', None)
        self.individualId = kwargs.get(
            'individualId', None)
        self.matchedSamples = kwargs.get(
            'matchedSamples', None)
        self.morphology = kwargs.get(
            'morphology', None)
        self.primaryDiagnosisDisease = kwargs.get(
            'primaryDiagnosisDisease', None)
        self.primaryDiagnosisSubDisease = kwargs.get(
            'primaryDiagnosisSubDisease', None)
        self.readyForAnalysis = kwargs.get(
            'readyForAnalysis', None)
        self.sex = kwargs.get(
            'sex', None)
        self.tumourSamples = kwargs.get(
            'tumourSamples', None)
        self.versionControl = kwargs.get(
            'versionControl', None)
        self.yearOfBirth = kwargs.get(
            'yearOfBirth', None)
class CancerSomaticVariantLevelQuestions(ProtocolElement):
    """
    The questions for the cancer program exit questionnaire for
    somatic variants.

    Protocol element backed by the Avro record schema embedded in
    ``_schemaSource`` below: one instance attribute per schema field.
    Differs from the germline variant questions in its actionability
    enum (CancerActionabilitySomatic) and usability enum
    (CancerUsabilitySomatic, which adds `not_yet_actioned`).
    """
    # Verbatim JSON (Avro) record schema this class mirrors.
    _schemaSource = """
{"type": "record", "name": "CancerSomaticVariantLevelQuestions", "namespace":
"org.gel.models.report.avro", "doc": "", "fields": [{"name": "variantDetails", "type": "string",
"doc": ""}, {"name": "variantActionability", "type": {"type": "array", "items": {"type": "enum",
"name": "CancerActionabilitySomatic", "doc": "", "symbols": ["predicts_therapeutic_response",
"prognostic", "defines_diagnosis_group", "eligibility_for_trial", "other"]}}, "doc": ""}, {"name":
"otherVariantActionability", "type": ["null", "string"], "doc": ""}, {"name": "variantUsability",
"type": {"type": "enum", "name": "CancerUsabilitySomatic", "doc": "", "symbols":
["already_actioned", "actioned_result_of_this_wga", "not_yet_actioned"]}, "doc": ""}, {"name":
"variantTested", "type": {"type": "enum", "name": "CancerTested", "doc": "", "symbols":
["not_indicated_for_patient_care", "no_orthologous_test_available", "test_performed_prior_to_wga",
"technical_validation_following_wga"]}, "doc": ""}, {"name": "validationAssayType", "type":
"string", "doc": ""}]}
"""
    # Parsed once at class-definition time (avro_parse is defined
    # elsewhere in this module).
    schema = avro_parse(_schemaSource)
    # All field names declared in the schema above; note the generator
    # also lists nullable fields (e.g. otherVariantActionability) here.
    requiredFields = {
        "otherVariantActionability",
        "validationAssayType",
        "variantActionability",
        "variantDetails",
        "variantTested",
        "variantUsability",
    }

    @classmethod
    def isEmbeddedType(cls, fieldName):
        # This record embeds no other ProtocolElement types.
        embeddedTypes = {}
        return fieldName in embeddedTypes

    @classmethod
    def getEmbeddedType(cls, fieldName):
        # No embedded types: any lookup raises KeyError by design.
        embeddedTypes = {}
        return embeddedTypes[fieldName]

    __slots__ = [
        'otherVariantActionability', 'validationAssayType',
        'variantActionability', 'variantDetails', 'variantTested',
        'variantUsability'
    ]

    def __init__(self, **kwargs):
        # All fields are keyword-only and default to None when absent.
        self.otherVariantActionability = kwargs.get(
            'otherVariantActionability', None)
        self.validationAssayType = kwargs.get(
            'validationAssayType', None)
        self.variantActionability = kwargs.get(
            'variantActionability', None)
        self.variantDetails = kwargs.get(
            'variantDetails', None)
        self.variantTested = kwargs.get(
            'variantTested', None)
        self.variantUsability = kwargs.get(
            'variantUsability', None)
class CancerTested(object):
    """
    Enum-like constants answering: was the variant validated with an
    orthogonal technology?

    * `not_indicated_for_patient_care`: no -- not indicated for
      patient care at this time
    * `no_orthologous_test_available`: no -- no orthologous test
      available
    * `test_performed_prior_to_wga`: yes -- test performed prior to
      receiving WGA (e.g. using a standard-of-care assay such as panel
      testing, or sanger sequencing)
    * `technical_validation_following_wga`: yes -- technical
      validation performed/planned following receiving this WGA
    """
    not_indicated_for_patient_care = "not_indicated_for_patient_care"
    no_orthologous_test_available = "no_orthologous_test_available"
    test_performed_prior_to_wga = "test_performed_prior_to_wga"
    technical_validation_following_wga = "technical_validation_following_wga"

    def __hash__(self):
        # Delegate hashing to the instance's string representation.
        return hash(str(self))
class CancerTestedAdditional(object):
    """
    Variant tested (extended enumeration).

    * `not_indicated_for_patient_care`: No - not indicated for patient
      care at this time
    * `no_orthologous_test_available`: No - no orthologous test
      available
    * `test_performed_prior_to_wga`: Yes - test performed prior to
      receiving WGA (e.g. using a standard-of-care assay such as panel
      testing, or sanger sequencing)
    * `technical_validation_following_wga`: Yes - technical validation
      performed/planned following receiving this WGA
    * `na`: N/A
    """
    not_indicated_for_patient_care = "not_indicated_for_patient_care"
    no_orthologous_test_available = "no_orthologous_test_available"
    test_performed_prior_to_wga = "test_performed_prior_to_wga"
    technical_validation_following_wga = "technical_validation_following_wga"
    na = "na"

    def __hash__(self):
        # Hash on the string form, mirroring the generated enum pattern.
        return hash(str(self))
class CancerUsabilityGermline(object):
    """
    Variant usability for germline variants.

    * `already_actioned`: already actioned (i.e. prior to receiving
      this WGA)
    * `actioned_result_of_this_wga`: actioned as a result of receiving
      this WGA
    """
    already_actioned = "already_actioned"
    actioned_result_of_this_wga = "actioned_result_of_this_wga"

    def __hash__(self):
        # Hash on the string form, mirroring the generated enum pattern.
        return hash(str(self))
class CancerUsabilitySomatic(object):
    """
    Variant usability for somatic variants.

    * `already_actioned`: already actioned (i.e. prior to receiving
      this WGA)
    * `actioned_result_of_this_wga`: actioned as a result of receiving
      this WGA
    * `not_yet_actioned`: not yet actioned, but potentially actionable
      in the future
    """
    already_actioned = "already_actioned"
    actioned_result_of_this_wga = "actioned_result_of_this_wga"
    not_yet_actioned = "not_yet_actioned"

    def __hash__(self):
        # Hash on the string form, mirroring the generated enum pattern.
        return hash(str(self))
class CaseShared(ProtocolElement):
    """Record of the groups a case belonged to before and after a
    sharing change.  (The schema carries no documentation.)
    """
    # Avro schema (JSON) for this record; parsed once below.
    _schemaSource = """
{"type": "record", "name": "CaseShared", "namespace": "org.gel.models.report.avro", "fields":
[{"name": "previousGroups", "type": {"type": "array", "items": "string"}}, {"name":
"modifiedGroups", "type": {"type": "array", "items": "string"}}]}
"""
    schema = avro_parse(_schemaSource)
    # Field names that must be populated for this record to validate.
    requiredFields = {
        "modifiedGroups",
        "previousGroups",
    }

    @classmethod
    def isEmbeddedType(cls, fieldName):
        """Return True if fieldName holds an embedded ProtocolElement
        (never, for this record)."""
        return fieldName in {}

    @classmethod
    def getEmbeddedType(cls, fieldName):
        """Return the embedded type for fieldName; always raises
        KeyError since no fields here are embedded types."""
        return {}[fieldName]

    # One slot per schema field.
    __slots__ = [
        'modifiedGroups', 'previousGroups'
    ]

    def __init__(self, **kwargs):
        """Populate every slot from kwargs, defaulting to None."""
        for fieldName in self.__slots__:
            setattr(self, fieldName, kwargs.get(fieldName, None))
class CaseSolvedFamily(object):
    """Answers to whether the family's case is solved.

    (The schema carries no documentation.)
    """
    yes = "yes"
    no = "no"
    partially = "partially"
    unknown = "unknown"

    def __hash__(self):
        # Hash on the string form, mirroring the generated enum pattern.
        return hash(str(self))
class ChiSquare1KGenomesPhase3Pop(ProtocolElement):
    """
    Chi-square test for goodness of fit of this sample to 1000 Genomes
    Phase 3 populations
    """
    # Avro schema (JSON) for this record; parsed once below.
    _schemaSource = """
{"type": "record", "name": "ChiSquare1KGenomesPhase3Pop", "namespace":
"org.gel.models.participant.avro", "doc": "", "fields": [{"name": "kgSuperPopCategory", "type":
{"type": "enum", "name": "KgSuperPopCategory", "doc": "", "symbols": ["AFR", "AMR", "EAS", "EUR",
"SAS"]}, "doc": ""}, {"name": "kgPopCategory", "type": ["null", {"type": "enum", "name":
"KgPopCategory", "doc": "", "symbols": ["ACB", "ASW", "BEB", "CDX", "CEU", "CHB", "CHS", "CLM",
"ESN", "FIN", "GBR", "GIH", "GWD", "IBS", "ITU", "JPT", "KHV", "LWK", "MSL", "MXL", "PEL", "PJL",
"PUR", "STU", "TSI", "YRI"]}], "doc": ""}, {"name": "chiSquare", "type": "double", "doc": ""}]}
"""
    schema = avro_parse(_schemaSource)
    # Field names that must be populated for this record to validate.
    requiredFields = {
        "chiSquare",
        "kgPopCategory",
        "kgSuperPopCategory",
    }

    @classmethod
    def isEmbeddedType(cls, fieldName):
        """Return True if fieldName holds an embedded ProtocolElement
        (never, for this record)."""
        return fieldName in {}

    @classmethod
    def getEmbeddedType(cls, fieldName):
        """Return the embedded type for fieldName; always raises
        KeyError since no fields here are embedded types."""
        return {}[fieldName]

    # One slot per schema field.
    __slots__ = [
        'chiSquare', 'kgPopCategory', 'kgSuperPopCategory'
    ]

    def __init__(self, **kwargs):
        """Populate every slot from kwargs, defaulting to None."""
        for fieldName in self.__slots__:
            setattr(self, fieldName, kwargs.get(fieldName, None))
class ClinicalReportCancer(ProtocolElement):
    """
    A clinical report for the cancer program. This holds the list of
    reported variants by a GMC together with all the relevant
    information that identify the case and how these conclusions were
    reached.
    """
    # Avro schema (JSON) for this record; parsed once below.
    # NOTE(review): this class appears generator-emitted -- prefer
    # regenerating over hand-editing the schema string.
    _schemaSource = """
{"type": "record", "name": "ClinicalReportCancer", "namespace": "org.gel.models.report.avro", "doc":
"", "fields": [{"name": "interpretationRequestId", "type": "string", "doc": ""}, {"name":
"interpretationRequestVersion", "type": "int", "doc": ""}, {"name": "reportingDate", "type":
"string", "doc": ""}, {"name": "user", "type": "string", "doc": ""}, {"name": "variants", "type":
["null", {"type": "array", "items": {"type": "record", "name": "ReportedVariantCancer", "doc": "",
"fields": [{"name": "variantCoordinates", "type": {"type": "record", "name": "VariantCoordinates",
"doc": "", "fields": [{"name": "chromosome", "type": "string", "doc": ""}, {"name": "position",
"type": "int", "doc": ""}, {"name": "reference", "type": "string", "doc": ""}, {"name": "alternate",
"type": "string", "doc": ""}, {"name": "assembly", "type": {"type": "enum", "name": "Assembly",
"doc": "", "symbols": ["GRCh38", "GRCh37"]}, "doc": ""}]}, "doc": ""}, {"name": "dbSnpId", "type":
["null", "string"], "doc": ""}, {"name": "cosmicIds", "type": ["null", {"type": "array", "items":
"string"}], "doc": ""}, {"name": "clinVarIds", "type": ["null", {"type": "array", "items":
"string"}], "doc": ""}, {"name": "genomicChanges", "type": ["null", {"type": "array", "items":
"string"}], "doc": ""}, {"name": "cdnaChanges", "type": ["null", {"type": "array", "items":
"string"}], "doc": ""}, {"name": "proteinChanges", "type": ["null", {"type": "array", "items":
"string"}], "doc": ""}, {"name": "variantCalls", "type": {"type": "array", "items": {"type":
"record", "name": "VariantCall", "doc": "", "fields": [{"name": "participantId", "type": "string",
"doc": ""}, {"name": "sampleId", "type": "string", "doc": ""}, {"name": "zygosity", "type": {"type":
"enum", "name": "Zygosity", "doc": "", "symbols": ["reference_homozygous", "heterozygous",
"alternate_homozygous", "missing", "half_missing_reference", "half_missing_alternate",
"alternate_hemizigous", "reference_hemizigous", "unk", "na"]}, "doc": ""}, {"name": "phaseSet",
"type": ["null", "int"], "doc": ""}, {"name": "vaf", "type": ["null", "double"], "doc": ""},
{"name": "depthReference", "type": ["null", "int"], "doc": ""}, {"name": "depthAlternate", "type":
["null", "int"], "doc": ""}, {"name": "alleleOrigins", "type": {"type": "array", "items": {"type":
"enum", "name": "AlleleOrigin", "doc": "", "symbols": ["de_novo_variant", "germline_variant",
"maternal_variant", "paternal_variant", "pedigree_specific_variant", "population_specific_variant",
"somatic_variant"]}}, "doc": ""}]}}, "doc": ""}, {"name": "reportEvents", "type": {"type": "array",
"items": {"type": "record", "name": "ReportEventCancer", "doc": "", "fields": [{"name":
"reportEventId", "type": "string", "doc": ""}, {"name": "genomicEntities", "type": {"type": "array",
"items": {"type": "record", "name": "GenomicEntity", "doc": "", "fields": [{"name": "type", "type":
{"type": "enum", "name": "GenomicEntityType", "doc": "", "symbols": ["regulatory_region", "gene",
"transcript", "intergenic"]}, "doc": ""}, {"name": "ensemblId", "type": "string", "doc": ""},
{"name": "geneSymbol", "type": ["null", "string"], "doc": ""}, {"name": "otherIds", "type": ["null",
{"type": "map", "values": "string"}], "doc": ""}]}}, "doc": ""}, {"name": "variantConsequences",
"type": {"type": "array", "items": {"type": "record", "name": "VariantConsequence", "doc": "",
"fields": [{"name": "id", "type": "string", "doc": ""}, {"name": "name", "type": ["null", "string"],
"doc": ""}]}}, "doc": ""}, {"name": "actions", "type": ["null", {"type": "array", "items": {"type":
"record", "name": "Action", "doc": "", "fields": [{"name": "actionType", "type": ["null", {"type":
"enum", "name": "ActionType", "doc": "", "symbols": ["therapy", "therapeutic", "prognosis",
"diagnosis"]}], "doc": ""}, {"name": "references", "type": ["null", {"type": "array", "items":
"string"}], "doc": ""}, {"name": "status", "type": ["null", {"type": "enum", "name": "ActionStatus",
"doc": "", "symbols": ["clinical", "pre_clinical"]}], "doc": ""}, {"name": "variantActionable",
"type": "boolean", "doc": ""}, {"name": "url", "type": ["null", "string"], "doc": ""}, {"name":
"evidenceType", "type": ["null", "string"], "doc": ""}, {"name": "source", "type": "string", "doc":
""}]}}], "doc": ""}, {"name": "groupOfVariants", "type": ["null", "int"], "doc": ""}, {"name":
"eventJustification", "type": ["null", "string"], "doc": ""}, {"name": "score", "type": ["null",
"float"], "doc": ""}, {"name": "vendorSpecificScores", "type": ["null", {"type": "map", "values":
"float"}], "doc": ""}, {"name": "variantClassification", "type": ["null", {"type": "record", "name":
"VariantClassification", "doc": "", "fields": [{"name": "clinicalSignificance", "type": ["null",
{"type": "enum", "name": "ClinicalSignificance", "symbols": ["benign", "likely_benign", "VUS",
"likely_pathogenic", "pathogenic", "uncertain_significance"]}], "doc": ""}, {"name":
"drugResponseClassification", "type": ["null", {"type": "enum", "name":
"DrugResponseClassification", "symbols": ["responsive", "resistant", "toxicity", "indication",
"contraindication", "dosing", "increased_monitoring", "efficacy"]}], "doc": ""}, {"name":
"traitAssociation", "type": ["null", {"type": "enum", "name": "TraitAssociation", "symbols":
["established_risk_allele", "likely_risk_allele", "uncertain_risk_allele", "protective"]}], "doc":
""}, {"name": "tumorigenesisClassification", "type": ["null", {"type": "enum", "name":
"TumorigenesisClassification", "symbols": ["driver", "passenger", "modifier"]}], "doc": ""},
{"name": "functionalEffect", "type": ["null", {"type": "enum", "name": "VariantFunctionalEffect",
"symbols": ["dominant_negative_variant", "gain_of_function_variant", "lethal_variant",
"loss_of_function_variant", "loss_of_heterozygosity", "null_variant"]}], "doc": ""}]}], "doc": ""},
{"name": "roleInCancer", "type": ["null", {"type": "array", "items": {"type": "enum", "name":
"RoleInCancer", "doc": "", "symbols": ["oncogene", "tumor_suppressor_gene", "both"]}}], "doc": ""},
{"name": "tier", "type": ["null", {"type": "enum", "name": "Tier", "doc": "", "symbols": ["NONE",
"TIER1", "TIER2", "TIER3", "TIER4", "TIER5"]}], "doc": ""}]}}, "doc": ""}, {"name":
"additionalTextualVariantAnnotations", "type": ["null", {"type": "map", "values": "string"}], "doc":
""}, {"name": "references", "type": ["null", {"type": "map", "values": "string"}], "doc": ""},
{"name": "variantAttributes", "type": ["null", {"type": "record", "name": "VariantAttributes",
"doc": "", "fields": [{"name": "ihp", "type": ["null", "int"], "doc": ""}, {"name":
"recurrentlyReported", "type": ["null", "boolean"], "doc": ""}, {"name": "fdp50", "type": ["null",
"string"], "doc": ""}, {"name": "others", "type": ["null", {"type": "map", "values": "string"}],
"doc": ""}]}], "doc": ""}, {"name": "alleleFrequencies", "type": ["null", {"type": "array", "items":
{"type": "record", "name": "AlleleFrequency", "doc": "", "fields": [{"name": "study", "type":
"string", "doc": ""}, {"name": "population", "type": "string", "doc": ""}, {"name":
"alternateFrequency", "type": "float", "doc": ""}]}}], "doc": ""}, {"name":
"additionalNumericVariantAnnotations", "type": ["null", {"type": "map", "values": "float"}], "doc":
""}, {"name": "comments", "type": ["null", {"type": "array", "items": "string"}], "doc": ""},
{"name": "alleleOrigins", "type": {"type": "array", "items": "AlleleOrigin"}, "doc": ""}]}}], "doc":
""}, {"name": "genomicInterpretation", "type": "string", "doc": ""}, {"name": "references", "type":
["null", {"type": "array", "items": "string"}], "doc": ""}, {"name": "referenceDatabasesVersions",
"type": {"type": "map", "values": "string"}, "doc": ""}, {"name": "softwareVersions", "type":
{"type": "map", "values": "string"}, "doc": ""}]}
"""
    # Parsed Avro schema object built from the JSON string above.
    schema = avro_parse(_schemaSource)
    # Field names that must be populated for this record to validate.
    requiredFields = {
        "genomicInterpretation",
        "interpretationRequestId",
        "interpretationRequestVersion",
        "referenceDatabasesVersions",
        "references",
        "reportingDate",
        "softwareVersions",
        "user",
        "variants",
    }
    @classmethod
    def isEmbeddedType(cls, fieldName):
        """Return True if fieldName holds an embedded ProtocolElement."""
        embeddedTypes = {
            'variants': ReportedVariantCancer,
        }
        return fieldName in embeddedTypes
    @classmethod
    def getEmbeddedType(cls, fieldName):
        """Return the embedded ProtocolElement class for fieldName;
        raises KeyError for non-embedded fields."""
        embeddedTypes = {
            'variants': ReportedVariantCancer,
        }
        return embeddedTypes[fieldName]
    # One slot per schema field.
    __slots__ = [
        'genomicInterpretation', 'interpretationRequestId',
        'interpretationRequestVersion', 'referenceDatabasesVersions',
        'references', 'reportingDate', 'softwareVersions', 'user',
        'variants'
    ]
    def __init__(self, **kwargs):
        """Initialise each declared field from kwargs; missing fields
        default to None."""
        self.genomicInterpretation = kwargs.get(
            'genomicInterpretation', None)
        self.interpretationRequestId = kwargs.get(
            'interpretationRequestId', None)
        self.interpretationRequestVersion = kwargs.get(
            'interpretationRequestVersion', None)
        self.referenceDatabasesVersions = kwargs.get(
            'referenceDatabasesVersions', None)
        self.references = kwargs.get(
            'references', None)
        self.reportingDate = kwargs.get(
            'reportingDate', None)
        self.softwareVersions = kwargs.get(
            'softwareVersions', None)
        self.user = kwargs.get(
            'user', None)
        self.variants = kwargs.get(
            'variants', None)
class ClinicalReportRD(ProtocolElement):
    """
    A clinical report for the rare disease program. This holds the
    list of reported variants by a GMC together with all the
    relevant information that identify the case and how these
    conclusions were reached.
    """
    # Avro schema (JSON) for this record; parsed once below.
    # NOTE(review): this class appears generator-emitted -- prefer
    # regenerating over hand-editing the schema string.
    _schemaSource = """
{"type": "record", "name": "ClinicalReportRD", "namespace": "org.gel.models.report.avro", "doc": "",
"fields": [{"name": "interpretationRequestId", "type": "string", "doc": ""}, {"name":
"interpretationRequestVersion", "type": "int", "doc": ""}, {"name": "reportingDate", "type":
"string", "doc": ""}, {"name": "user", "type": "string", "doc": ""}, {"name": "variants", "type":
["null", {"type": "array", "items": {"type": "record", "name": "ReportedVariant", "doc": "",
"fields": [{"name": "variantCoordinates", "type": {"type": "record", "name": "VariantCoordinates",
"doc": "", "fields": [{"name": "chromosome", "type": "string", "doc": ""}, {"name": "position",
"type": "int", "doc": ""}, {"name": "reference", "type": "string", "doc": ""}, {"name": "alternate",
"type": "string", "doc": ""}, {"name": "assembly", "type": {"type": "enum", "name": "Assembly",
"doc": "", "symbols": ["GRCh38", "GRCh37"]}, "doc": ""}]}, "doc": ""}, {"name": "dbSnpId", "type":
["null", "string"], "doc": ""}, {"name": "cosmicIds", "type": ["null", {"type": "array", "items":
"string"}], "doc": ""}, {"name": "clinVarIds", "type": ["null", {"type": "array", "items":
"string"}], "doc": ""}, {"name": "genomicChanges", "type": ["null", {"type": "array", "items":
"string"}], "doc": ""}, {"name": "cdnaChanges", "type": ["null", {"type": "array", "items":
"string"}], "doc": ""}, {"name": "proteinChanges", "type": ["null", {"type": "array", "items":
"string"}], "doc": ""}, {"name": "variantCalls", "type": {"type": "array", "items": {"type":
"record", "name": "VariantCall", "doc": "", "fields": [{"name": "participantId", "type": "string",
"doc": ""}, {"name": "sampleId", "type": "string", "doc": ""}, {"name": "zygosity", "type": {"type":
"enum", "name": "Zygosity", "doc": "", "symbols": ["reference_homozygous", "heterozygous",
"alternate_homozygous", "missing", "half_missing_reference", "half_missing_alternate",
"alternate_hemizigous", "reference_hemizigous", "unk", "na"]}, "doc": ""}, {"name": "phaseSet",
"type": ["null", "int"], "doc": ""}, {"name": "vaf", "type": ["null", "double"], "doc": ""},
{"name": "depthReference", "type": ["null", "int"], "doc": ""}, {"name": "depthAlternate", "type":
["null", "int"], "doc": ""}, {"name": "alleleOrigins", "type": {"type": "array", "items": {"type":
"enum", "name": "AlleleOrigin", "doc": "", "symbols": ["de_novo_variant", "germline_variant",
"maternal_variant", "paternal_variant", "pedigree_specific_variant", "population_specific_variant",
"somatic_variant"]}}, "doc": ""}]}}, "doc": ""}, {"name": "reportEvents", "type": {"type": "array",
"items": {"type": "record", "name": "ReportEvent", "doc": "", "fields": [{"name": "reportEventId",
"type": "string", "doc": ""}, {"name": "phenotypes", "type": {"type": "array", "items": "string"},
"doc": ""}, {"name": "variantConsequences", "type": {"type": "array", "items": {"type": "record",
"name": "VariantConsequence", "doc": "", "fields": [{"name": "id", "type": "string", "doc": ""},
{"name": "name", "type": ["null", "string"], "doc": ""}]}}, "doc": ""}, {"name": "genePanel",
"type": ["null", {"type": "record", "name": "GenePanel", "doc": "", "fields": [{"name": "panelName",
"type": "string", "doc": ""}, {"name": "panelVersion", "type": ["null", "string"], "doc": ""}]}],
"doc": ""}, {"name": "modeOfInheritance", "type": {"type": "enum", "name":
"ReportedModeOfInheritance", "doc": "", "symbols": ["monoallelic", "monoallelic_not_imprinted",
"monoallelic_maternally_imprinted", "monoallelic_paternally_imprinted", "biallelic",
"monoallelic_and_biallelic", "monoallelic_and_more_severe_biallelic", "xlinked_biallelic",
"xlinked_monoallelic", "mitochondrial", "unknown"]}, "doc": ""}, {"name": "genomicEntities", "type":
{"type": "array", "items": {"type": "record", "name": "GenomicEntity", "doc": "", "fields":
[{"name": "type", "type": {"type": "enum", "name": "GenomicEntityType", "doc": "", "symbols":
["regulatory_region", "gene", "transcript", "intergenic"]}, "doc": ""}, {"name": "ensemblId",
"type": "string", "doc": ""}, {"name": "geneSymbol", "type": ["null", "string"], "doc": ""},
{"name": "otherIds", "type": ["null", {"type": "map", "values": "string"}], "doc": ""}]}}, "doc":
""}, {"name": "penetrance", "type": ["null", {"type": "enum", "name": "Penetrance", "namespace":
"org.gel.models.participant.avro", "doc": "", "symbols": ["complete", "incomplete"]}], "doc": ""},
{"name": "score", "type": ["null", "float"], "doc": ""}, {"name": "vendorSpecificScores", "type":
["null", {"type": "map", "values": "float"}], "doc": ""}, {"name": "variantClassification", "type":
["null", {"type": "record", "name": "VariantClassification", "doc": "", "fields": [{"name":
"clinicalSignificance", "type": ["null", {"type": "enum", "name": "ClinicalSignificance", "symbols":
["benign", "likely_benign", "VUS", "likely_pathogenic", "pathogenic", "uncertain_significance"]}],
"doc": ""}, {"name": "drugResponseClassification", "type": ["null", {"type": "enum", "name":
"DrugResponseClassification", "symbols": ["responsive", "resistant", "toxicity", "indication",
"contraindication", "dosing", "increased_monitoring", "efficacy"]}], "doc": ""}, {"name":
"traitAssociation", "type": ["null", {"type": "enum", "name": "TraitAssociation", "symbols":
["established_risk_allele", "likely_risk_allele", "uncertain_risk_allele", "protective"]}], "doc":
""}, {"name": "tumorigenesisClassification", "type": ["null", {"type": "enum", "name":
"TumorigenesisClassification", "symbols": ["driver", "passenger", "modifier"]}], "doc": ""},
{"name": "functionalEffect", "type": ["null", {"type": "enum", "name": "VariantFunctionalEffect",
"symbols": ["dominant_negative_variant", "gain_of_function_variant", "lethal_variant",
"loss_of_function_variant", "loss_of_heterozygosity", "null_variant"]}], "doc": ""}]}], "doc": ""},
{"name": "fullyExplainsPhenotype", "type": ["null", "boolean"], "doc": ""}, {"name":
"groupOfVariants", "type": ["null", "int"], "doc": ""}, {"name": "eventJustification", "type":
["null", "string"], "doc": ""}, {"name": "tier", "type": ["null", {"type": "enum", "name": "Tier",
"doc": "", "symbols": ["NONE", "TIER1", "TIER2", "TIER3", "TIER4", "TIER5"]}], "doc": ""}]}}, "doc":
""}, {"name": "additionalTextualVariantAnnotations", "type": ["null", {"type": "map", "values":
"string"}], "doc": ""}, {"name": "references", "type": ["null", {"type": "map", "values":
"string"}], "doc": ""}, {"name": "variantAttributes", "type": ["null", {"type": "record", "name":
"VariantAttributes", "doc": "", "fields": [{"name": "ihp", "type": ["null", "int"], "doc": ""},
{"name": "recurrentlyReported", "type": ["null", "boolean"], "doc": ""}, {"name": "fdp50", "type":
["null", "string"], "doc": ""}, {"name": "others", "type": ["null", {"type": "map", "values":
"string"}], "doc": ""}]}], "doc": ""}, {"name": "alleleFrequencies", "type": ["null", {"type":
"array", "items": {"type": "record", "name": "AlleleFrequency", "doc": "", "fields": [{"name":
"study", "type": "string", "doc": ""}, {"name": "population", "type": "string", "doc": ""}, {"name":
"alternateFrequency", "type": "float", "doc": ""}]}}], "doc": ""}, {"name":
"additionalNumericVariantAnnotations", "type": ["null", {"type": "map", "values": "float"}], "doc":
""}, {"name": "comments", "type": ["null", {"type": "array", "items": "string"}], "doc": ""},
{"name": "alleleOrigins", "type": {"type": "array", "items": "AlleleOrigin"}, "doc": ""}]}}], "doc":
""}, {"name": "genomicInterpretation", "type": "string", "doc": ""}, {"name":
"additionalAnalysisPanels", "type": ["null", {"type": "array", "items": {"type": "record", "name":
"AdditionalAnalysisPanel", "doc": "", "fields": [{"name": "specificDisease", "type": "string"},
{"name": "panel", "type": "GenePanel"}]}}], "doc": ""}, {"name": "references", "type": ["null",
{"type": "array", "items": "string"}], "doc": ""}, {"name": "referenceDatabasesVersions", "type":
{"type": "map", "values": "string"}, "doc": ""}, {"name": "softwareVersions", "type": {"type":
"map", "values": "string"}, "doc": ""}]}
"""
    # Parsed Avro schema object built from the JSON string above.
    schema = avro_parse(_schemaSource)
    # Field names that must be populated for this record to validate.
    requiredFields = {
        "additionalAnalysisPanels",
        "genomicInterpretation",
        "interpretationRequestId",
        "interpretationRequestVersion",
        "referenceDatabasesVersions",
        "references",
        "reportingDate",
        "softwareVersions",
        "user",
        "variants",
    }
    @classmethod
    def isEmbeddedType(cls, fieldName):
        """Return True if fieldName holds an embedded ProtocolElement."""
        embeddedTypes = {
            'additionalAnalysisPanels': AdditionalAnalysisPanel,
            'variants': ReportedVariant,
        }
        return fieldName in embeddedTypes
    @classmethod
    def getEmbeddedType(cls, fieldName):
        """Return the embedded ProtocolElement class for fieldName;
        raises KeyError for non-embedded fields."""
        embeddedTypes = {
            'additionalAnalysisPanels': AdditionalAnalysisPanel,
            'variants': ReportedVariant,
        }
        return embeddedTypes[fieldName]
    # One slot per schema field.
    __slots__ = [
        'additionalAnalysisPanels', 'genomicInterpretation',
        'interpretationRequestId', 'interpretationRequestVersion',
        'referenceDatabasesVersions', 'references', 'reportingDate',
        'softwareVersions', 'user', 'variants'
    ]
    def __init__(self, **kwargs):
        """Initialise each declared field from kwargs; missing fields
        default to None."""
        self.additionalAnalysisPanels = kwargs.get(
            'additionalAnalysisPanels', None)
        self.genomicInterpretation = kwargs.get(
            'genomicInterpretation', None)
        self.interpretationRequestId = kwargs.get(
            'interpretationRequestId', None)
        self.interpretationRequestVersion = kwargs.get(
            'interpretationRequestVersion', None)
        self.referenceDatabasesVersions = kwargs.get(
            'referenceDatabasesVersions', None)
        self.references = kwargs.get(
            'references', None)
        self.reportingDate = kwargs.get(
            'reportingDate', None)
        self.softwareVersions = kwargs.get(
            'softwareVersions', None)
        self.user = kwargs.get(
            'user', None)
        self.variants = kwargs.get(
            'variants', None)
class ClinicalSignificance(object):
    """Clinical-significance classifications for a variant.

    (The schema carries no documentation.)
    """
    benign = "benign"
    likely_benign = "likely_benign"
    VUS = "VUS"
    likely_pathogenic = "likely_pathogenic"
    pathogenic = "pathogenic"
    uncertain_significance = "uncertain_significance"

    def __hash__(self):
        # Hash on the string form, mirroring the generated enum pattern.
        return hash(str(self))
class ClinicalUtility(object):
    """Clinical-utility outcomes of a finding.

    (The schema carries no documentation.)
    """
    none = "none"
    change_in_medication = "change_in_medication"
    surgical_option = "surgical_option"
    additional_surveillance_for_proband_or_relatives = "additional_surveillance_for_proband_or_relatives"
    clinical_trial_eligibility = "clinical_trial_eligibility"
    informs_reproductive_choice = "informs_reproductive_choice"
    unknown = "unknown"
    other = "other"

    def __hash__(self):
        # Hash on the string form, mirroring the generated enum pattern.
        return hash(str(self))
class Code(object):
    """Change-type codes for a case.

    A code either describes the whole case (e.g. CLOSED) or a change
    to one or more variants:

    * `C0`: **Case Closed successfully** - Clinical Report generated
      with one or more Candidate Variants.
    * `C1`: **Case Closed unsuccessfully** - Clinical Report could not
      be generated because no Candidate Variants were found.
    * `C2`: **Case Blocked** - errors were found and the case was sent
      to quarantine for further investigation.
    * `C3`: **Case Shared** - the case was shared with another group
      of users.
    * `C4`: **Supporting evidence change** - one or more supporting
      evidences modified on the case (see ClinicalReport).
    * `C5`: **Variant added** - one or more variants selected as
      Candidate Variants.
    * `C6`: **Variant removed** - one or more variants removed as
      Candidate Variants.
    * `C7`: **Variant modified** - one or more Candidate Variants
      modified (any change or comment over the variants should be
      captured).
    """
    C0 = "C0"
    C1 = "C1"
    C2 = "C2"
    C3 = "C3"
    C4 = "C4"
    C5 = "C5"
    C6 = "C6"
    C7 = "C7"

    def __hash__(self):
        # Hash on the string form, mirroring the generated enum pattern.
        return hash(str(self))
class ConfirmationDecision(object):
    """Decision on variant confirmation.

    (The schema carries no documentation.)
    """
    yes = "yes"
    no = "no"
    na = "na"

    def __hash__(self):
        # Hash on the string form, mirroring the generated enum pattern.
        return hash(str(self))
class ConfirmationOutcome(object):
    """Outcome of variant confirmation.

    (The schema carries no documentation.)
    """
    yes = "yes"
    no = "no"
    na = "na"

    def __hash__(self):
        # Hash on the string form, mirroring the generated enum pattern.
        return hash(str(self))
class ConsentStatus(ProtocolElement):
    """
    Consent Status
    """
    # Avro schema (JSON) for this record; parsed once below.  All four
    # boolean fields carry a schema default of false.
    _schemaSource = """
{"type": "record", "name": "ConsentStatus", "namespace": "org.gel.models.participant.avro", "doc":
"", "fields": [{"name": "programmeConsent", "type": "boolean", "doc": "", "default": false},
{"name": "primaryFindingConsent", "type": "boolean", "doc": "", "default": false}, {"name":
"secondaryFindingConsent", "type": "boolean", "doc": "", "default": false}, {"name":
"carrierStatusConsent", "type": "boolean", "doc": "", "default": false}]}
"""
    schema = avro_parse(_schemaSource)
    # No required fields: every field has a schema default.
    requiredFields = {}

    @classmethod
    def isEmbeddedType(cls, fieldName):
        """Return True if fieldName holds an embedded ProtocolElement
        (never, for this record)."""
        return fieldName in {}

    @classmethod
    def getEmbeddedType(cls, fieldName):
        """Return the embedded type for fieldName; always raises
        KeyError since no fields here are embedded types."""
        return {}[fieldName]

    # One slot per schema field.
    __slots__ = [
        'carrierStatusConsent', 'primaryFindingConsent',
        'programmeConsent', 'secondaryFindingConsent'
    ]

    def __init__(self, **kwargs):
        """Populate every consent flag from kwargs, defaulting each to
        False (matching the schema defaults)."""
        for fieldName in self.__slots__:
            setattr(self, fieldName, kwargs.get(fieldName, False))
class DiseasePenetrance(ProtocolElement):
    """
    A disease penetrance definition
    """
    # Avro schema (JSON) for this record; parsed once below.
    _schemaSource = """
{"type": "record", "name": "DiseasePenetrance", "namespace": "org.gel.models.participant.avro",
"doc": "", "fields": [{"name": "specificDisease", "type": "string", "doc": ""}, {"name":
"penetrance", "type": {"type": "enum", "name": "Penetrance", "doc": "", "symbols": ["complete",
"incomplete"]}, "doc": ""}]}
"""
    schema = avro_parse(_schemaSource)
    # Field names that must be populated for this record to validate.
    requiredFields = {
        "penetrance",
        "specificDisease",
    }

    @classmethod
    def isEmbeddedType(cls, fieldName):
        """Return True if fieldName holds an embedded ProtocolElement
        (never, for this record)."""
        return fieldName in {}

    @classmethod
    def getEmbeddedType(cls, fieldName):
        """Return the embedded type for fieldName; always raises
        KeyError since no fields here are embedded types."""
        return {}[fieldName]

    # One slot per schema field.
    __slots__ = [
        'penetrance', 'specificDisease'
    ]

    def __init__(self, **kwargs):
        """Populate every slot from kwargs, defaulting to None."""
        for fieldName in self.__slots__:
            setattr(self, fieldName, kwargs.get(fieldName, None))
class Disorder(ProtocolElement):
    """
    This is quite GEL specific. This is the way it is stored in
    ModelCatalogue and PanelApp. Currently all specific disease
    titles are assigned to a disease subgroup, so really only
    specificDisease needs to be completed, but the others are kept
    for generality.
    """
    # Avro schema (JSON) for this record; parsed once below.
    _schemaSource = """
{"type": "record", "name": "Disorder", "namespace": "org.gel.models.participant.avro", "doc": "",
"fields": [{"name": "diseaseGroup", "type": ["null", "string"], "doc": ""}, {"name":
"diseaseSubGroup", "type": ["null", "string"], "doc": ""}, {"name": "specificDisease", "type":
["null", "string"], "doc": ""}, {"name": "ageOfOnset", "type": ["null", "float"], "doc": ""}]}
"""
    schema = avro_parse(_schemaSource)
    # Field names that must be populated for this record to validate.
    requiredFields = {
        "ageOfOnset",
        "diseaseGroup",
        "diseaseSubGroup",
        "specificDisease",
    }

    @classmethod
    def isEmbeddedType(cls, fieldName):
        """Return True if fieldName holds an embedded ProtocolElement
        (never, for this record)."""
        return fieldName in {}

    @classmethod
    def getEmbeddedType(cls, fieldName):
        """Return the embedded type for fieldName; always raises
        KeyError since no fields here are embedded types."""
        return {}[fieldName]

    # One slot per schema field.
    __slots__ = [
        'ageOfOnset', 'diseaseGroup', 'diseaseSubGroup',
        'specificDisease'
    ]

    def __init__(self, **kwargs):
        """Populate every slot from kwargs, defaulting to None."""
        for fieldName in self.__slots__:
            setattr(self, fieldName, kwargs.get(fieldName, None))
class DrugResponseClassification(object):
    """Drug-response classifications for a variant.

    (The schema carries no documentation.)
    """
    responsive = "responsive"
    resistant = "resistant"
    toxicity = "toxicity"
    indication = "indication"
    contraindication = "contraindication"
    dosing = "dosing"
    increased_monitoring = "increased_monitoring"
    efficacy = "efficacy"

    def __hash__(self):
        # Hash on the string form, mirroring the generated enum pattern.
        return hash(str(self))
class EthnicCategory(object):
    """ONS16 ethnicity codes.

    * `A`: White: British
    * `B`: White: Irish
    * `C`: White: Any other White background
    * `D`: Mixed: White and Black Caribbean
    * `E`: Mixed: White and Black African
    * `F`: Mixed: White and Asian
    * `G`: Mixed: Any other mixed background
    * `H`: Asian or Asian British: Indian
    * `J`: Asian or Asian British: Pakistani
    * `K`: Asian or Asian British: Bangladeshi
    * `L`: Asian or Asian British: Any other Asian background
    * `M`: Black or Black British: Caribbean
    * `N`: Black or Black British: African
    * `P`: Black or Black British: Any other Black background
    * `R`: Other Ethnic Groups: Chinese
    * `S`: Other Ethnic Groups: Any other ethnic group
    * `Z`: Not stated
    """
    D = "D"
    E = "E"
    F = "F"
    G = "G"
    A = "A"
    B = "B"
    C = "C"
    L = "L"
    M = "M"
    N = "N"
    H = "H"
    J = "J"
    K = "K"
    P = "P"
    S = "S"
    R = "R"
    Z = "Z"

    def __hash__(self):
        # Hash on the string form, mirroring the generated enum pattern.
        return hash(str(self))
class FamiliarRelationship(object):
    """Kinds of family relationship recorded in a pedigree."""
    TwinsMonozygous = "TwinsMonozygous"
    TwinsDizygous = "TwinsDizygous"
    TwinsUnknown = "TwinsUnknown"
    FullSibling = "FullSibling"
    FullSiblingF = "FullSiblingF"
    FullSiblingM = "FullSiblingM"
    Mother = "Mother"
    Father = "Father"
    Son = "Son"
    Daughter = "Daughter"
    ChildOfUnknownSex = "ChildOfUnknownSex"
    MaternalAunt = "MaternalAunt"
    MaternalUncle = "MaternalUncle"
    MaternalUncleOrAunt = "MaternalUncleOrAunt"
    PaternalAunt = "PaternalAunt"
    PaternalUncle = "PaternalUncle"
    PaternalUncleOrAunt = "PaternalUncleOrAunt"
    MaternalGrandmother = "MaternalGrandmother"
    PaternalGrandmother = "PaternalGrandmother"
    MaternalGrandfather = "MaternalGrandfather"
    PaternalGrandfather = "PaternalGrandfather"
    DoubleFirstCousin = "DoubleFirstCousin"
    MaternalCousinSister = "MaternalCousinSister"
    PaternalCousinSister = "PaternalCousinSister"
    MaternalCousinBrother = "MaternalCousinBrother"
    PaternalCousinBrother = "PaternalCousinBrother"
    Cousin = "Cousin"
    Spouse = "Spouse"
    Other = "Other"
    RelationIsNotClear = "RelationIsNotClear"
    Unknown = "Unknown"

    def __hash__(self):
        # Hash on the string form, mirroring the generated enum pattern.
        return hash(str(self))
class FamilyLevelQuestions(ProtocolElement):
    """
    The family level questions
    """
    # Avro schema (JSON) for this record; parsed once below.
    _schemaSource = """
{"type": "record", "name": "FamilyLevelQuestions", "namespace": "org.gel.models.report.avro", "doc":
"", "fields": [{"name": "caseSolvedFamily", "type": {"type": "enum", "name": "CaseSolvedFamily",
"symbols": ["yes", "no", "partially", "unknown"]}, "doc": ""}, {"name": "segregationQuestion",
"type": {"type": "enum", "name": "SegregationQuestion", "symbols": ["yes", "no"]}, "doc": ""},
{"name": "additionalComments", "type": "string", "doc": ""}]}
"""
    schema = avro_parse(_schemaSource)
    # Field names that must be populated for this record to validate.
    requiredFields = {
        "additionalComments",
        "caseSolvedFamily",
        "segregationQuestion",
    }

    @classmethod
    def isEmbeddedType(cls, fieldName):
        """Return True if fieldName holds an embedded ProtocolElement
        (never, for this record)."""
        return fieldName in {}

    @classmethod
    def getEmbeddedType(cls, fieldName):
        """Return the embedded type for fieldName; always raises
        KeyError since no fields here are embedded types."""
        return {}[fieldName]

    # One slot per schema field.
    __slots__ = [
        'additionalComments', 'caseSolvedFamily',
        'segregationQuestion'
    ]

    def __init__(self, **kwargs):
        """Populate every slot from kwargs, defaulting to None."""
        for fieldName in self.__slots__:
            setattr(self, fieldName, kwargs.get(fieldName, None))
class FamilyQCState(object):
    """
    FamilyQCState
    """
    # QC states a family can be in within the interpretation pipeline.
    noState = "noState"
    passedMedicalReviewReadyForInterpretation = "passedMedicalReviewReadyForInterpretation"
    passedMedicalReviewNotReadyForInterpretation = "passedMedicalReviewNotReadyForInterpretation"
    queryToGel = "queryToGel"
    queryToGMC = "queryToGMC"
    failed = "failed"

    def __hash__(self):
        # Hash via the string form, like the other enum-style classes.
        return hash(str(self))
class File(ProtocolElement):
    """
    Defines a file. The record is uniquely identified by the sample
    identifier(s) and a URI; the sample identifier may be a single
    string or a list of strings when several samples are associated
    with the same file.
    """
    _schemaSource = """
{"type": "record", "name": "File", "namespace": "org.gel.models.report.avro", "doc": "", "fields":
[{"name": "sampleId", "type": ["null", {"type": "array", "items": "string"}], "doc": ""}, {"name":
"uriFile", "type": "string", "doc": ""}, {"name": "fileType", "type": {"type": "enum", "name":
"FileType", "symbols": ["BAM", "gVCF", "VCF_small", "VCF_somatic_small", "VCF_CNV",
"VCF_somatic_CNV", "VCF_SV", "VCF_somatic_SV", "VCF_SV_CNV", "SVG", "ANN", "BigWig", "MD5Sum",
"ROH", "OTHER", "PARTITION", "VARIANT_FREQUENCIES", "COVERAGE"]}, "doc": ""}, {"name": "md5Sum",
"type": ["null", "string"], "doc": ""}]}
"""
    # Avro schema parsed once at class-creation time.
    schema = avro_parse(_schemaSource)
    requiredFields = {
        "fileType",
        "md5Sum",
        "sampleId",
        "uriFile",
    }

    @classmethod
    def isEmbeddedType(cls, fieldName):
        # This record has no fields that are themselves ProtocolElements.
        return fieldName in {}

    @classmethod
    def getEmbeddedType(cls, fieldName):
        # No embedded types exist; any lookup raises KeyError.
        return {}[fieldName]

    __slots__ = [
        'fileType', 'md5Sum', 'sampleId', 'uriFile'
    ]

    def __init__(self, **kwargs):
        # Every field defaults to None when not supplied by the caller.
        for fieldName in self.__slots__:
            setattr(self, fieldName, kwargs.get(fieldName, None))
class FileType(object):
    """
    No documentation
    """
    # Alignment and variant-call file kinds.
    BAM = "BAM"
    gVCF = "gVCF"
    VCF_small = "VCF_small"
    VCF_somatic_small = "VCF_somatic_small"
    VCF_CNV = "VCF_CNV"
    VCF_somatic_CNV = "VCF_somatic_CNV"
    VCF_SV = "VCF_SV"
    VCF_somatic_SV = "VCF_somatic_SV"
    VCF_SV_CNV = "VCF_SV_CNV"
    # Auxiliary artefact kinds.
    SVG = "SVG"
    ANN = "ANN"
    BigWig = "BigWig"
    MD5Sum = "MD5Sum"
    ROH = "ROH"
    OTHER = "OTHER"
    PARTITION = "PARTITION"
    VARIANT_FREQUENCIES = "VARIANT_FREQUENCIES"
    COVERAGE = "COVERAGE"

    def __hash__(self):
        # Hash via the string form, like the other enum-style classes.
        return hash(str(self))
class GenePanel(ProtocolElement):
    """
    A panel of genes
    """
    _schemaSource = """
{"type": "record", "name": "GenePanel", "namespace": "org.gel.models.report.avro", "doc": "",
"fields": [{"name": "panelName", "type": "string", "doc": ""}, {"name": "panelVersion", "type":
["null", "string"], "doc": ""}]}
"""
    # Avro schema parsed once at class-creation time.
    schema = avro_parse(_schemaSource)
    requiredFields = {
        "panelName",
        "panelVersion",
    }

    @classmethod
    def isEmbeddedType(cls, fieldName):
        # This record has no fields that are themselves ProtocolElements.
        return fieldName in {}

    @classmethod
    def getEmbeddedType(cls, fieldName):
        # No embedded types exist; any lookup raises KeyError.
        return {}[fieldName]

    __slots__ = [
        'panelName', 'panelVersion'
    ]

    def __init__(self, **kwargs):
        # Every field defaults to None when not supplied by the caller.
        for fieldName in self.__slots__:
            setattr(self, fieldName, kwargs.get(fieldName, None))
class GenomicEntity(ProtocolElement):
    """
    A genomic feature
    """
    _schemaSource = """
{"type": "record", "name": "GenomicEntity", "namespace": "org.gel.models.report.avro", "doc": "",
"fields": [{"name": "type", "type": {"type": "enum", "name": "GenomicEntityType", "doc": "",
"symbols": ["regulatory_region", "gene", "transcript", "intergenic"]}, "doc": ""}, {"name":
"ensemblId", "type": "string", "doc": ""}, {"name": "geneSymbol", "type": ["null", "string"], "doc":
""}, {"name": "otherIds", "type": ["null", {"type": "map", "values": "string"}], "doc": ""}]}
"""
    # Avro schema parsed once at class-creation time.
    schema = avro_parse(_schemaSource)
    requiredFields = {
        "ensemblId",
        "geneSymbol",
        "otherIds",
        "type",
    }

    @classmethod
    def isEmbeddedType(cls, fieldName):
        # This record has no fields that are themselves ProtocolElements.
        return fieldName in {}

    @classmethod
    def getEmbeddedType(cls, fieldName):
        # No embedded types exist; any lookup raises KeyError.
        return {}[fieldName]

    __slots__ = [
        'ensemblId', 'geneSymbol', 'otherIds', 'type'
    ]

    def __init__(self, **kwargs):
        # Every field defaults to None when not supplied by the caller.
        for fieldName in self.__slots__:
            setattr(self, fieldName, kwargs.get(fieldName, None))
class GenomicEntityType(object):
    """
    Types of genomic features: * `regulatory_region`: a regulatory
    region * `gene`: a gene * `transcript`: a transcript *
    `intergenic`: an intergenic region
    """
    regulatory_region = "regulatory_region"
    gene = "gene"
    transcript = "transcript"
    intergenic = "intergenic"

    def __hash__(self):
        # Hash via the string form, like the other enum-style classes.
        return hash(str(self))
class GermlineSample(ProtocolElement):
    """
    A germline sample
    """
    _schemaSource = """
{"type": "record", "name": "GermlineSample", "namespace": "org.gel.models.participant.avro", "doc":
"", "fields": [{"name": "sampleId", "type": "string", "doc": ""}, {"name": "labSampleId", "type":
"int", "doc": ""}, {"name": "LDPCode", "type": "string", "doc": ""}, {"name": "source", "type":
["null", {"type": "enum", "name": "SampleSource", "doc": "", "symbols": ["TUMOUR",
"BONE_MARROW_ASPIRATE_TUMOUR_SORTED_CELLS", "BONE_MARROW_ASPIRATE_TUMOUR_CELLS", "BLOOD", "SALIVA",
"FIBROBLAST", "TISSUE"]}], "doc": ""}, {"name": "product", "type": ["null", {"type": "enum", "name":
"Product", "symbols": ["DNA", "RNA"]}], "doc": ""}, {"name": "preparationMethod", "type": ["null",
{"type": "enum", "name": "PreparationMethod", "symbols": ["EDTA", "ORAGENE", "FF", "FFPE",
"CD128_SORTED_CELLS", "ASPIRATE"]}], "doc": ""}, {"name": "programmePhase", "type": ["null",
{"type": "enum", "name": "ProgrammePhase", "symbols": ["CRUK", "OXFORD", "CLL", "IIP", "MAIN",
"EXPT"]}], "doc": ""}, {"name": "clinicalSampleDateTime", "type": ["null", "string"], "doc": ""}]}
"""
    # Avro schema parsed once at class-creation time.
    schema = avro_parse(_schemaSource)
    requiredFields = {
        "LDPCode",
        "clinicalSampleDateTime",
        "labSampleId",
        "preparationMethod",
        "product",
        "programmePhase",
        "sampleId",
        "source",
    }

    @classmethod
    def isEmbeddedType(cls, fieldName):
        # This record has no fields that are themselves ProtocolElements.
        return fieldName in {}

    @classmethod
    def getEmbeddedType(cls, fieldName):
        # No embedded types exist; any lookup raises KeyError.
        return {}[fieldName]

    __slots__ = [
        'LDPCode', 'clinicalSampleDateTime', 'labSampleId',
        'preparationMethod', 'product', 'programmePhase', 'sampleId',
        'source'
    ]

    def __init__(self, **kwargs):
        # Every field defaults to None when not supplied by the caller.
        for fieldName in self.__slots__:
            setattr(self, fieldName, kwargs.get(fieldName, None))
class HpoTerm(ProtocolElement):
    """
    An HPO term with its (possibly multiple) modifiers. Terms whose
    presence is unknown have no entry in the list.
    """
    _schemaSource = """
{"type": "record", "name": "HpoTerm", "namespace": "org.gel.models.participant.avro", "doc": "",
"fields": [{"name": "term", "type": "string", "doc": ""}, {"name": "termPresence", "type": ["null",
{"type": "enum", "name": "TernaryOption", "doc": "", "symbols": ["yes", "no", "unknown"]}], "doc":
""}, {"name": "hpoBuildNumber", "type": ["null", "string"], "doc": ""}, {"name": "modifiers",
"type": ["null", {"type": "record", "name": "HpoTermModifiers", "fields": [{"name": "laterality",
"type": ["null", {"type": "enum", "name": "Laterality", "symbols": ["RIGHT", "UNILATERAL",
"BILATERAL", "LEFT"]}]}, {"name": "progression", "type": ["null", {"type": "enum", "name":
"Progression", "symbols": ["PROGRESSIVE", "NONPROGRESSIVE"]}]}, {"name": "severity", "type":
["null", {"type": "enum", "name": "Severity", "symbols": ["BORDERLINE", "MILD", "MODERATE",
"SEVERE", "PROFOUND"]}]}, {"name": "spatialPattern", "type": ["null", {"type": "enum", "name":
"SpatialPattern", "symbols": ["DISTAL", "GENERALIZED", "LOCALIZED", "PROXIMAL"]}]}]}], "doc": ""},
{"name": "ageOfOnset", "type": ["null", {"type": "enum", "name": "AgeOfOnset", "symbols":
["EMBRYONAL_ONSET", "FETAL_ONSET", "NEONATAL_ONSET", "INFANTILE_ONSET", "CHILDHOOD_ONSET",
"JUVENILE_ONSET", "YOUNG_ADULT_ONSET", "LATE_ONSET", "MIDDLE_AGE_ONSET"]}], "doc": ""}]}
"""
    # Avro schema parsed once at class-creation time.
    schema = avro_parse(_schemaSource)
    requiredFields = {
        "ageOfOnset",
        "hpoBuildNumber",
        "modifiers",
        "term",
        "termPresence",
    }

    @classmethod
    def isEmbeddedType(cls, fieldName):
        # Only ``modifiers`` nests another ProtocolElement record.
        return fieldName in {'modifiers': HpoTermModifiers}

    @classmethod
    def getEmbeddedType(cls, fieldName):
        # Resolve the nested ProtocolElement class for ``fieldName``.
        return {'modifiers': HpoTermModifiers}[fieldName]

    __slots__ = [
        'ageOfOnset', 'hpoBuildNumber', 'modifiers', 'term',
        'termPresence'
    ]

    def __init__(self, **kwargs):
        # Every field defaults to None when not supplied by the caller.
        for fieldName in self.__slots__:
            setattr(self, fieldName, kwargs.get(fieldName, None))
class HpoTermModifiers(ProtocolElement):
    """
    No documentation
    """
    _schemaSource = """
{"type": "record", "name": "HpoTermModifiers", "namespace": "org.gel.models.participant.avro",
"fields": [{"name": "laterality", "type": ["null", {"type": "enum", "name": "Laterality", "symbols":
["RIGHT", "UNILATERAL", "BILATERAL", "LEFT"]}]}, {"name": "progression", "type": ["null", {"type":
"enum", "name": "Progression", "symbols": ["PROGRESSIVE", "NONPROGRESSIVE"]}]}, {"name": "severity",
"type": ["null", {"type": "enum", "name": "Severity", "symbols": ["BORDERLINE", "MILD", "MODERATE",
"SEVERE", "PROFOUND"]}]}, {"name": "spatialPattern", "type": ["null", {"type": "enum", "name":
"SpatialPattern", "symbols": ["DISTAL", "GENERALIZED", "LOCALIZED", "PROXIMAL"]}]}]}
"""
    # Avro schema parsed once at class-creation time.
    schema = avro_parse(_schemaSource)
    requiredFields = {
        "laterality",
        "progression",
        "severity",
        "spatialPattern",
    }

    @classmethod
    def isEmbeddedType(cls, fieldName):
        # This record has no fields that are themselves ProtocolElements.
        return fieldName in {}

    @classmethod
    def getEmbeddedType(cls, fieldName):
        # No embedded types exist; any lookup raises KeyError.
        return {}[fieldName]

    __slots__ = [
        'laterality', 'progression', 'severity', 'spatialPattern'
    ]

    def __init__(self, **kwargs):
        # Every field defaults to None when not supplied by the caller.
        for fieldName in self.__slots__:
            setattr(self, fieldName, kwargs.get(fieldName, None))
class InbreedingCoefficient(ProtocolElement):
    """
    Inbreeding coefficient
    """
    _schemaSource = """
{"type": "record", "name": "InbreedingCoefficient", "namespace": "org.gel.models.participant.avro",
"doc": "", "fields": [{"name": "sampleId", "type": "string", "doc": ""}, {"name": "program", "type":
"string", "doc": ""}, {"name": "version", "type": "string", "doc": ""}, {"name": "estimationMethod",
"type": "string", "doc": ""}, {"name": "coefficient", "type": "double", "doc": ""}, {"name":
"standardError", "type": ["null", "double"], "doc": ""}]}
"""
    # Avro schema parsed once at class-creation time.
    schema = avro_parse(_schemaSource)
    requiredFields = {
        "coefficient",
        "estimationMethod",
        "program",
        "sampleId",
        "standardError",
        "version",
    }

    @classmethod
    def isEmbeddedType(cls, fieldName):
        # This record has no fields that are themselves ProtocolElements.
        return fieldName in {}

    @classmethod
    def getEmbeddedType(cls, fieldName):
        # No embedded types exist; any lookup raises KeyError.
        return {}[fieldName]

    __slots__ = [
        'coefficient', 'estimationMethod', 'program', 'sampleId',
        'standardError', 'version'
    ]

    def __init__(self, **kwargs):
        # Every field defaults to None when not supplied by the caller.
        for fieldName in self.__slots__:
            setattr(self, fieldName, kwargs.get(fieldName, None))
class InterpretationData(ProtocolElement):
    """
    Represents the set of all interpretation data (excluding file
    contents) to be stored in MDT for one TieringResult. Semantic
    restrictions (not automatically verifiable): * All
    InterpretedGenomesRD in interpretationResults refer to the
    TieringResult tieringResult. * All InterpretedGenomesRD in
    interpretationResults have passed the QC stage and have been
    approved by the originating GMCs
    """
    # Avro schema inlined by the code generator. It embeds the complete
    # InterpretationRequestRD and InterpretedGenomeRD record definitions,
    # which is why it is by far the largest schema string in this module.
    _schemaSource = """
{"type": "record", "name": "InterpretationData", "namespace": "org.gel.models.report.avro", "doc":
"", "fields": [{"name": "interpretationMetaData", "type": {"type": "record", "name":
"InterpretationRequestRD", "doc": "", "fields": [{"name": "versionControl", "type": {"type":
"record", "name": "ReportVersionControl", "fields": [{"name": "gitVersionControl", "type": "string",
"doc": "", "default": "5.0.0"}]}, "doc": ""}, {"name": "interpretationRequestId", "type": "string",
"doc": ""}, {"name": "interpretationRequestVersion", "type": "int", "doc": ""}, {"name":
"internalStudyId", "type": "string", "doc": ""}, {"name": "genomeAssembly", "type": {"type": "enum",
"name": "Assembly", "doc": "", "symbols": ["GRCh38", "GRCh37"]}, "doc": ""}, {"name": "workspace",
"type": {"type": "array", "items": "string"}, "doc": ""}, {"name": "bams", "type": ["null", {"type":
"array", "items": {"type": "record", "name": "File", "doc": "", "fields": [{"name": "sampleId",
"type": ["null", {"type": "array", "items": "string"}], "doc": ""}, {"name": "uriFile", "type":
"string", "doc": ""}, {"name": "fileType", "type": {"type": "enum", "name": "FileType", "symbols":
["BAM", "gVCF", "VCF_small", "VCF_somatic_small", "VCF_CNV", "VCF_somatic_CNV", "VCF_SV",
"VCF_somatic_SV", "VCF_SV_CNV", "SVG", "ANN", "BigWig", "MD5Sum", "ROH", "OTHER", "PARTITION",
"VARIANT_FREQUENCIES", "COVERAGE"]}, "doc": ""}, {"name": "md5Sum", "type": ["null", "string"],
"doc": ""}]}}], "doc": ""}, {"name": "vcfs", "type": ["null", {"type": "array", "items": "File"}],
"doc": ""}, {"name": "bigWigs", "type": ["null", {"type": "array", "items": "File"}], "doc": ""},
{"name": "pedigreeDiagram", "type": ["null", "File"], "doc": ""}, {"name": "annotationFile", "type":
["null", "File"], "doc": ""}, {"name": "otherFiles", "type": ["null", {"type": "map", "values":
"File"}], "doc": ""}, {"name": "pedigree", "type": ["null", {"type": "record", "name": "Pedigree",
"namespace": "org.gel.models.participant.avro", "doc": "", "fields": [{"name": "versionControl",
"type": ["null", {"type": "record", "name": "VersionControl", "fields": [{"name":
"GitVersionControl", "type": "string", "doc": "", "default": "1.1.0"}]}], "doc": ""}, {"name":
"LDPCode", "type": ["null", "string"], "doc": ""}, {"name": "familyId", "type": "string", "doc":
""}, {"name": "members", "type": {"type": "array", "items": {"type": "record", "name":
"PedigreeMember", "doc": "", "fields": [{"name": "pedigreeId", "type": ["null", "int"], "doc": ""},
{"name": "isProband", "type": ["null", "boolean"], "doc": ""}, {"name": "participantId", "type":
["null", "string"], "doc": ""}, {"name": "participantQCState", "type": ["null", {"type": "enum",
"name": "ParticipantQCState", "doc": "", "symbols": ["noState",
"passedMedicalReviewReadyForInterpretation", "passedMedicalReviewNotReadyForInterpretation",
"queryToGel", "queryToGMC", "failed"]}], "doc": ""}, {"name": "gelSuperFamilyId", "type": ["null",
"string"], "doc": ""}, {"name": "sex", "type": {"type": "enum", "name": "Sex", "doc": "", "symbols":
["MALE", "FEMALE", "UNKNOWN"]}, "doc": ""}, {"name": "personKaryotypicSex", "type": ["null",
{"type": "enum", "name": "PersonKaryotipicSex", "doc": "", "symbols": ["UNKNOWN", "XX", "XY", "XO",
"XXY", "XXX", "XXYY", "XXXY", "XXXX", "XYY", "OTHER"]}], "doc": ""}, {"name": "yearOfBirth", "type":
["null", "int"], "doc": ""}, {"name": "fatherId", "type": ["null", "int"], "doc": ""}, {"name":
"motherId", "type": ["null", "int"], "doc": ""}, {"name": "superFatherId", "type": ["null", "int"],
"doc": ""}, {"name": "superMotherId", "type": ["null", "int"], "doc": ""}, {"name": "twinGroup",
"type": ["null", "int"], "doc": ""}, {"name": "monozygotic", "type": ["null", {"type": "enum",
"name": "TernaryOption", "doc": "", "symbols": ["yes", "no", "unknown"]}], "doc": ""}, {"name":
"adoptedStatus", "type": ["null", {"type": "enum", "name": "AdoptedStatus", "doc": "", "symbols":
["notadopted", "adoptedin", "adoptedout"]}], "doc": ""}, {"name": "lifeStatus", "type": ["null",
{"type": "enum", "name": "LifeStatus", "doc": "", "symbols": ["ALIVE", "ABORTED", "DECEASED",
"UNBORN", "STILLBORN", "MISCARRIAGE"]}], "doc": ""}, {"name": "consanguineousParents", "type":
["null", "TernaryOption"], "doc": ""}, {"name": "affectionStatus", "type": ["null", {"type": "enum",
"name": "AffectionStatus", "doc": "", "symbols": ["UNAFFECTED", "AFFECTED", "UNCERTAIN"]}], "doc":
""}, {"name": "disorderList", "type": ["null", {"type": "array", "items": {"type": "record", "name":
"Disorder", "doc": "", "fields": [{"name": "diseaseGroup", "type": ["null", "string"], "doc": ""},
{"name": "diseaseSubGroup", "type": ["null", "string"], "doc": ""}, {"name": "specificDisease",
"type": ["null", "string"], "doc": ""}, {"name": "ageOfOnset", "type": ["null", "float"], "doc":
""}]}}], "doc": ""}, {"name": "hpoTermList", "type": ["null", {"type": "array", "items": {"type":
"record", "name": "HpoTerm", "doc": "", "fields": [{"name": "term", "type": "string", "doc": ""},
{"name": "termPresence", "type": ["null", "TernaryOption"], "doc": ""}, {"name": "hpoBuildNumber",
"type": ["null", "string"], "doc": ""}, {"name": "modifiers", "type": ["null", {"type": "record",
"name": "HpoTermModifiers", "fields": [{"name": "laterality", "type": ["null", {"type": "enum",
"name": "Laterality", "symbols": ["RIGHT", "UNILATERAL", "BILATERAL", "LEFT"]}]}, {"name":
"progression", "type": ["null", {"type": "enum", "name": "Progression", "symbols": ["PROGRESSIVE",
"NONPROGRESSIVE"]}]}, {"name": "severity", "type": ["null", {"type": "enum", "name": "Severity",
"symbols": ["BORDERLINE", "MILD", "MODERATE", "SEVERE", "PROFOUND"]}]}, {"name": "spatialPattern",
"type": ["null", {"type": "enum", "name": "SpatialPattern", "symbols": ["DISTAL", "GENERALIZED",
"LOCALIZED", "PROXIMAL"]}]}]}], "doc": ""}, {"name": "ageOfOnset", "type": ["null", {"type": "enum",
"name": "AgeOfOnset", "symbols": ["EMBRYONAL_ONSET", "FETAL_ONSET", "NEONATAL_ONSET",
"INFANTILE_ONSET", "CHILDHOOD_ONSET", "JUVENILE_ONSET", "YOUNG_ADULT_ONSET", "LATE_ONSET",
"MIDDLE_AGE_ONSET"]}], "doc": ""}]}}], "doc": ""}, {"name": "ancestries", "type": ["null", {"type":
"record", "name": "Ancestries", "doc": "", "fields": [{"name": "mothersEthnicOrigin", "type":
["null", {"type": "enum", "name": "EthnicCategory", "doc": "", "symbols": ["D", "E", "F", "G", "A",
"B", "C", "L", "M", "N", "H", "J", "K", "P", "S", "R", "Z"]}], "doc": ""}, {"name":
"mothersOtherRelevantAncestry", "type": ["null", "string"], "doc": ""}, {"name":
"fathersEthnicOrigin", "type": ["null", "EthnicCategory"], "doc": ""}, {"name":
"fathersOtherRelevantAncestry", "type": ["null", "string"], "doc": ""}, {"name":
"chiSquare1KGenomesPhase3Pop", "type": ["null", {"type": "array", "items": {"type": "record",
"name": "ChiSquare1KGenomesPhase3Pop", "doc": "", "fields": [{"name": "kgSuperPopCategory", "type":
{"type": "enum", "name": "KgSuperPopCategory", "doc": "", "symbols": ["AFR", "AMR", "EAS", "EUR",
"SAS"]}, "doc": ""}, {"name": "kgPopCategory", "type": ["null", {"type": "enum", "name":
"KgPopCategory", "doc": "", "symbols": ["ACB", "ASW", "BEB", "CDX", "CEU", "CHB", "CHS", "CLM",
"ESN", "FIN", "GBR", "GIH", "GWD", "IBS", "ITU", "JPT", "KHV", "LWK", "MSL", "MXL", "PEL", "PJL",
"PUR", "STU", "TSI", "YRI"]}], "doc": ""}, {"name": "chiSquare", "type": "double", "doc": ""}]}}],
"doc": ""}]}], "doc": ""}, {"name": "consentStatus", "type": ["null", {"type": "record", "name":
"ConsentStatus", "doc": "", "fields": [{"name": "programmeConsent", "type": "boolean", "doc": "",
"default": false}, {"name": "primaryFindingConsent", "type": "boolean", "doc": "", "default":
false}, {"name": "secondaryFindingConsent", "type": "boolean", "doc": "", "default": false},
{"name": "carrierStatusConsent", "type": "boolean", "doc": "", "default": false}]}], "doc": ""},
{"name": "samples", "type": ["null", {"type": "array", "items": {"type": "record", "name": "Sample",
"fields": [{"name": "sampleId", "type": "string", "doc": ""}, {"name": "labSampleId", "type": "int",
"doc": ""}, {"name": "source", "type": ["null", {"type": "enum", "name": "SampleSource", "doc": "",
"symbols": ["TUMOUR", "BONE_MARROW_ASPIRATE_TUMOUR_SORTED_CELLS",
"BONE_MARROW_ASPIRATE_TUMOUR_CELLS", "BLOOD", "SALIVA", "FIBROBLAST", "TISSUE"]}], "doc": ""},
{"name": "product", "type": ["null", {"type": "enum", "name": "Product", "symbols": ["DNA",
"RNA"]}], "doc": ""}, {"name": "preparationMethod", "type": ["null", {"type": "enum", "name":
"PreparationMethod", "symbols": ["EDTA", "ORAGENE", "FF", "FFPE", "CD128_SORTED_CELLS",
"ASPIRATE"]}], "doc": ""}]}}], "doc": ""}, {"name": "inbreedingCoefficient", "type": ["null",
{"type": "record", "name": "InbreedingCoefficient", "doc": "", "fields": [{"name": "sampleId",
"type": "string", "doc": ""}, {"name": "program", "type": "string", "doc": ""}, {"name": "version",
"type": "string", "doc": ""}, {"name": "estimationMethod", "type": "string", "doc": ""}, {"name":
"coefficient", "type": "double", "doc": ""}, {"name": "standardError", "type": ["null", "double"],
"doc": ""}]}], "doc": ""}, {"name": "additionalInformation", "type": ["null", {"type": "map",
"values": "string"}], "doc": ""}]}}, "doc": ""}, {"name": "analysisPanels", "type": ["null",
{"type": "array", "items": {"type": "record", "name": "AnalysisPanel", "doc": "", "fields":
[{"name": "specificDisease", "type": "string", "doc": ""}, {"name": "panelName", "type": "string",
"doc": ""}, {"name": "panelVersion", "type": ["null", "string"], "doc": ""}, {"name":
"reviewOutcome", "type": "string", "doc": ""}, {"name": "multipleGeneticOrigins", "type": "string",
"doc": ""}]}}], "doc": ""}, {"name": "diseasePenetrances", "type": ["null", {"type": "array",
"items": {"type": "record", "name": "DiseasePenetrance", "doc": "", "fields": [{"name":
"specificDisease", "type": "string", "doc": ""}, {"name": "penetrance", "type": {"type": "enum",
"name": "Penetrance", "doc": "", "symbols": ["complete", "incomplete"]}, "doc": ""}]}}], "doc": ""},
{"name": "readyForAnalysis", "type": "boolean", "doc": ""}, {"name": "familyQCState", "type":
["null", {"type": "enum", "name": "FamilyQCState", "doc": "", "symbols": ["noState",
"passedMedicalReviewReadyForInterpretation", "passedMedicalReviewNotReadyForInterpretation",
"queryToGel", "queryToGMC", "failed"]}], "doc": ""}]}], "doc": ""}, {"name": "otherFamilyHistory",
"type": ["null", {"type": "record", "name": "OtherFamilyHistory", "doc": "", "fields": [{"name":
"maternalFamilyHistory", "type": ["null", {"type": "array", "items": "string"}], "doc": ""},
{"name": "paternalFamilyHistory", "type": ["null", {"type": "array", "items": "string"}], "doc":
""}]}], "doc": ""}, {"name": "genePanelsCoverage", "type": ["null", {"type": "map", "values":
{"type": "map", "values": {"type": "map", "values": "float"}}}], "doc": ""}, {"name":
"interpretationFlags", "type": ["null", {"type": "array", "items": {"type": "record", "name":
"InterpretationFlag", "doc": "", "fields": [{"name": "interpretationFlag", "type": {"type": "enum",
"name": "InterpretationFlags", "doc": "", "symbols": ["mixed_chemistries", "mixedLab_preparation",
"low_tumour_purity", "uniparental_isodisomy", "uniparental_heterodisomy", "unusual_karyotype",
"high_cnv_count", "high_estimate_human_contamination_fraction", "mixed_recruiting_gmc",
"suspected_mosaicism", "low_quality_sample", "ffpe_tumour_sample", "ff_nano_tumour_sample",
"missing_values_for_proband_in_reported_variant", "reissued", "supplementary_report_errors",
"internal_use_only", "high_priority", "other"]}, "doc": ""}, {"name": "additionalDescription",
"type": ["null", "string"], "doc": ""}]}}], "doc": ""}, {"name": "additionalInfo", "type": ["null",
{"type": "map", "values": "string"}], "doc": ""}]}}, {"name": "tieringResult", "type": ["null",
{"type": "record", "name": "InterpretedGenomeRD", "doc": "", "fields": [{"name": "versionControl",
"type": "ReportVersionControl", "doc": ""}, {"name": "interpretationRequestId", "type": "string",
"doc": ""}, {"name": "interpretationRequestVersion", "type": "int", "doc": ""}, {"name":
"interpretationService", "type": "string", "doc": ""}, {"name": "reportUrl", "type": ["null",
"string"], "doc": ""}, {"name": "variants", "type": {"type": "array", "items": {"type": "record",
"name": "ReportedVariant", "doc": "", "fields": [{"name": "variantCoordinates", "type": {"type":
"record", "name": "VariantCoordinates", "doc": "", "fields": [{"name": "chromosome", "type":
"string", "doc": ""}, {"name": "position", "type": "int", "doc": ""}, {"name": "reference", "type":
"string", "doc": ""}, {"name": "alternate", "type": "string", "doc": ""}, {"name": "assembly",
"type": "Assembly", "doc": ""}]}, "doc": ""}, {"name": "dbSnpId", "type": ["null", "string"], "doc":
""}, {"name": "cosmicIds", "type": ["null", {"type": "array", "items": "string"}], "doc": ""},
{"name": "clinVarIds", "type": ["null", {"type": "array", "items": "string"}], "doc": ""}, {"name":
"genomicChanges", "type": ["null", {"type": "array", "items": "string"}], "doc": ""}, {"name":
"cdnaChanges", "type": ["null", {"type": "array", "items": "string"}], "doc": ""}, {"name":
"proteinChanges", "type": ["null", {"type": "array", "items": "string"}], "doc": ""}, {"name":
"variantCalls", "type": {"type": "array", "items": {"type": "record", "name": "VariantCall", "doc":
"", "fields": [{"name": "participantId", "type": "string", "doc": ""}, {"name": "sampleId", "type":
"string", "doc": ""}, {"name": "zygosity", "type": {"type": "enum", "name": "Zygosity", "doc": "",
"symbols": ["reference_homozygous", "heterozygous", "alternate_homozygous", "missing",
"half_missing_reference", "half_missing_alternate", "alternate_hemizigous", "reference_hemizigous",
"unk", "na"]}, "doc": ""}, {"name": "phaseSet", "type": ["null", "int"], "doc": ""}, {"name": "vaf",
"type": ["null", "double"], "doc": ""}, {"name": "depthReference", "type": ["null", "int"], "doc":
""}, {"name": "depthAlternate", "type": ["null", "int"], "doc": ""}, {"name": "alleleOrigins",
"type": {"type": "array", "items": {"type": "enum", "name": "AlleleOrigin", "doc": "", "symbols":
["de_novo_variant", "germline_variant", "maternal_variant", "paternal_variant",
"pedigree_specific_variant", "population_specific_variant", "somatic_variant"]}}, "doc": ""}]}},
"doc": ""}, {"name": "reportEvents", "type": {"type": "array", "items": {"type": "record", "name":
"ReportEvent", "doc": "", "fields": [{"name": "reportEventId", "type": "string", "doc": ""},
{"name": "phenotypes", "type": {"type": "array", "items": "string"}, "doc": ""}, {"name":
"variantConsequences", "type": {"type": "array", "items": {"type": "record", "name":
"VariantConsequence", "doc": "", "fields": [{"name": "id", "type": "string", "doc": ""}, {"name":
"name", "type": ["null", "string"], "doc": ""}]}}, "doc": ""}, {"name": "genePanel", "type":
["null", {"type": "record", "name": "GenePanel", "doc": "", "fields": [{"name": "panelName", "type":
"string", "doc": ""}, {"name": "panelVersion", "type": ["null", "string"], "doc": ""}]}], "doc":
""}, {"name": "modeOfInheritance", "type": {"type": "enum", "name": "ReportedModeOfInheritance",
"doc": "", "symbols": ["monoallelic", "monoallelic_not_imprinted",
"monoallelic_maternally_imprinted", "monoallelic_paternally_imprinted", "biallelic",
"monoallelic_and_biallelic", "monoallelic_and_more_severe_biallelic", "xlinked_biallelic",
"xlinked_monoallelic", "mitochondrial", "unknown"]}, "doc": ""}, {"name": "genomicEntities", "type":
{"type": "array", "items": {"type": "record", "name": "GenomicEntity", "doc": "", "fields":
[{"name": "type", "type": {"type": "enum", "name": "GenomicEntityType", "doc": "", "symbols":
["regulatory_region", "gene", "transcript", "intergenic"]}, "doc": ""}, {"name": "ensemblId",
"type": "string", "doc": ""}, {"name": "geneSymbol", "type": ["null", "string"], "doc": ""},
{"name": "otherIds", "type": ["null", {"type": "map", "values": "string"}], "doc": ""}]}}, "doc":
""}, {"name": "penetrance", "type": ["null", "org.gel.models.participant.avro.Penetrance"], "doc":
""}, {"name": "score", "type": ["null", "float"], "doc": ""}, {"name": "vendorSpecificScores",
"type": ["null", {"type": "map", "values": "float"}], "doc": ""}, {"name": "variantClassification",
"type": ["null", {"type": "record", "name": "VariantClassification", "doc": "", "fields": [{"name":
"clinicalSignificance", "type": ["null", {"type": "enum", "name": "ClinicalSignificance", "symbols":
["benign", "likely_benign", "VUS", "likely_pathogenic", "pathogenic", "uncertain_significance"]}],
"doc": ""}, {"name": "drugResponseClassification", "type": ["null", {"type": "enum", "name":
"DrugResponseClassification", "symbols": ["responsive", "resistant", "toxicity", "indication",
"contraindication", "dosing", "increased_monitoring", "efficacy"]}], "doc": ""}, {"name":
"traitAssociation", "type": ["null", {"type": "enum", "name": "TraitAssociation", "symbols":
["established_risk_allele", "likely_risk_allele", "uncertain_risk_allele", "protective"]}], "doc":
""}, {"name": "tumorigenesisClassification", "type": ["null", {"type": "enum", "name":
"TumorigenesisClassification", "symbols": ["driver", "passenger", "modifier"]}], "doc": ""},
{"name": "functionalEffect", "type": ["null", {"type": "enum", "name": "VariantFunctionalEffect",
"symbols": ["dominant_negative_variant", "gain_of_function_variant", "lethal_variant",
"loss_of_function_variant", "loss_of_heterozygosity", "null_variant"]}], "doc": ""}]}], "doc": ""},
{"name": "fullyExplainsPhenotype", "type": ["null", "boolean"], "doc": ""}, {"name":
"groupOfVariants", "type": ["null", "int"], "doc": ""}, {"name": "eventJustification", "type":
["null", "string"], "doc": ""}, {"name": "tier", "type": ["null", {"type": "enum", "name": "Tier",
"doc": "", "symbols": ["NONE", "TIER1", "TIER2", "TIER3", "TIER4", "TIER5"]}], "doc": ""}]}}, "doc":
""}, {"name": "additionalTextualVariantAnnotations", "type": ["null", {"type": "map", "values":
"string"}], "doc": ""}, {"name": "references", "type": ["null", {"type": "map", "values":
"string"}], "doc": ""}, {"name": "variantAttributes", "type": ["null", {"type": "record", "name":
"VariantAttributes", "doc": "", "fields": [{"name": "ihp", "type": ["null", "int"], "doc": ""},
{"name": "recurrentlyReported", "type": ["null", "boolean"], "doc": ""}, {"name": "fdp50", "type":
["null", "string"], "doc": ""}, {"name": "others", "type": ["null", {"type": "map", "values":
"string"}], "doc": ""}]}], "doc": ""}, {"name": "alleleFrequencies", "type": ["null", {"type":
"array", "items": {"type": "record", "name": "AlleleFrequency", "doc": "", "fields": [{"name":
"study", "type": "string", "doc": ""}, {"name": "population", "type": "string", "doc": ""}, {"name":
"alternateFrequency", "type": "float", "doc": ""}]}}], "doc": ""}, {"name":
"additionalNumericVariantAnnotations", "type": ["null", {"type": "map", "values": "float"}], "doc":
""}, {"name": "comments", "type": ["null", {"type": "array", "items": "string"}], "doc": ""},
{"name": "alleleOrigins", "type": {"type": "array", "items": "AlleleOrigin"}, "doc": ""}]}}, "doc":
""}, {"name": "referenceDatabasesVersions", "type": {"type": "map", "values": "string"}, "doc": ""},
{"name": "softwareVersions", "type": {"type": "map", "values": "string"}, "doc": ""}, {"name":
"comments", "type": ["null", {"type": "array", "items": "string"}], "doc": ""}]}]}, {"name":
"otherInterpretationResults", "type": ["null", {"type": "array", "items": "InterpretedGenomeRD"}]}]}
"""
    # Parsed Avro schema; built once when the class body executes.
    schema = avro_parse(_schemaSource)
    requiredFields = {
        "interpretationMetaData",
        "otherInterpretationResults",
        "tieringResult",
    }
    # All three fields nest other ProtocolElement records (defined elsewhere
    # in this generated module).
    @classmethod
    def isEmbeddedType(cls, fieldName):
        embeddedTypes = {
            'interpretationMetaData': InterpretationRequestRD,
            'otherInterpretationResults': InterpretedGenomeRD,
            'tieringResult': InterpretedGenomeRD,
        }
        return fieldName in embeddedTypes
    @classmethod
    def getEmbeddedType(cls, fieldName):
        embeddedTypes = {
            'interpretationMetaData': InterpretationRequestRD,
            'otherInterpretationResults': InterpretedGenomeRD,
            'tieringResult': InterpretedGenomeRD,
        }
        return embeddedTypes[fieldName]
    __slots__ = [
        'interpretationMetaData', 'otherInterpretationResults',
        'tieringResult'
    ]
    # NOTE: unlike most records in this module, interpretationMetaData
    # defaults to a fresh (empty) InterpretationRequestRD instance rather
    # than None; the other two fields default to None.
    def __init__(self, **kwargs):
        self.interpretationMetaData = kwargs.get(
            'interpretationMetaData', InterpretationRequestRD())
        self.otherInterpretationResults = kwargs.get(
            'otherInterpretationResults', None)
        self.tieringResult = kwargs.get(
            'tieringResult', None)
class InterpretationFlag(ProtocolElement):
    """A given interpretation flag together with an optional description."""
    _schemaSource = """
{"type": "record", "name": "InterpretationFlag", "namespace": "org.gel.models.report.avro", "doc":
"", "fields": [{"name": "interpretationFlag", "type": {"type": "enum", "name":
"InterpretationFlags", "doc": "", "symbols": ["mixed_chemistries", "mixedLab_preparation",
"low_tumour_purity", "uniparental_isodisomy", "uniparental_heterodisomy", "unusual_karyotype",
"high_cnv_count", "high_estimate_human_contamination_fraction", "mixed_recruiting_gmc",
"suspected_mosaicism", "low_quality_sample", "ffpe_tumour_sample", "ff_nano_tumour_sample",
"missing_values_for_proband_in_reported_variant", "reissued", "supplementary_report_errors",
"internal_use_only", "high_priority", "other"]}, "doc": ""}, {"name": "additionalDescription",
"type": ["null", "string"], "doc": ""}]}
"""
    # Parsed Avro schema and the record's field names.
    schema = avro_parse(_schemaSource)
    requiredFields = {
        "additionalDescription",
        "interpretationFlag",
    }

    @classmethod
    def isEmbeddedType(cls, fieldName):
        """No field of this record embeds another ProtocolElement."""
        return False

    @classmethod
    def getEmbeddedType(cls, fieldName):
        """There are no embedded types; every lookup raises KeyError."""
        raise KeyError(fieldName)

    __slots__ = [
        'additionalDescription', 'interpretationFlag'
    ]

    def __init__(self, **kwargs):
        """Populate fields from keyword arguments; missing fields are None."""
        self.additionalDescription = kwargs.get('additionalDescription')
        self.interpretationFlag = kwargs.get('interpretationFlag')
class InterpretationFlags(object):
    """Enumeration of flags relevant to the interpretation of a case.

    Each attribute is the enum symbol's own string value, mirroring the
    Avro "InterpretationFlags" enum.
    """
    mixed_chemistries = "mixed_chemistries"
    mixedLab_preparation = "mixedLab_preparation"
    low_tumour_purity = "low_tumour_purity"
    uniparental_isodisomy = "uniparental_isodisomy"
    uniparental_heterodisomy = "uniparental_heterodisomy"
    unusual_karyotype = "unusual_karyotype"
    high_cnv_count = "high_cnv_count"
    high_estimate_human_contamination_fraction = "high_estimate_human_contamination_fraction"
    mixed_recruiting_gmc = "mixed_recruiting_gmc"
    suspected_mosaicism = "suspected_mosaicism"
    low_quality_sample = "low_quality_sample"
    ffpe_tumour_sample = "ffpe_tumour_sample"
    ff_nano_tumour_sample = "ff_nano_tumour_sample"
    missing_values_for_proband_in_reported_variant = "missing_values_for_proband_in_reported_variant"
    reissued = "reissued"
    supplementary_report_errors = "supplementary_report_errors"
    internal_use_only = "internal_use_only"
    high_priority = "high_priority"
    other = "other"

    def __hash__(self):
        # Hash on the string form so instances hash like their repr.
        return hash(str(self))
class InterpretationRequestRD(ProtocolElement):
    """This record represents basic information for this report."""
    _schemaSource = """
{"type": "record", "name": "InterpretationRequestRD", "namespace": "org.gel.models.report.avro",
"doc": "", "fields": [{"name": "versionControl", "type": {"type": "record", "name":
"ReportVersionControl", "fields": [{"name": "gitVersionControl", "type": "string", "doc": "",
"default": "5.0.0"}]}, "doc": ""}, {"name": "interpretationRequestId", "type": "string", "doc": ""},
{"name": "interpretationRequestVersion", "type": "int", "doc": ""}, {"name": "internalStudyId",
"type": "string", "doc": ""}, {"name": "genomeAssembly", "type": {"type": "enum", "name":
"Assembly", "doc": "", "symbols": ["GRCh38", "GRCh37"]}, "doc": ""}, {"name": "workspace", "type":
{"type": "array", "items": "string"}, "doc": ""}, {"name": "bams", "type": ["null", {"type":
"array", "items": {"type": "record", "name": "File", "doc": "", "fields": [{"name": "sampleId",
"type": ["null", {"type": "array", "items": "string"}], "doc": ""}, {"name": "uriFile", "type":
"string", "doc": ""}, {"name": "fileType", "type": {"type": "enum", "name": "FileType", "symbols":
["BAM", "gVCF", "VCF_small", "VCF_somatic_small", "VCF_CNV", "VCF_somatic_CNV", "VCF_SV",
"VCF_somatic_SV", "VCF_SV_CNV", "SVG", "ANN", "BigWig", "MD5Sum", "ROH", "OTHER", "PARTITION",
"VARIANT_FREQUENCIES", "COVERAGE"]}, "doc": ""}, {"name": "md5Sum", "type": ["null", "string"],
"doc": ""}]}}], "doc": ""}, {"name": "vcfs", "type": ["null", {"type": "array", "items": "File"}],
"doc": ""}, {"name": "bigWigs", "type": ["null", {"type": "array", "items": "File"}], "doc": ""},
{"name": "pedigreeDiagram", "type": ["null", "File"], "doc": ""}, {"name": "annotationFile", "type":
["null", "File"], "doc": ""}, {"name": "otherFiles", "type": ["null", {"type": "map", "values":
"File"}], "doc": ""}, {"name": "pedigree", "type": ["null", {"type": "record", "name": "Pedigree",
"namespace": "org.gel.models.participant.avro", "doc": "", "fields": [{"name": "versionControl",
"type": ["null", {"type": "record", "name": "VersionControl", "fields": [{"name":
"GitVersionControl", "type": "string", "doc": "", "default": "1.1.0"}]}], "doc": ""}, {"name":
"LDPCode", "type": ["null", "string"], "doc": ""}, {"name": "familyId", "type": "string", "doc":
""}, {"name": "members", "type": {"type": "array", "items": {"type": "record", "name":
"PedigreeMember", "doc": "", "fields": [{"name": "pedigreeId", "type": ["null", "int"], "doc": ""},
{"name": "isProband", "type": ["null", "boolean"], "doc": ""}, {"name": "participantId", "type":
["null", "string"], "doc": ""}, {"name": "participantQCState", "type": ["null", {"type": "enum",
"name": "ParticipantQCState", "doc": "", "symbols": ["noState",
"passedMedicalReviewReadyForInterpretation", "passedMedicalReviewNotReadyForInterpretation",
"queryToGel", "queryToGMC", "failed"]}], "doc": ""}, {"name": "gelSuperFamilyId", "type": ["null",
"string"], "doc": ""}, {"name": "sex", "type": {"type": "enum", "name": "Sex", "doc": "", "symbols":
["MALE", "FEMALE", "UNKNOWN"]}, "doc": ""}, {"name": "personKaryotypicSex", "type": ["null",
{"type": "enum", "name": "PersonKaryotipicSex", "doc": "", "symbols": ["UNKNOWN", "XX", "XY", "XO",
"XXY", "XXX", "XXYY", "XXXY", "XXXX", "XYY", "OTHER"]}], "doc": ""}, {"name": "yearOfBirth", "type":
["null", "int"], "doc": ""}, {"name": "fatherId", "type": ["null", "int"], "doc": ""}, {"name":
"motherId", "type": ["null", "int"], "doc": ""}, {"name": "superFatherId", "type": ["null", "int"],
"doc": ""}, {"name": "superMotherId", "type": ["null", "int"], "doc": ""}, {"name": "twinGroup",
"type": ["null", "int"], "doc": ""}, {"name": "monozygotic", "type": ["null", {"type": "enum",
"name": "TernaryOption", "doc": "", "symbols": ["yes", "no", "unknown"]}], "doc": ""}, {"name":
"adoptedStatus", "type": ["null", {"type": "enum", "name": "AdoptedStatus", "doc": "", "symbols":
["notadopted", "adoptedin", "adoptedout"]}], "doc": ""}, {"name": "lifeStatus", "type": ["null",
{"type": "enum", "name": "LifeStatus", "doc": "", "symbols": ["ALIVE", "ABORTED", "DECEASED",
"UNBORN", "STILLBORN", "MISCARRIAGE"]}], "doc": ""}, {"name": "consanguineousParents", "type":
["null", "TernaryOption"], "doc": ""}, {"name": "affectionStatus", "type": ["null", {"type": "enum",
"name": "AffectionStatus", "doc": "", "symbols": ["UNAFFECTED", "AFFECTED", "UNCERTAIN"]}], "doc":
""}, {"name": "disorderList", "type": ["null", {"type": "array", "items": {"type": "record", "name":
"Disorder", "doc": "", "fields": [{"name": "diseaseGroup", "type": ["null", "string"], "doc": ""},
{"name": "diseaseSubGroup", "type": ["null", "string"], "doc": ""}, {"name": "specificDisease",
"type": ["null", "string"], "doc": ""}, {"name": "ageOfOnset", "type": ["null", "float"], "doc":
""}]}}], "doc": ""}, {"name": "hpoTermList", "type": ["null", {"type": "array", "items": {"type":
"record", "name": "HpoTerm", "doc": "", "fields": [{"name": "term", "type": "string", "doc": ""},
{"name": "termPresence", "type": ["null", "TernaryOption"], "doc": ""}, {"name": "hpoBuildNumber",
"type": ["null", "string"], "doc": ""}, {"name": "modifiers", "type": ["null", {"type": "record",
"name": "HpoTermModifiers", "fields": [{"name": "laterality", "type": ["null", {"type": "enum",
"name": "Laterality", "symbols": ["RIGHT", "UNILATERAL", "BILATERAL", "LEFT"]}]}, {"name":
"progression", "type": ["null", {"type": "enum", "name": "Progression", "symbols": ["PROGRESSIVE",
"NONPROGRESSIVE"]}]}, {"name": "severity", "type": ["null", {"type": "enum", "name": "Severity",
"symbols": ["BORDERLINE", "MILD", "MODERATE", "SEVERE", "PROFOUND"]}]}, {"name": "spatialPattern",
"type": ["null", {"type": "enum", "name": "SpatialPattern", "symbols": ["DISTAL", "GENERALIZED",
"LOCALIZED", "PROXIMAL"]}]}]}], "doc": ""}, {"name": "ageOfOnset", "type": ["null", {"type": "enum",
"name": "AgeOfOnset", "symbols": ["EMBRYONAL_ONSET", "FETAL_ONSET", "NEONATAL_ONSET",
"INFANTILE_ONSET", "CHILDHOOD_ONSET", "JUVENILE_ONSET", "YOUNG_ADULT_ONSET", "LATE_ONSET",
"MIDDLE_AGE_ONSET"]}], "doc": ""}]}}], "doc": ""}, {"name": "ancestries", "type": ["null", {"type":
"record", "name": "Ancestries", "doc": "", "fields": [{"name": "mothersEthnicOrigin", "type":
["null", {"type": "enum", "name": "EthnicCategory", "doc": "", "symbols": ["D", "E", "F", "G", "A",
"B", "C", "L", "M", "N", "H", "J", "K", "P", "S", "R", "Z"]}], "doc": ""}, {"name":
"mothersOtherRelevantAncestry", "type": ["null", "string"], "doc": ""}, {"name":
"fathersEthnicOrigin", "type": ["null", "EthnicCategory"], "doc": ""}, {"name":
"fathersOtherRelevantAncestry", "type": ["null", "string"], "doc": ""}, {"name":
"chiSquare1KGenomesPhase3Pop", "type": ["null", {"type": "array", "items": {"type": "record",
"name": "ChiSquare1KGenomesPhase3Pop", "doc": "", "fields": [{"name": "kgSuperPopCategory", "type":
{"type": "enum", "name": "KgSuperPopCategory", "doc": "", "symbols": ["AFR", "AMR", "EAS", "EUR",
"SAS"]}, "doc": ""}, {"name": "kgPopCategory", "type": ["null", {"type": "enum", "name":
"KgPopCategory", "doc": "", "symbols": ["ACB", "ASW", "BEB", "CDX", "CEU", "CHB", "CHS", "CLM",
"ESN", "FIN", "GBR", "GIH", "GWD", "IBS", "ITU", "JPT", "KHV", "LWK", "MSL", "MXL", "PEL", "PJL",
"PUR", "STU", "TSI", "YRI"]}], "doc": ""}, {"name": "chiSquare", "type": "double", "doc": ""}]}}],
"doc": ""}]}], "doc": ""}, {"name": "consentStatus", "type": ["null", {"type": "record", "name":
"ConsentStatus", "doc": "", "fields": [{"name": "programmeConsent", "type": "boolean", "doc": "",
"default": false}, {"name": "primaryFindingConsent", "type": "boolean", "doc": "", "default":
false}, {"name": "secondaryFindingConsent", "type": "boolean", "doc": "", "default": false},
{"name": "carrierStatusConsent", "type": "boolean", "doc": "", "default": false}]}], "doc": ""},
{"name": "samples", "type": ["null", {"type": "array", "items": {"type": "record", "name": "Sample",
"fields": [{"name": "sampleId", "type": "string", "doc": ""}, {"name": "labSampleId", "type": "int",
"doc": ""}, {"name": "source", "type": ["null", {"type": "enum", "name": "SampleSource", "doc": "",
"symbols": ["TUMOUR", "BONE_MARROW_ASPIRATE_TUMOUR_SORTED_CELLS",
"BONE_MARROW_ASPIRATE_TUMOUR_CELLS", "BLOOD", "SALIVA", "FIBROBLAST", "TISSUE"]}], "doc": ""},
{"name": "product", "type": ["null", {"type": "enum", "name": "Product", "symbols": ["DNA",
"RNA"]}], "doc": ""}, {"name": "preparationMethod", "type": ["null", {"type": "enum", "name":
"PreparationMethod", "symbols": ["EDTA", "ORAGENE", "FF", "FFPE", "CD128_SORTED_CELLS",
"ASPIRATE"]}], "doc": ""}]}}], "doc": ""}, {"name": "inbreedingCoefficient", "type": ["null",
{"type": "record", "name": "InbreedingCoefficient", "doc": "", "fields": [{"name": "sampleId",
"type": "string", "doc": ""}, {"name": "program", "type": "string", "doc": ""}, {"name": "version",
"type": "string", "doc": ""}, {"name": "estimationMethod", "type": "string", "doc": ""}, {"name":
"coefficient", "type": "double", "doc": ""}, {"name": "standardError", "type": ["null", "double"],
"doc": ""}]}], "doc": ""}, {"name": "additionalInformation", "type": ["null", {"type": "map",
"values": "string"}], "doc": ""}]}}, "doc": ""}, {"name": "analysisPanels", "type": ["null",
{"type": "array", "items": {"type": "record", "name": "AnalysisPanel", "doc": "", "fields":
[{"name": "specificDisease", "type": "string", "doc": ""}, {"name": "panelName", "type": "string",
"doc": ""}, {"name": "panelVersion", "type": ["null", "string"], "doc": ""}, {"name":
"reviewOutcome", "type": "string", "doc": ""}, {"name": "multipleGeneticOrigins", "type": "string",
"doc": ""}]}}], "doc": ""}, {"name": "diseasePenetrances", "type": ["null", {"type": "array",
"items": {"type": "record", "name": "DiseasePenetrance", "doc": "", "fields": [{"name":
"specificDisease", "type": "string", "doc": ""}, {"name": "penetrance", "type": {"type": "enum",
"name": "Penetrance", "doc": "", "symbols": ["complete", "incomplete"]}, "doc": ""}]}}], "doc": ""},
{"name": "readyForAnalysis", "type": "boolean", "doc": ""}, {"name": "familyQCState", "type":
["null", {"type": "enum", "name": "FamilyQCState", "doc": "", "symbols": ["noState",
"passedMedicalReviewReadyForInterpretation", "passedMedicalReviewNotReadyForInterpretation",
"queryToGel", "queryToGMC", "failed"]}], "doc": ""}]}], "doc": ""}, {"name": "otherFamilyHistory",
"type": ["null", {"type": "record", "name": "OtherFamilyHistory", "doc": "", "fields": [{"name":
"maternalFamilyHistory", "type": ["null", {"type": "array", "items": "string"}], "doc": ""},
{"name": "paternalFamilyHistory", "type": ["null", {"type": "array", "items": "string"}], "doc":
""}]}], "doc": ""}, {"name": "genePanelsCoverage", "type": ["null", {"type": "map", "values":
{"type": "map", "values": {"type": "map", "values": "float"}}}], "doc": ""}, {"name":
"interpretationFlags", "type": ["null", {"type": "array", "items": {"type": "record", "name":
"InterpretationFlag", "doc": "", "fields": [{"name": "interpretationFlag", "type": {"type": "enum",
"name": "InterpretationFlags", "doc": "", "symbols": ["mixed_chemistries", "mixedLab_preparation",
"low_tumour_purity", "uniparental_isodisomy", "uniparental_heterodisomy", "unusual_karyotype",
"high_cnv_count", "high_estimate_human_contamination_fraction", "mixed_recruiting_gmc",
"suspected_mosaicism", "low_quality_sample", "ffpe_tumour_sample", "ff_nano_tumour_sample",
"missing_values_for_proband_in_reported_variant", "reissued", "supplementary_report_errors",
"internal_use_only", "high_priority", "other"]}, "doc": ""}, {"name": "additionalDescription",
"type": ["null", "string"], "doc": ""}]}}], "doc": ""}, {"name": "additionalInfo", "type": ["null",
{"type": "map", "values": "string"}], "doc": ""}]}
"""
    # Parsed Avro schema and the record's field names.
    schema = avro_parse(_schemaSource)
    requiredFields = {
        "additionalInfo",
        "annotationFile",
        "bams",
        "bigWigs",
        "genePanelsCoverage",
        "genomeAssembly",
        "internalStudyId",
        "interpretationFlags",
        "interpretationRequestId",
        "interpretationRequestVersion",
        "otherFamilyHistory",
        "otherFiles",
        "pedigree",
        "pedigreeDiagram",
        "vcfs",
        "versionControl",
        "workspace",
    }

    @classmethod
    def isEmbeddedType(cls, fieldName):
        """Return True when *fieldName* stores nested ProtocolElement value(s)."""
        return fieldName in {
            'annotationFile', 'bams', 'bigWigs', 'interpretationFlags',
            'otherFamilyHistory', 'otherFiles', 'pedigree',
            'pedigreeDiagram', 'vcfs', 'versionControl',
        }

    @classmethod
    def getEmbeddedType(cls, fieldName):
        """Return the ProtocolElement subclass stored under *fieldName*.

        Raises KeyError for field names that are not embedded types.
        The mapping is built at call time because some referenced classes
        (e.g. Pedigree) are defined later in this module.
        """
        fieldTypes = {
            'annotationFile': File,
            'bams': File,
            'bigWigs': File,
            'interpretationFlags': InterpretationFlag,
            'otherFamilyHistory': OtherFamilyHistory,
            'otherFiles': File,
            'pedigree': Pedigree,
            'pedigreeDiagram': File,
            'vcfs': File,
            'versionControl': ReportVersionControl,
        }
        return fieldTypes[fieldName]

    __slots__ = [
        'additionalInfo', 'annotationFile', 'bams', 'bigWigs',
        'genePanelsCoverage', 'genomeAssembly', 'internalStudyId',
        'interpretationFlags', 'interpretationRequestId',
        'interpretationRequestVersion', 'otherFamilyHistory',
        'otherFiles', 'pedigree', 'pedigreeDiagram', 'vcfs',
        'versionControl', 'workspace'
    ]

    def __init__(self, **kwargs):
        """Populate fields from keyword arguments.

        Every field defaults to None except versionControl, which defaults
        to a freshly constructed ReportVersionControl.
        """
        self.additionalInfo = kwargs.get('additionalInfo')
        self.annotationFile = kwargs.get('annotationFile')
        self.bams = kwargs.get('bams')
        self.bigWigs = kwargs.get('bigWigs')
        self.genePanelsCoverage = kwargs.get('genePanelsCoverage')
        self.genomeAssembly = kwargs.get('genomeAssembly')
        self.internalStudyId = kwargs.get('internalStudyId')
        self.interpretationFlags = kwargs.get('interpretationFlags')
        self.interpretationRequestId = kwargs.get('interpretationRequestId')
        self.interpretationRequestVersion = kwargs.get(
            'interpretationRequestVersion')
        self.otherFamilyHistory = kwargs.get('otherFamilyHistory')
        self.otherFiles = kwargs.get('otherFiles')
        self.pedigree = kwargs.get('pedigree')
        self.pedigreeDiagram = kwargs.get('pedigreeDiagram')
        self.vcfs = kwargs.get('vcfs')
        self.versionControl = kwargs.get(
            'versionControl', ReportVersionControl())
        self.workspace = kwargs.get('workspace')
class InterpretedGenomeRD(ProtocolElement):
    """An interpreted genome for the rare disease program.

    Holds the list of candidate variants reported by an interpretation
    service together with all the relevant information that identify the
    case and how these conclusions were reached.
    """
    _schemaSource = """
{"type": "record", "name": "InterpretedGenomeRD", "namespace": "org.gel.models.report.avro", "doc":
"", "fields": [{"name": "versionControl", "type": {"type": "record", "name": "ReportVersionControl",
"fields": [{"name": "gitVersionControl", "type": "string", "doc": "", "default": "5.0.0"}]}, "doc":
""}, {"name": "interpretationRequestId", "type": "string", "doc": ""}, {"name":
"interpretationRequestVersion", "type": "int", "doc": ""}, {"name": "interpretationService", "type":
"string", "doc": ""}, {"name": "reportUrl", "type": ["null", "string"], "doc": ""}, {"name":
"variants", "type": {"type": "array", "items": {"type": "record", "name": "ReportedVariant", "doc":
"", "fields": [{"name": "variantCoordinates", "type": {"type": "record", "name":
"VariantCoordinates", "doc": "", "fields": [{"name": "chromosome", "type": "string", "doc": ""},
{"name": "position", "type": "int", "doc": ""}, {"name": "reference", "type": "string", "doc": ""},
{"name": "alternate", "type": "string", "doc": ""}, {"name": "assembly", "type": {"type": "enum",
"name": "Assembly", "doc": "", "symbols": ["GRCh38", "GRCh37"]}, "doc": ""}]}, "doc": ""}, {"name":
"dbSnpId", "type": ["null", "string"], "doc": ""}, {"name": "cosmicIds", "type": ["null", {"type":
"array", "items": "string"}], "doc": ""}, {"name": "clinVarIds", "type": ["null", {"type": "array",
"items": "string"}], "doc": ""}, {"name": "genomicChanges", "type": ["null", {"type": "array",
"items": "string"}], "doc": ""}, {"name": "cdnaChanges", "type": ["null", {"type": "array", "items":
"string"}], "doc": ""}, {"name": "proteinChanges", "type": ["null", {"type": "array", "items":
"string"}], "doc": ""}, {"name": "variantCalls", "type": {"type": "array", "items": {"type":
"record", "name": "VariantCall", "doc": "", "fields": [{"name": "participantId", "type": "string",
"doc": ""}, {"name": "sampleId", "type": "string", "doc": ""}, {"name": "zygosity", "type": {"type":
"enum", "name": "Zygosity", "doc": "", "symbols": ["reference_homozygous", "heterozygous",
"alternate_homozygous", "missing", "half_missing_reference", "half_missing_alternate",
"alternate_hemizigous", "reference_hemizigous", "unk", "na"]}, "doc": ""}, {"name": "phaseSet",
"type": ["null", "int"], "doc": ""}, {"name": "vaf", "type": ["null", "double"], "doc": ""},
{"name": "depthReference", "type": ["null", "int"], "doc": ""}, {"name": "depthAlternate", "type":
["null", "int"], "doc": ""}, {"name": "alleleOrigins", "type": {"type": "array", "items": {"type":
"enum", "name": "AlleleOrigin", "doc": "", "symbols": ["de_novo_variant", "germline_variant",
"maternal_variant", "paternal_variant", "pedigree_specific_variant", "population_specific_variant",
"somatic_variant"]}}, "doc": ""}]}}, "doc": ""}, {"name": "reportEvents", "type": {"type": "array",
"items": {"type": "record", "name": "ReportEvent", "doc": "", "fields": [{"name": "reportEventId",
"type": "string", "doc": ""}, {"name": "phenotypes", "type": {"type": "array", "items": "string"},
"doc": ""}, {"name": "variantConsequences", "type": {"type": "array", "items": {"type": "record",
"name": "VariantConsequence", "doc": "", "fields": [{"name": "id", "type": "string", "doc": ""},
{"name": "name", "type": ["null", "string"], "doc": ""}]}}, "doc": ""}, {"name": "genePanel",
"type": ["null", {"type": "record", "name": "GenePanel", "doc": "", "fields": [{"name": "panelName",
"type": "string", "doc": ""}, {"name": "panelVersion", "type": ["null", "string"], "doc": ""}]}],
"doc": ""}, {"name": "modeOfInheritance", "type": {"type": "enum", "name":
"ReportedModeOfInheritance", "doc": "", "symbols": ["monoallelic", "monoallelic_not_imprinted",
"monoallelic_maternally_imprinted", "monoallelic_paternally_imprinted", "biallelic",
"monoallelic_and_biallelic", "monoallelic_and_more_severe_biallelic", "xlinked_biallelic",
"xlinked_monoallelic", "mitochondrial", "unknown"]}, "doc": ""}, {"name": "genomicEntities", "type":
{"type": "array", "items": {"type": "record", "name": "GenomicEntity", "doc": "", "fields":
[{"name": "type", "type": {"type": "enum", "name": "GenomicEntityType", "doc": "", "symbols":
["regulatory_region", "gene", "transcript", "intergenic"]}, "doc": ""}, {"name": "ensemblId",
"type": "string", "doc": ""}, {"name": "geneSymbol", "type": ["null", "string"], "doc": ""},
{"name": "otherIds", "type": ["null", {"type": "map", "values": "string"}], "doc": ""}]}}, "doc":
""}, {"name": "penetrance", "type": ["null", {"type": "enum", "name": "Penetrance", "namespace":
"org.gel.models.participant.avro", "doc": "", "symbols": ["complete", "incomplete"]}], "doc": ""},
{"name": "score", "type": ["null", "float"], "doc": ""}, {"name": "vendorSpecificScores", "type":
["null", {"type": "map", "values": "float"}], "doc": ""}, {"name": "variantClassification", "type":
["null", {"type": "record", "name": "VariantClassification", "doc": "", "fields": [{"name":
"clinicalSignificance", "type": ["null", {"type": "enum", "name": "ClinicalSignificance", "symbols":
["benign", "likely_benign", "VUS", "likely_pathogenic", "pathogenic", "uncertain_significance"]}],
"doc": ""}, {"name": "drugResponseClassification", "type": ["null", {"type": "enum", "name":
"DrugResponseClassification", "symbols": ["responsive", "resistant", "toxicity", "indication",
"contraindication", "dosing", "increased_monitoring", "efficacy"]}], "doc": ""}, {"name":
"traitAssociation", "type": ["null", {"type": "enum", "name": "TraitAssociation", "symbols":
["established_risk_allele", "likely_risk_allele", "uncertain_risk_allele", "protective"]}], "doc":
""}, {"name": "tumorigenesisClassification", "type": ["null", {"type": "enum", "name":
"TumorigenesisClassification", "symbols": ["driver", "passenger", "modifier"]}], "doc": ""},
{"name": "functionalEffect", "type": ["null", {"type": "enum", "name": "VariantFunctionalEffect",
"symbols": ["dominant_negative_variant", "gain_of_function_variant", "lethal_variant",
"loss_of_function_variant", "loss_of_heterozygosity", "null_variant"]}], "doc": ""}]}], "doc": ""},
{"name": "fullyExplainsPhenotype", "type": ["null", "boolean"], "doc": ""}, {"name":
"groupOfVariants", "type": ["null", "int"], "doc": ""}, {"name": "eventJustification", "type":
["null", "string"], "doc": ""}, {"name": "tier", "type": ["null", {"type": "enum", "name": "Tier",
"doc": "", "symbols": ["NONE", "TIER1", "TIER2", "TIER3", "TIER4", "TIER5"]}], "doc": ""}]}}, "doc":
""}, {"name": "additionalTextualVariantAnnotations", "type": ["null", {"type": "map", "values":
"string"}], "doc": ""}, {"name": "references", "type": ["null", {"type": "map", "values":
"string"}], "doc": ""}, {"name": "variantAttributes", "type": ["null", {"type": "record", "name":
"VariantAttributes", "doc": "", "fields": [{"name": "ihp", "type": ["null", "int"], "doc": ""},
{"name": "recurrentlyReported", "type": ["null", "boolean"], "doc": ""}, {"name": "fdp50", "type":
["null", "string"], "doc": ""}, {"name": "others", "type": ["null", {"type": "map", "values":
"string"}], "doc": ""}]}], "doc": ""}, {"name": "alleleFrequencies", "type": ["null", {"type":
"array", "items": {"type": "record", "name": "AlleleFrequency", "doc": "", "fields": [{"name":
"study", "type": "string", "doc": ""}, {"name": "population", "type": "string", "doc": ""}, {"name":
"alternateFrequency", "type": "float", "doc": ""}]}}], "doc": ""}, {"name":
"additionalNumericVariantAnnotations", "type": ["null", {"type": "map", "values": "float"}], "doc":
""}, {"name": "comments", "type": ["null", {"type": "array", "items": "string"}], "doc": ""},
{"name": "alleleOrigins", "type": {"type": "array", "items": "AlleleOrigin"}, "doc": ""}]}}, "doc":
""}, {"name": "referenceDatabasesVersions", "type": {"type": "map", "values": "string"}, "doc": ""},
{"name": "softwareVersions", "type": {"type": "map", "values": "string"}, "doc": ""}, {"name":
"comments", "type": ["null", {"type": "array", "items": "string"}], "doc": ""}]}
"""
    # Parsed Avro schema and the record's field names.
    schema = avro_parse(_schemaSource)
    requiredFields = {
        "comments",
        "interpretationRequestId",
        "interpretationRequestVersion",
        "interpretationService",
        "referenceDatabasesVersions",
        "reportUrl",
        "softwareVersions",
        "variants",
        "versionControl",
    }

    @classmethod
    def isEmbeddedType(cls, fieldName):
        """Return True when *fieldName* stores nested ProtocolElement value(s)."""
        return fieldName in ('variants', 'versionControl')

    @classmethod
    def getEmbeddedType(cls, fieldName):
        """Return the ProtocolElement subclass stored under *fieldName*.

        Raises KeyError for field names that are not embedded types.
        """
        fieldTypes = {
            'variants': ReportedVariant,
            'versionControl': ReportVersionControl,
        }
        return fieldTypes[fieldName]

    __slots__ = [
        'comments', 'interpretationRequestId',
        'interpretationRequestVersion', 'interpretationService',
        'referenceDatabasesVersions', 'reportUrl', 'softwareVersions',
        'variants', 'versionControl'
    ]

    def __init__(self, **kwargs):
        """Populate fields from keyword arguments.

        Every field defaults to None except versionControl, which defaults
        to a freshly constructed ReportVersionControl.
        """
        self.comments = kwargs.get('comments')
        self.interpretationRequestId = kwargs.get('interpretationRequestId')
        self.interpretationRequestVersion = kwargs.get(
            'interpretationRequestVersion')
        self.interpretationService = kwargs.get('interpretationService')
        self.referenceDatabasesVersions = kwargs.get(
            'referenceDatabasesVersions')
        self.reportUrl = kwargs.get('reportUrl')
        self.softwareVersions = kwargs.get('softwareVersions')
        self.variants = kwargs.get('variants')
        self.versionControl = kwargs.get(
            'versionControl', ReportVersionControl())
class KgPopCategory(object):
    """1K Genomes project populations.

    Each attribute is the enum symbol's own string value, mirroring the
    Avro "KgPopCategory" enum.
    """
    ACB = "ACB"
    ASW = "ASW"
    BEB = "BEB"
    CDX = "CDX"
    CEU = "CEU"
    CHB = "CHB"
    CHS = "CHS"
    CLM = "CLM"
    ESN = "ESN"
    FIN = "FIN"
    GBR = "GBR"
    GIH = "GIH"
    GWD = "GWD"
    IBS = "IBS"
    ITU = "ITU"
    JPT = "JPT"
    KHV = "KHV"
    LWK = "LWK"
    MSL = "MSL"
    MXL = "MXL"
    PEL = "PEL"
    PJL = "PJL"
    PUR = "PUR"
    STU = "STU"
    TSI = "TSI"
    YRI = "YRI"

    def __hash__(self):
        # Hash on the string form so instances hash like their repr.
        return hash(str(self))
class KgSuperPopCategory(object):
    """1K Genomes project super populations.

    Each attribute is the enum symbol's own string value, mirroring the
    Avro "KgSuperPopCategory" enum.
    """
    AFR = "AFR"
    AMR = "AMR"
    EAS = "EAS"
    EUR = "EUR"
    SAS = "SAS"

    def __hash__(self):
        # Hash on the string form so instances hash like their repr.
        return hash(str(self))
class Laterality(object):
    """Laterality values mirroring the Avro "Laterality" enum.

    (No documentation in the source schema.)
    """
    RIGHT = "RIGHT"
    UNILATERAL = "UNILATERAL"
    BILATERAL = "BILATERAL"
    LEFT = "LEFT"

    def __hash__(self):
        # Hash on the string form so instances hash like their repr.
        return hash(str(self))
class LifeStatus(object):
    """Life Status values mirroring the Avro "LifeStatus" enum."""
    ALIVE = "ALIVE"
    ABORTED = "ABORTED"
    DECEASED = "DECEASED"
    UNBORN = "UNBORN"
    STILLBORN = "STILLBORN"
    MISCARRIAGE = "MISCARRIAGE"

    def __hash__(self):
        # Hash on the string form so instances hash like their repr.
        return hash(str(self))
class MatchedSamples(ProtocolElement):
    """A pair of germline and tumour samples that should/must be
    analyzed together.
    """
    _schemaSource = """
{"type": "record", "name": "MatchedSamples", "namespace": "org.gel.models.participant.avro", "doc":
"", "fields": [{"name": "germlineSampleId", "type": ["null", "string"], "doc": ""}, {"name":
"tumourSampleId", "type": ["null", "string"], "doc": ""}]}
"""
    # Parsed Avro schema and the record's field names.
    schema = avro_parse(_schemaSource)
    requiredFields = {
        "germlineSampleId",
        "tumourSampleId",
    }

    @classmethod
    def isEmbeddedType(cls, fieldName):
        """No field of this record embeds another ProtocolElement."""
        return False

    @classmethod
    def getEmbeddedType(cls, fieldName):
        """There are no embedded types; every lookup raises KeyError."""
        raise KeyError(fieldName)

    __slots__ = [
        'germlineSampleId', 'tumourSampleId'
    ]

    def __init__(self, **kwargs):
        """Populate fields from keyword arguments; missing fields are None."""
        self.germlineSampleId = kwargs.get('germlineSampleId')
        self.tumourSampleId = kwargs.get('tumourSampleId')
class Method(object):
    """Sample collection method values mirroring the Avro "Method" enum.

    (No documentation in the source schema.)
    """
    RESECTION = "RESECTION"
    BIOPSY = "BIOPSY"
    BLOOD = "BLOOD"

    def __hash__(self):
        # Hash on the string form so instances hash like their repr.
        return hash(str(self))
class ModifiedVariant(ProtocolElement):
    """
    Pairs a previously reported variant (``previousVariant``) with its
    modified form (``modifiedVariant``); both fields are
    ``ReportedVariant`` records as described by the embedded Avro schema.
    """
    # JSON Avro schema this generated class mirrors; the string content
    # must stay byte-identical to the source schema.
    _schemaSource = """
{"type": "record", "name": "ModifiedVariant", "namespace": "org.gel.models.report.avro", "fields":
[{"name": "previousVariant", "type": {"type": "record", "name": "ReportedVariant", "doc": "",
"fields": [{"name": "variantCoordinates", "type": {"type": "record", "name": "VariantCoordinates",
"doc": "", "fields": [{"name": "chromosome", "type": "string", "doc": ""}, {"name": "position",
"type": "int", "doc": ""}, {"name": "reference", "type": "string", "doc": ""}, {"name": "alternate",
"type": "string", "doc": ""}, {"name": "assembly", "type": {"type": "enum", "name": "Assembly",
"doc": "", "symbols": ["GRCh38", "GRCh37"]}, "doc": ""}]}, "doc": ""}, {"name": "dbSnpId", "type":
["null", "string"], "doc": ""}, {"name": "cosmicIds", "type": ["null", {"type": "array", "items":
"string"}], "doc": ""}, {"name": "clinVarIds", "type": ["null", {"type": "array", "items":
"string"}], "doc": ""}, {"name": "genomicChanges", "type": ["null", {"type": "array", "items":
"string"}], "doc": ""}, {"name": "cdnaChanges", "type": ["null", {"type": "array", "items":
"string"}], "doc": ""}, {"name": "proteinChanges", "type": ["null", {"type": "array", "items":
"string"}], "doc": ""}, {"name": "variantCalls", "type": {"type": "array", "items": {"type":
"record", "name": "VariantCall", "doc": "", "fields": [{"name": "participantId", "type": "string",
"doc": ""}, {"name": "sampleId", "type": "string", "doc": ""}, {"name": "zygosity", "type": {"type":
"enum", "name": "Zygosity", "doc": "", "symbols": ["reference_homozygous", "heterozygous",
"alternate_homozygous", "missing", "half_missing_reference", "half_missing_alternate",
"alternate_hemizigous", "reference_hemizigous", "unk", "na"]}, "doc": ""}, {"name": "phaseSet",
"type": ["null", "int"], "doc": ""}, {"name": "vaf", "type": ["null", "double"], "doc": ""},
{"name": "depthReference", "type": ["null", "int"], "doc": ""}, {"name": "depthAlternate", "type":
["null", "int"], "doc": ""}, {"name": "alleleOrigins", "type": {"type": "array", "items": {"type":
"enum", "name": "AlleleOrigin", "doc": "", "symbols": ["de_novo_variant", "germline_variant",
"maternal_variant", "paternal_variant", "pedigree_specific_variant", "population_specific_variant",
"somatic_variant"]}}, "doc": ""}]}}, "doc": ""}, {"name": "reportEvents", "type": {"type": "array",
"items": {"type": "record", "name": "ReportEvent", "doc": "", "fields": [{"name": "reportEventId",
"type": "string", "doc": ""}, {"name": "phenotypes", "type": {"type": "array", "items": "string"},
"doc": ""}, {"name": "variantConsequences", "type": {"type": "array", "items": {"type": "record",
"name": "VariantConsequence", "doc": "", "fields": [{"name": "id", "type": "string", "doc": ""},
{"name": "name", "type": ["null", "string"], "doc": ""}]}}, "doc": ""}, {"name": "genePanel",
"type": ["null", {"type": "record", "name": "GenePanel", "doc": "", "fields": [{"name": "panelName",
"type": "string", "doc": ""}, {"name": "panelVersion", "type": ["null", "string"], "doc": ""}]}],
"doc": ""}, {"name": "modeOfInheritance", "type": {"type": "enum", "name":
"ReportedModeOfInheritance", "doc": "", "symbols": ["monoallelic", "monoallelic_not_imprinted",
"monoallelic_maternally_imprinted", "monoallelic_paternally_imprinted", "biallelic",
"monoallelic_and_biallelic", "monoallelic_and_more_severe_biallelic", "xlinked_biallelic",
"xlinked_monoallelic", "mitochondrial", "unknown"]}, "doc": ""}, {"name": "genomicEntities", "type":
{"type": "array", "items": {"type": "record", "name": "GenomicEntity", "doc": "", "fields":
[{"name": "type", "type": {"type": "enum", "name": "GenomicEntityType", "doc": "", "symbols":
["regulatory_region", "gene", "transcript", "intergenic"]}, "doc": ""}, {"name": "ensemblId",
"type": "string", "doc": ""}, {"name": "geneSymbol", "type": ["null", "string"], "doc": ""},
{"name": "otherIds", "type": ["null", {"type": "map", "values": "string"}], "doc": ""}]}}, "doc":
""}, {"name": "penetrance", "type": ["null", {"type": "enum", "name": "Penetrance", "namespace":
"org.gel.models.participant.avro", "doc": "", "symbols": ["complete", "incomplete"]}], "doc": ""},
{"name": "score", "type": ["null", "float"], "doc": ""}, {"name": "vendorSpecificScores", "type":
["null", {"type": "map", "values": "float"}], "doc": ""}, {"name": "variantClassification", "type":
["null", {"type": "record", "name": "VariantClassification", "doc": "", "fields": [{"name":
"clinicalSignificance", "type": ["null", {"type": "enum", "name": "ClinicalSignificance", "symbols":
["benign", "likely_benign", "VUS", "likely_pathogenic", "pathogenic", "uncertain_significance"]}],
"doc": ""}, {"name": "drugResponseClassification", "type": ["null", {"type": "enum", "name":
"DrugResponseClassification", "symbols": ["responsive", "resistant", "toxicity", "indication",
"contraindication", "dosing", "increased_monitoring", "efficacy"]}], "doc": ""}, {"name":
"traitAssociation", "type": ["null", {"type": "enum", "name": "TraitAssociation", "symbols":
["established_risk_allele", "likely_risk_allele", "uncertain_risk_allele", "protective"]}], "doc":
""}, {"name": "tumorigenesisClassification", "type": ["null", {"type": "enum", "name":
"TumorigenesisClassification", "symbols": ["driver", "passenger", "modifier"]}], "doc": ""},
{"name": "functionalEffect", "type": ["null", {"type": "enum", "name": "VariantFunctionalEffect",
"symbols": ["dominant_negative_variant", "gain_of_function_variant", "lethal_variant",
"loss_of_function_variant", "loss_of_heterozygosity", "null_variant"]}], "doc": ""}]}], "doc": ""},
{"name": "fullyExplainsPhenotype", "type": ["null", "boolean"], "doc": ""}, {"name":
"groupOfVariants", "type": ["null", "int"], "doc": ""}, {"name": "eventJustification", "type":
["null", "string"], "doc": ""}, {"name": "tier", "type": ["null", {"type": "enum", "name": "Tier",
"doc": "", "symbols": ["NONE", "TIER1", "TIER2", "TIER3", "TIER4", "TIER5"]}], "doc": ""}]}}, "doc":
""}, {"name": "additionalTextualVariantAnnotations", "type": ["null", {"type": "map", "values":
"string"}], "doc": ""}, {"name": "references", "type": ["null", {"type": "map", "values":
"string"}], "doc": ""}, {"name": "variantAttributes", "type": ["null", {"type": "record", "name":
"VariantAttributes", "doc": "", "fields": [{"name": "ihp", "type": ["null", "int"], "doc": ""},
{"name": "recurrentlyReported", "type": ["null", "boolean"], "doc": ""}, {"name": "fdp50", "type":
["null", "string"], "doc": ""}, {"name": "others", "type": ["null", {"type": "map", "values":
"string"}], "doc": ""}]}], "doc": ""}, {"name": "alleleFrequencies", "type": ["null", {"type":
"array", "items": {"type": "record", "name": "AlleleFrequency", "doc": "", "fields": [{"name":
"study", "type": "string", "doc": ""}, {"name": "population", "type": "string", "doc": ""}, {"name":
"alternateFrequency", "type": "float", "doc": ""}]}}], "doc": ""}, {"name":
"additionalNumericVariantAnnotations", "type": ["null", {"type": "map", "values": "float"}], "doc":
""}, {"name": "comments", "type": ["null", {"type": "array", "items": "string"}], "doc": ""},
{"name": "alleleOrigins", "type": {"type": "array", "items": "AlleleOrigin"}, "doc": ""}]}},
{"name": "modifiedVariant", "type": "ReportedVariant"}]}
"""
    # Parsed Avro schema object used for (de)serialisation.
    schema = avro_parse(_schemaSource)
    # Field names that must be present for a valid record.
    requiredFields = {
        "modifiedVariant",
        "previousVariant",
    }
    @classmethod
    def isEmbeddedType(cls, fieldName):
        # True when fieldName holds a nested ProtocolElement type.
        embeddedTypes = {
            'modifiedVariant': ReportedVariant,
            'previousVariant': ReportedVariant,
        }
        return fieldName in embeddedTypes
    @classmethod
    def getEmbeddedType(cls, fieldName):
        # Returns the nested type class; raises KeyError for other names.
        embeddedTypes = {
            'modifiedVariant': ReportedVariant,
            'previousVariant': ReportedVariant,
        }
        return embeddedTypes[fieldName]
    __slots__ = [
        'modifiedVariant', 'previousVariant'
    ]
    def __init__(self, **kwargs):
        # Both fields default to fresh (empty) ReportedVariant instances.
        self.modifiedVariant = kwargs.get(
            'modifiedVariant', ReportedVariant())
        self.previousVariant = kwargs.get(
            'previousVariant', ReportedVariant())
class OtherFamilyHistory(ProtocolElement):
    """
    Family history for secondary findings: arrays of strings naming
    discrete family-history phenotypes (usually `EndocrineTumours`,
    `colorectal`, `BreastOvarian` and `HDOrStroke`, but others are
    possible), split by maternal and paternal side.
    """
    # Avro schema (JSON) this record type was generated from.
    _schemaSource = """
{"type": "record", "name": "OtherFamilyHistory", "namespace": "org.gel.models.report.avro", "doc":
"", "fields": [{"name": "maternalFamilyHistory", "type": ["null", {"type": "array", "items":
"string"}], "doc": ""}, {"name": "paternalFamilyHistory", "type": ["null", {"type": "array",
"items": "string"}], "doc": ""}]}
"""
    schema = avro_parse(_schemaSource)
    requiredFields = {
        "maternalFamilyHistory",
        "paternalFamilyHistory",
    }

    @classmethod
    def isEmbeddedType(cls, fieldName):
        # This record has no fields holding nested ProtocolElement types.
        return fieldName in {}

    @classmethod
    def getEmbeddedType(cls, fieldName):
        # No embedded fields exist, so any lookup raises KeyError.
        return {}[fieldName]

    __slots__ = [
        'maternalFamilyHistory', 'paternalFamilyHistory'
    ]

    def __init__(self, **kwargs):
        # Both history lists default to None when not supplied.
        self.maternalFamilyHistory = kwargs.get('maternalFamilyHistory')
        self.paternalFamilyHistory = kwargs.get('paternalFamilyHistory')
class ParticipantQCState(object):
    """
    String constants for a participant's QC state.
    """
    noState = "noState"
    passedMedicalReviewReadyForInterpretation = "passedMedicalReviewReadyForInterpretation"
    passedMedicalReviewNotReadyForInterpretation = "passedMedicalReviewNotReadyForInterpretation"
    queryToGel = "queryToGel"
    queryToGMC = "queryToGMC"
    failed = "failed"

    def __hash__(self):
        # Derive the hash from the string form of the instance.
        return hash(str(self))
class Pedigree(ProtocolElement):
    """
    The concept of a family with associated phenotypes, as present in
    the record RDParticipant: family-level metadata (LDP code, family
    id, QC state), the list of pedigree members, analysis panels and
    disease penetrances.
    """
    # JSON Avro schema this generated class mirrors; the string content
    # must stay byte-identical to the source schema.
    _schemaSource = """
{"type": "record", "name": "Pedigree", "namespace": "org.gel.models.participant.avro", "doc": "",
"fields": [{"name": "versionControl", "type": ["null", {"type": "record", "name": "VersionControl",
"fields": [{"name": "GitVersionControl", "type": "string", "doc": "", "default": "1.1.0"}]}], "doc":
""}, {"name": "LDPCode", "type": ["null", "string"], "doc": ""}, {"name": "familyId", "type":
"string", "doc": ""}, {"name": "members", "type": {"type": "array", "items": {"type": "record",
"name": "PedigreeMember", "doc": "", "fields": [{"name": "pedigreeId", "type": ["null", "int"],
"doc": ""}, {"name": "isProband", "type": ["null", "boolean"], "doc": ""}, {"name": "participantId",
"type": ["null", "string"], "doc": ""}, {"name": "participantQCState", "type": ["null", {"type":
"enum", "name": "ParticipantQCState", "doc": "", "symbols": ["noState",
"passedMedicalReviewReadyForInterpretation", "passedMedicalReviewNotReadyForInterpretation",
"queryToGel", "queryToGMC", "failed"]}], "doc": ""}, {"name": "gelSuperFamilyId", "type": ["null",
"string"], "doc": ""}, {"name": "sex", "type": {"type": "enum", "name": "Sex", "doc": "", "symbols":
["MALE", "FEMALE", "UNKNOWN"]}, "doc": ""}, {"name": "personKaryotypicSex", "type": ["null",
{"type": "enum", "name": "PersonKaryotipicSex", "doc": "", "symbols": ["UNKNOWN", "XX", "XY", "XO",
"XXY", "XXX", "XXYY", "XXXY", "XXXX", "XYY", "OTHER"]}], "doc": ""}, {"name": "yearOfBirth", "type":
["null", "int"], "doc": ""}, {"name": "fatherId", "type": ["null", "int"], "doc": ""}, {"name":
"motherId", "type": ["null", "int"], "doc": ""}, {"name": "superFatherId", "type": ["null", "int"],
"doc": ""}, {"name": "superMotherId", "type": ["null", "int"], "doc": ""}, {"name": "twinGroup",
"type": ["null", "int"], "doc": ""}, {"name": "monozygotic", "type": ["null", {"type": "enum",
"name": "TernaryOption", "doc": "", "symbols": ["yes", "no", "unknown"]}], "doc": ""}, {"name":
"adoptedStatus", "type": ["null", {"type": "enum", "name": "AdoptedStatus", "doc": "", "symbols":
["notadopted", "adoptedin", "adoptedout"]}], "doc": ""}, {"name": "lifeStatus", "type": ["null",
{"type": "enum", "name": "LifeStatus", "doc": "", "symbols": ["ALIVE", "ABORTED", "DECEASED",
"UNBORN", "STILLBORN", "MISCARRIAGE"]}], "doc": ""}, {"name": "consanguineousParents", "type":
["null", "TernaryOption"], "doc": ""}, {"name": "affectionStatus", "type": ["null", {"type": "enum",
"name": "AffectionStatus", "doc": "", "symbols": ["UNAFFECTED", "AFFECTED", "UNCERTAIN"]}], "doc":
""}, {"name": "disorderList", "type": ["null", {"type": "array", "items": {"type": "record", "name":
"Disorder", "doc": "", "fields": [{"name": "diseaseGroup", "type": ["null", "string"], "doc": ""},
{"name": "diseaseSubGroup", "type": ["null", "string"], "doc": ""}, {"name": "specificDisease",
"type": ["null", "string"], "doc": ""}, {"name": "ageOfOnset", "type": ["null", "float"], "doc":
""}]}}], "doc": ""}, {"name": "hpoTermList", "type": ["null", {"type": "array", "items": {"type":
"record", "name": "HpoTerm", "doc": "", "fields": [{"name": "term", "type": "string", "doc": ""},
{"name": "termPresence", "type": ["null", "TernaryOption"], "doc": ""}, {"name": "hpoBuildNumber",
"type": ["null", "string"], "doc": ""}, {"name": "modifiers", "type": ["null", {"type": "record",
"name": "HpoTermModifiers", "fields": [{"name": "laterality", "type": ["null", {"type": "enum",
"name": "Laterality", "symbols": ["RIGHT", "UNILATERAL", "BILATERAL", "LEFT"]}]}, {"name":
"progression", "type": ["null", {"type": "enum", "name": "Progression", "symbols": ["PROGRESSIVE",
"NONPROGRESSIVE"]}]}, {"name": "severity", "type": ["null", {"type": "enum", "name": "Severity",
"symbols": ["BORDERLINE", "MILD", "MODERATE", "SEVERE", "PROFOUND"]}]}, {"name": "spatialPattern",
"type": ["null", {"type": "enum", "name": "SpatialPattern", "symbols": ["DISTAL", "GENERALIZED",
"LOCALIZED", "PROXIMAL"]}]}]}], "doc": ""}, {"name": "ageOfOnset", "type": ["null", {"type": "enum",
"name": "AgeOfOnset", "symbols": ["EMBRYONAL_ONSET", "FETAL_ONSET", "NEONATAL_ONSET",
"INFANTILE_ONSET", "CHILDHOOD_ONSET", "JUVENILE_ONSET", "YOUNG_ADULT_ONSET", "LATE_ONSET",
"MIDDLE_AGE_ONSET"]}], "doc": ""}]}}], "doc": ""}, {"name": "ancestries", "type": ["null", {"type":
"record", "name": "Ancestries", "doc": "", "fields": [{"name": "mothersEthnicOrigin", "type":
["null", {"type": "enum", "name": "EthnicCategory", "doc": "", "symbols": ["D", "E", "F", "G", "A",
"B", "C", "L", "M", "N", "H", "J", "K", "P", "S", "R", "Z"]}], "doc": ""}, {"name":
"mothersOtherRelevantAncestry", "type": ["null", "string"], "doc": ""}, {"name":
"fathersEthnicOrigin", "type": ["null", "EthnicCategory"], "doc": ""}, {"name":
"fathersOtherRelevantAncestry", "type": ["null", "string"], "doc": ""}, {"name":
"chiSquare1KGenomesPhase3Pop", "type": ["null", {"type": "array", "items": {"type": "record",
"name": "ChiSquare1KGenomesPhase3Pop", "doc": "", "fields": [{"name": "kgSuperPopCategory", "type":
{"type": "enum", "name": "KgSuperPopCategory", "doc": "", "symbols": ["AFR", "AMR", "EAS", "EUR",
"SAS"]}, "doc": ""}, {"name": "kgPopCategory", "type": ["null", {"type": "enum", "name":
"KgPopCategory", "doc": "", "symbols": ["ACB", "ASW", "BEB", "CDX", "CEU", "CHB", "CHS", "CLM",
"ESN", "FIN", "GBR", "GIH", "GWD", "IBS", "ITU", "JPT", "KHV", "LWK", "MSL", "MXL", "PEL", "PJL",
"PUR", "STU", "TSI", "YRI"]}], "doc": ""}, {"name": "chiSquare", "type": "double", "doc": ""}]}}],
"doc": ""}]}], "doc": ""}, {"name": "consentStatus", "type": ["null", {"type": "record", "name":
"ConsentStatus", "doc": "", "fields": [{"name": "programmeConsent", "type": "boolean", "doc": "",
"default": false}, {"name": "primaryFindingConsent", "type": "boolean", "doc": "", "default":
false}, {"name": "secondaryFindingConsent", "type": "boolean", "doc": "", "default": false},
{"name": "carrierStatusConsent", "type": "boolean", "doc": "", "default": false}]}], "doc": ""},
{"name": "samples", "type": ["null", {"type": "array", "items": {"type": "record", "name": "Sample",
"fields": [{"name": "sampleId", "type": "string", "doc": ""}, {"name": "labSampleId", "type": "int",
"doc": ""}, {"name": "source", "type": ["null", {"type": "enum", "name": "SampleSource", "doc": "",
"symbols": ["TUMOUR", "BONE_MARROW_ASPIRATE_TUMOUR_SORTED_CELLS",
"BONE_MARROW_ASPIRATE_TUMOUR_CELLS", "BLOOD", "SALIVA", "FIBROBLAST", "TISSUE"]}], "doc": ""},
{"name": "product", "type": ["null", {"type": "enum", "name": "Product", "symbols": ["DNA",
"RNA"]}], "doc": ""}, {"name": "preparationMethod", "type": ["null", {"type": "enum", "name":
"PreparationMethod", "symbols": ["EDTA", "ORAGENE", "FF", "FFPE", "CD128_SORTED_CELLS",
"ASPIRATE"]}], "doc": ""}]}}], "doc": ""}, {"name": "inbreedingCoefficient", "type": ["null",
{"type": "record", "name": "InbreedingCoefficient", "doc": "", "fields": [{"name": "sampleId",
"type": "string", "doc": ""}, {"name": "program", "type": "string", "doc": ""}, {"name": "version",
"type": "string", "doc": ""}, {"name": "estimationMethod", "type": "string", "doc": ""}, {"name":
"coefficient", "type": "double", "doc": ""}, {"name": "standardError", "type": ["null", "double"],
"doc": ""}]}], "doc": ""}, {"name": "additionalInformation", "type": ["null", {"type": "map",
"values": "string"}], "doc": ""}]}}, "doc": ""}, {"name": "analysisPanels", "type": ["null",
{"type": "array", "items": {"type": "record", "name": "AnalysisPanel", "doc": "", "fields":
[{"name": "specificDisease", "type": "string", "doc": ""}, {"name": "panelName", "type": "string",
"doc": ""}, {"name": "panelVersion", "type": ["null", "string"], "doc": ""}, {"name":
"reviewOutcome", "type": "string", "doc": ""}, {"name": "multipleGeneticOrigins", "type": "string",
"doc": ""}]}}], "doc": ""}, {"name": "diseasePenetrances", "type": ["null", {"type": "array",
"items": {"type": "record", "name": "DiseasePenetrance", "doc": "", "fields": [{"name":
"specificDisease", "type": "string", "doc": ""}, {"name": "penetrance", "type": {"type": "enum",
"name": "Penetrance", "doc": "", "symbols": ["complete", "incomplete"]}, "doc": ""}]}}], "doc": ""},
{"name": "readyForAnalysis", "type": "boolean", "doc": ""}, {"name": "familyQCState", "type":
["null", {"type": "enum", "name": "FamilyQCState", "doc": "", "symbols": ["noState",
"passedMedicalReviewReadyForInterpretation", "passedMedicalReviewNotReadyForInterpretation",
"queryToGel", "queryToGMC", "failed"]}], "doc": ""}]}
"""
    # Parsed Avro schema object used for (de)serialisation.
    schema = avro_parse(_schemaSource)
    # Field names that must be present for a valid record.
    requiredFields = {
        "LDPCode",
        "analysisPanels",
        "diseasePenetrances",
        "familyId",
        "familyQCState",
        "members",
        "readyForAnalysis",
        "versionControl",
    }
    @classmethod
    def isEmbeddedType(cls, fieldName):
        # True when fieldName holds a nested ProtocolElement type.
        embeddedTypes = {
            'analysisPanels': AnalysisPanel,
            'diseasePenetrances': DiseasePenetrance,
            'members': PedigreeMember,
            'versionControl': VersionControl,
        }
        return fieldName in embeddedTypes
    @classmethod
    def getEmbeddedType(cls, fieldName):
        # Returns the nested type class; raises KeyError for other names.
        embeddedTypes = {
            'analysisPanels': AnalysisPanel,
            'diseasePenetrances': DiseasePenetrance,
            'members': PedigreeMember,
            'versionControl': VersionControl,
        }
        return embeddedTypes[fieldName]
    __slots__ = [
        'LDPCode', 'analysisPanels', 'diseasePenetrances', 'familyId',
        'familyQCState', 'members', 'readyForAnalysis',
        'versionControl'
    ]
    def __init__(self, **kwargs):
        # Every field defaults to None when not supplied.
        self.LDPCode = kwargs.get(
            'LDPCode', None)
        self.analysisPanels = kwargs.get(
            'analysisPanels', None)
        self.diseasePenetrances = kwargs.get(
            'diseasePenetrances', None)
        self.familyId = kwargs.get(
            'familyId', None)
        self.familyQCState = kwargs.get(
            'familyQCState', None)
        self.members = kwargs.get(
            'members', None)
        self.readyForAnalysis = kwargs.get(
            'readyForAnalysis', None)
        self.versionControl = kwargs.get(
            'versionControl', None)
class PedigreeMember(ProtocolElement):
    """
    A rare-disease participant's demographics and pedigree information:
    identity and family links (pedigreeId, fatherId, motherId, ...),
    status fields (lifeStatus, affectionStatus, adoptedStatus, ...),
    phenotypes (disorderList, hpoTermList), ancestry, consent and
    samples, as described by the embedded Avro schema.
    """
    # JSON Avro schema this generated class mirrors; the string content
    # must stay byte-identical to the source schema.
    _schemaSource = """
{"type": "record", "name": "PedigreeMember", "namespace": "org.gel.models.participant.avro", "doc":
"", "fields": [{"name": "pedigreeId", "type": ["null", "int"], "doc": ""}, {"name": "isProband",
"type": ["null", "boolean"], "doc": ""}, {"name": "participantId", "type": ["null", "string"],
"doc": ""}, {"name": "participantQCState", "type": ["null", {"type": "enum", "name":
"ParticipantQCState", "doc": "", "symbols": ["noState", "passedMedicalReviewReadyForInterpretation",
"passedMedicalReviewNotReadyForInterpretation", "queryToGel", "queryToGMC", "failed"]}], "doc": ""},
{"name": "gelSuperFamilyId", "type": ["null", "string"], "doc": ""}, {"name": "sex", "type":
{"type": "enum", "name": "Sex", "doc": "", "symbols": ["MALE", "FEMALE", "UNKNOWN"]}, "doc": ""},
{"name": "personKaryotypicSex", "type": ["null", {"type": "enum", "name": "PersonKaryotipicSex",
"doc": "", "symbols": ["UNKNOWN", "XX", "XY", "XO", "XXY", "XXX", "XXYY", "XXXY", "XXXX", "XYY",
"OTHER"]}], "doc": ""}, {"name": "yearOfBirth", "type": ["null", "int"], "doc": ""}, {"name":
"fatherId", "type": ["null", "int"], "doc": ""}, {"name": "motherId", "type": ["null", "int"],
"doc": ""}, {"name": "superFatherId", "type": ["null", "int"], "doc": ""}, {"name": "superMotherId",
"type": ["null", "int"], "doc": ""}, {"name": "twinGroup", "type": ["null", "int"], "doc": ""},
{"name": "monozygotic", "type": ["null", {"type": "enum", "name": "TernaryOption", "doc": "",
"symbols": ["yes", "no", "unknown"]}], "doc": ""}, {"name": "adoptedStatus", "type": ["null",
{"type": "enum", "name": "AdoptedStatus", "doc": "", "symbols": ["notadopted", "adoptedin",
"adoptedout"]}], "doc": ""}, {"name": "lifeStatus", "type": ["null", {"type": "enum", "name":
"LifeStatus", "doc": "", "symbols": ["ALIVE", "ABORTED", "DECEASED", "UNBORN", "STILLBORN",
"MISCARRIAGE"]}], "doc": ""}, {"name": "consanguineousParents", "type": ["null", "TernaryOption"],
"doc": ""}, {"name": "affectionStatus", "type": ["null", {"type": "enum", "name": "AffectionStatus",
"doc": "", "symbols": ["UNAFFECTED", "AFFECTED", "UNCERTAIN"]}], "doc": ""}, {"name":
"disorderList", "type": ["null", {"type": "array", "items": {"type": "record", "name": "Disorder",
"doc": "", "fields": [{"name": "diseaseGroup", "type": ["null", "string"], "doc": ""}, {"name":
"diseaseSubGroup", "type": ["null", "string"], "doc": ""}, {"name": "specificDisease", "type":
["null", "string"], "doc": ""}, {"name": "ageOfOnset", "type": ["null", "float"], "doc": ""}]}}],
"doc": ""}, {"name": "hpoTermList", "type": ["null", {"type": "array", "items": {"type": "record",
"name": "HpoTerm", "doc": "", "fields": [{"name": "term", "type": "string", "doc": ""}, {"name":
"termPresence", "type": ["null", "TernaryOption"], "doc": ""}, {"name": "hpoBuildNumber", "type":
["null", "string"], "doc": ""}, {"name": "modifiers", "type": ["null", {"type": "record", "name":
"HpoTermModifiers", "fields": [{"name": "laterality", "type": ["null", {"type": "enum", "name":
"Laterality", "symbols": ["RIGHT", "UNILATERAL", "BILATERAL", "LEFT"]}]}, {"name": "progression",
"type": ["null", {"type": "enum", "name": "Progression", "symbols": ["PROGRESSIVE",
"NONPROGRESSIVE"]}]}, {"name": "severity", "type": ["null", {"type": "enum", "name": "Severity",
"symbols": ["BORDERLINE", "MILD", "MODERATE", "SEVERE", "PROFOUND"]}]}, {"name": "spatialPattern",
"type": ["null", {"type": "enum", "name": "SpatialPattern", "symbols": ["DISTAL", "GENERALIZED",
"LOCALIZED", "PROXIMAL"]}]}]}], "doc": ""}, {"name": "ageOfOnset", "type": ["null", {"type": "enum",
"name": "AgeOfOnset", "symbols": ["EMBRYONAL_ONSET", "FETAL_ONSET", "NEONATAL_ONSET",
"INFANTILE_ONSET", "CHILDHOOD_ONSET", "JUVENILE_ONSET", "YOUNG_ADULT_ONSET", "LATE_ONSET",
"MIDDLE_AGE_ONSET"]}], "doc": ""}]}}], "doc": ""}, {"name": "ancestries", "type": ["null", {"type":
"record", "name": "Ancestries", "doc": "", "fields": [{"name": "mothersEthnicOrigin", "type":
["null", {"type": "enum", "name": "EthnicCategory", "doc": "", "symbols": ["D", "E", "F", "G", "A",
"B", "C", "L", "M", "N", "H", "J", "K", "P", "S", "R", "Z"]}], "doc": ""}, {"name":
"mothersOtherRelevantAncestry", "type": ["null", "string"], "doc": ""}, {"name":
"fathersEthnicOrigin", "type": ["null", "EthnicCategory"], "doc": ""}, {"name":
"fathersOtherRelevantAncestry", "type": ["null", "string"], "doc": ""}, {"name":
"chiSquare1KGenomesPhase3Pop", "type": ["null", {"type": "array", "items": {"type": "record",
"name": "ChiSquare1KGenomesPhase3Pop", "doc": "", "fields": [{"name": "kgSuperPopCategory", "type":
{"type": "enum", "name": "KgSuperPopCategory", "doc": "", "symbols": ["AFR", "AMR", "EAS", "EUR",
"SAS"]}, "doc": ""}, {"name": "kgPopCategory", "type": ["null", {"type": "enum", "name":
"KgPopCategory", "doc": "", "symbols": ["ACB", "ASW", "BEB", "CDX", "CEU", "CHB", "CHS", "CLM",
"ESN", "FIN", "GBR", "GIH", "GWD", "IBS", "ITU", "JPT", "KHV", "LWK", "MSL", "MXL", "PEL", "PJL",
"PUR", "STU", "TSI", "YRI"]}], "doc": ""}, {"name": "chiSquare", "type": "double", "doc": ""}]}}],
"doc": ""}]}], "doc": ""}, {"name": "consentStatus", "type": ["null", {"type": "record", "name":
"ConsentStatus", "doc": "", "fields": [{"name": "programmeConsent", "type": "boolean", "doc": "",
"default": false}, {"name": "primaryFindingConsent", "type": "boolean", "doc": "", "default":
false}, {"name": "secondaryFindingConsent", "type": "boolean", "doc": "", "default": false},
{"name": "carrierStatusConsent", "type": "boolean", "doc": "", "default": false}]}], "doc": ""},
{"name": "samples", "type": ["null", {"type": "array", "items": {"type": "record", "name": "Sample",
"fields": [{"name": "sampleId", "type": "string", "doc": ""}, {"name": "labSampleId", "type": "int",
"doc": ""}, {"name": "source", "type": ["null", {"type": "enum", "name": "SampleSource", "doc": "",
"symbols": ["TUMOUR", "BONE_MARROW_ASPIRATE_TUMOUR_SORTED_CELLS",
"BONE_MARROW_ASPIRATE_TUMOUR_CELLS", "BLOOD", "SALIVA", "FIBROBLAST", "TISSUE"]}], "doc": ""},
{"name": "product", "type": ["null", {"type": "enum", "name": "Product", "symbols": ["DNA",
"RNA"]}], "doc": ""}, {"name": "preparationMethod", "type": ["null", {"type": "enum", "name":
"PreparationMethod", "symbols": ["EDTA", "ORAGENE", "FF", "FFPE", "CD128_SORTED_CELLS",
"ASPIRATE"]}], "doc": ""}]}}], "doc": ""}, {"name": "inbreedingCoefficient", "type": ["null",
{"type": "record", "name": "InbreedingCoefficient", "doc": "", "fields": [{"name": "sampleId",
"type": "string", "doc": ""}, {"name": "program", "type": "string", "doc": ""}, {"name": "version",
"type": "string", "doc": ""}, {"name": "estimationMethod", "type": "string", "doc": ""}, {"name":
"coefficient", "type": "double", "doc": ""}, {"name": "standardError", "type": ["null", "double"],
"doc": ""}]}], "doc": ""}, {"name": "additionalInformation", "type": ["null", {"type": "map",
"values": "string"}], "doc": ""}]}
"""
    # Parsed Avro schema object used for (de)serialisation.
    schema = avro_parse(_schemaSource)
    # Field names that must be present for a valid record.
    requiredFields = {
        "additionalInformation",
        "adoptedStatus",
        "affectionStatus",
        "ancestries",
        "consanguineousParents",
        "consentStatus",
        "disorderList",
        "fatherId",
        "gelSuperFamilyId",
        "hpoTermList",
        "inbreedingCoefficient",
        "isProband",
        "lifeStatus",
        "monozygotic",
        "motherId",
        "participantId",
        "participantQCState",
        "pedigreeId",
        "personKaryotypicSex",
        "samples",
        "sex",
        "superFatherId",
        "superMotherId",
        "twinGroup",
        "yearOfBirth",
    }
    @classmethod
    def isEmbeddedType(cls, fieldName):
        # True when fieldName holds a nested ProtocolElement type.
        embeddedTypes = {
            'ancestries': Ancestries,
            'consentStatus': ConsentStatus,
            'disorderList': Disorder,
            'hpoTermList': HpoTerm,
            'inbreedingCoefficient': InbreedingCoefficient,
            'samples': Sample,
        }
        return fieldName in embeddedTypes
    @classmethod
    def getEmbeddedType(cls, fieldName):
        # Returns the nested type class; raises KeyError for other names.
        embeddedTypes = {
            'ancestries': Ancestries,
            'consentStatus': ConsentStatus,
            'disorderList': Disorder,
            'hpoTermList': HpoTerm,
            'inbreedingCoefficient': InbreedingCoefficient,
            'samples': Sample,
        }
        return embeddedTypes[fieldName]
    __slots__ = [
        'additionalInformation', 'adoptedStatus', 'affectionStatus',
        'ancestries', 'consanguineousParents', 'consentStatus',
        'disorderList', 'fatherId', 'gelSuperFamilyId', 'hpoTermList',
        'inbreedingCoefficient', 'isProband', 'lifeStatus',
        'monozygotic', 'motherId', 'participantId',
        'participantQCState', 'pedigreeId', 'personKaryotypicSex',
        'samples', 'sex', 'superFatherId', 'superMotherId',
        'twinGroup', 'yearOfBirth'
    ]
    def __init__(self, **kwargs):
        # Every field defaults to None when not supplied.
        self.additionalInformation = kwargs.get(
            'additionalInformation', None)
        self.adoptedStatus = kwargs.get(
            'adoptedStatus', None)
        self.affectionStatus = kwargs.get(
            'affectionStatus', None)
        self.ancestries = kwargs.get(
            'ancestries', None)
        self.consanguineousParents = kwargs.get(
            'consanguineousParents', None)
        self.consentStatus = kwargs.get(
            'consentStatus', None)
        self.disorderList = kwargs.get(
            'disorderList', None)
        self.fatherId = kwargs.get(
            'fatherId', None)
        self.gelSuperFamilyId = kwargs.get(
            'gelSuperFamilyId', None)
        self.hpoTermList = kwargs.get(
            'hpoTermList', None)
        self.inbreedingCoefficient = kwargs.get(
            'inbreedingCoefficient', None)
        self.isProband = kwargs.get(
            'isProband', None)
        self.lifeStatus = kwargs.get(
            'lifeStatus', None)
        self.monozygotic = kwargs.get(
            'monozygotic', None)
        self.motherId = kwargs.get(
            'motherId', None)
        self.participantId = kwargs.get(
            'participantId', None)
        self.participantQCState = kwargs.get(
            'participantQCState', None)
        self.pedigreeId = kwargs.get(
            'pedigreeId', None)
        self.personKaryotypicSex = kwargs.get(
            'personKaryotypicSex', None)
        self.samples = kwargs.get(
            'samples', None)
        self.sex = kwargs.get(
            'sex', None)
        self.superFatherId = kwargs.get(
            'superFatherId', None)
        self.superMotherId = kwargs.get(
            'superMotherId', None)
        self.twinGroup = kwargs.get(
            'twinGroup', None)
        self.yearOfBirth = kwargs.get(
            'yearOfBirth', None)
class Penetrance(object):
    """
    String constants for the penetrance assumed in the analysis.
    """
    complete = "complete"
    incomplete = "incomplete"

    def __hash__(self):
        # Derive the hash from the string form of the instance.
        return hash(str(self))
class PersonKaryotipicSex(object):
    """
    String constants for karyotypic sex (note: class name spelling
    matches the generated Avro enum, including the 'Karyotipic' typo).
    """
    UNKNOWN = "UNKNOWN"
    XX = "XX"
    XY = "XY"
    XO = "XO"
    XXY = "XXY"
    XXX = "XXX"
    XXYY = "XXYY"
    XXXY = "XXXY"
    XXXX = "XXXX"
    XYY = "XYY"
    OTHER = "OTHER"

    def __hash__(self):
        # Derive the hash from the string form of the instance.
        return hash(str(self))
class PhenotypesSolved(object):
    """
    String constants for whether phenotypes were solved
    (yes/no/partially/unknown).
    """
    yes = "yes"
    no = "no"
    partially = "partially"
    unknown = "unknown"

    def __hash__(self):
        # Derive the hash from the string form of the instance.
        return hash(str(self))
class PreparationMethod(object):
    """
    String constants for sample preparation methods as declared in the
    Avro enum of the same name.
    """
    EDTA = "EDTA"
    ORAGENE = "ORAGENE"
    FF = "FF"
    FFPE = "FFPE"
    CD128_SORTED_CELLS = "CD128_SORTED_CELLS"
    ASPIRATE = "ASPIRATE"

    def __hash__(self):
        # Derive the hash from the string form of the instance.
        return hash(str(self))
class Product(object):
    """
    String constants for the sample product type (DNA or RNA).
    """
    DNA = "DNA"
    RNA = "RNA"

    def __hash__(self):
        # Derive the hash from the string form of the instance.
        return hash(str(self))
class Program(object):
    """
    String constants for the Genomics England programme a participant
    belongs to (cancer or rare disease).
    """
    cancer = "cancer"
    rare_disease = "rare_disease"

    def __hash__(self):
        # Derive the hash from the string form of the instance.
        return hash(str(self))
class ProgrammePhase(object):
    """
    String constants for programme phases as declared in the Avro enum
    of the same name.
    """
    CRUK = "CRUK"
    OXFORD = "OXFORD"
    CLL = "CLL"
    IIP = "IIP"
    MAIN = "MAIN"
    EXPT = "EXPT"

    def __hash__(self):
        # Derive the hash from the string form of the instance.
        return hash(str(self))
class Progression(object):
    """
    String constants for phenotype progression (PROGRESSIVE or
    NONPROGRESSIVE).
    """
    PROGRESSIVE = "PROGRESSIVE"
    NONPROGRESSIVE = "NONPROGRESSIVE"

    def __hash__(self):
        # Derive the hash from the string form of the instance.
        return hash(str(self))
class RDFamilyChange(ProtocolElement):
    """
    A rare-disease family change notification: carries the change `code`,
    the `FamilyId` it applies to, and the full updated `Family` pedigree.
    """
    # Machine-generated Avro schema for this record. Do not edit by hand:
    # the parsed `schema` below and the embedded-type helpers must agree
    # with this literal byte-for-byte.
    _schemaSource = """
{"type": "record", "name": "RDFamilyChange", "namespace": "org.gel.models.participant.avro",
"fields": [{"name": "FamilyId", "type": "string", "doc": ""}, {"name": "code", "type": {"type":
"enum", "name": "RDFamilyChangeCode", "doc": "", "symbols": ["FamilyAdded", "FamilyDeleted",
"ProbandChanged", "ParticipantAdded", "ParticipantRemoved", "ConsentStatusChanged",
"AffectionStatusChanged", "PanelAssignmentChanged", "SexChanged", "SampleChanged"]}, "doc": ""},
{"name": "Family", "type": {"type": "record", "name": "Pedigree", "doc": "", "fields": [{"name":
"versionControl", "type": ["null", {"type": "record", "name": "VersionControl", "fields": [{"name":
"GitVersionControl", "type": "string", "doc": "", "default": "1.1.0"}]}], "doc": ""}, {"name":
"LDPCode", "type": ["null", "string"], "doc": ""}, {"name": "familyId", "type": "string", "doc":
""}, {"name": "members", "type": {"type": "array", "items": {"type": "record", "name":
"PedigreeMember", "doc": "", "fields": [{"name": "pedigreeId", "type": ["null", "int"], "doc": ""},
{"name": "isProband", "type": ["null", "boolean"], "doc": ""}, {"name": "participantId", "type":
["null", "string"], "doc": ""}, {"name": "participantQCState", "type": ["null", {"type": "enum",
"name": "ParticipantQCState", "doc": "", "symbols": ["noState",
"passedMedicalReviewReadyForInterpretation", "passedMedicalReviewNotReadyForInterpretation",
"queryToGel", "queryToGMC", "failed"]}], "doc": ""}, {"name": "gelSuperFamilyId", "type": ["null",
"string"], "doc": ""}, {"name": "sex", "type": {"type": "enum", "name": "Sex", "doc": "", "symbols":
["MALE", "FEMALE", "UNKNOWN"]}, "doc": ""}, {"name": "personKaryotypicSex", "type": ["null",
{"type": "enum", "name": "PersonKaryotipicSex", "doc": "", "symbols": ["UNKNOWN", "XX", "XY", "XO",
"XXY", "XXX", "XXYY", "XXXY", "XXXX", "XYY", "OTHER"]}], "doc": ""}, {"name": "yearOfBirth", "type":
["null", "int"], "doc": ""}, {"name": "fatherId", "type": ["null", "int"], "doc": ""}, {"name":
"motherId", "type": ["null", "int"], "doc": ""}, {"name": "superFatherId", "type": ["null", "int"],
"doc": ""}, {"name": "superMotherId", "type": ["null", "int"], "doc": ""}, {"name": "twinGroup",
"type": ["null", "int"], "doc": ""}, {"name": "monozygotic", "type": ["null", {"type": "enum",
"name": "TernaryOption", "doc": "", "symbols": ["yes", "no", "unknown"]}], "doc": ""}, {"name":
"adoptedStatus", "type": ["null", {"type": "enum", "name": "AdoptedStatus", "doc": "", "symbols":
["notadopted", "adoptedin", "adoptedout"]}], "doc": ""}, {"name": "lifeStatus", "type": ["null",
{"type": "enum", "name": "LifeStatus", "doc": "", "symbols": ["ALIVE", "ABORTED", "DECEASED",
"UNBORN", "STILLBORN", "MISCARRIAGE"]}], "doc": ""}, {"name": "consanguineousParents", "type":
["null", "TernaryOption"], "doc": ""}, {"name": "affectionStatus", "type": ["null", {"type": "enum",
"name": "AffectionStatus", "doc": "", "symbols": ["UNAFFECTED", "AFFECTED", "UNCERTAIN"]}], "doc":
""}, {"name": "disorderList", "type": ["null", {"type": "array", "items": {"type": "record", "name":
"Disorder", "doc": "", "fields": [{"name": "diseaseGroup", "type": ["null", "string"], "doc": ""},
{"name": "diseaseSubGroup", "type": ["null", "string"], "doc": ""}, {"name": "specificDisease",
"type": ["null", "string"], "doc": ""}, {"name": "ageOfOnset", "type": ["null", "float"], "doc":
""}]}}], "doc": ""}, {"name": "hpoTermList", "type": ["null", {"type": "array", "items": {"type":
"record", "name": "HpoTerm", "doc": "", "fields": [{"name": "term", "type": "string", "doc": ""},
{"name": "termPresence", "type": ["null", "TernaryOption"], "doc": ""}, {"name": "hpoBuildNumber",
"type": ["null", "string"], "doc": ""}, {"name": "modifiers", "type": ["null", {"type": "record",
"name": "HpoTermModifiers", "fields": [{"name": "laterality", "type": ["null", {"type": "enum",
"name": "Laterality", "symbols": ["RIGHT", "UNILATERAL", "BILATERAL", "LEFT"]}]}, {"name":
"progression", "type": ["null", {"type": "enum", "name": "Progression", "symbols": ["PROGRESSIVE",
"NONPROGRESSIVE"]}]}, {"name": "severity", "type": ["null", {"type": "enum", "name": "Severity",
"symbols": ["BORDERLINE", "MILD", "MODERATE", "SEVERE", "PROFOUND"]}]}, {"name": "spatialPattern",
"type": ["null", {"type": "enum", "name": "SpatialPattern", "symbols": ["DISTAL", "GENERALIZED",
"LOCALIZED", "PROXIMAL"]}]}]}], "doc": ""}, {"name": "ageOfOnset", "type": ["null", {"type": "enum",
"name": "AgeOfOnset", "symbols": ["EMBRYONAL_ONSET", "FETAL_ONSET", "NEONATAL_ONSET",
"INFANTILE_ONSET", "CHILDHOOD_ONSET", "JUVENILE_ONSET", "YOUNG_ADULT_ONSET", "LATE_ONSET",
"MIDDLE_AGE_ONSET"]}], "doc": ""}]}}], "doc": ""}, {"name": "ancestries", "type": ["null", {"type":
"record", "name": "Ancestries", "doc": "", "fields": [{"name": "mothersEthnicOrigin", "type":
["null", {"type": "enum", "name": "EthnicCategory", "doc": "", "symbols": ["D", "E", "F", "G", "A",
"B", "C", "L", "M", "N", "H", "J", "K", "P", "S", "R", "Z"]}], "doc": ""}, {"name":
"mothersOtherRelevantAncestry", "type": ["null", "string"], "doc": ""}, {"name":
"fathersEthnicOrigin", "type": ["null", "EthnicCategory"], "doc": ""}, {"name":
"fathersOtherRelevantAncestry", "type": ["null", "string"], "doc": ""}, {"name":
"chiSquare1KGenomesPhase3Pop", "type": ["null", {"type": "array", "items": {"type": "record",
"name": "ChiSquare1KGenomesPhase3Pop", "doc": "", "fields": [{"name": "kgSuperPopCategory", "type":
{"type": "enum", "name": "KgSuperPopCategory", "doc": "", "symbols": ["AFR", "AMR", "EAS", "EUR",
"SAS"]}, "doc": ""}, {"name": "kgPopCategory", "type": ["null", {"type": "enum", "name":
"KgPopCategory", "doc": "", "symbols": ["ACB", "ASW", "BEB", "CDX", "CEU", "CHB", "CHS", "CLM",
"ESN", "FIN", "GBR", "GIH", "GWD", "IBS", "ITU", "JPT", "KHV", "LWK", "MSL", "MXL", "PEL", "PJL",
"PUR", "STU", "TSI", "YRI"]}], "doc": ""}, {"name": "chiSquare", "type": "double", "doc": ""}]}}],
"doc": ""}]}], "doc": ""}, {"name": "consentStatus", "type": ["null", {"type": "record", "name":
"ConsentStatus", "doc": "", "fields": [{"name": "programmeConsent", "type": "boolean", "doc": "",
"default": false}, {"name": "primaryFindingConsent", "type": "boolean", "doc": "", "default":
false}, {"name": "secondaryFindingConsent", "type": "boolean", "doc": "", "default": false},
{"name": "carrierStatusConsent", "type": "boolean", "doc": "", "default": false}]}], "doc": ""},
{"name": "samples", "type": ["null", {"type": "array", "items": {"type": "record", "name": "Sample",
"fields": [{"name": "sampleId", "type": "string", "doc": ""}, {"name": "labSampleId", "type": "int",
"doc": ""}, {"name": "source", "type": ["null", {"type": "enum", "name": "SampleSource", "doc": "",
"symbols": ["TUMOUR", "BONE_MARROW_ASPIRATE_TUMOUR_SORTED_CELLS",
"BONE_MARROW_ASPIRATE_TUMOUR_CELLS", "BLOOD", "SALIVA", "FIBROBLAST", "TISSUE"]}], "doc": ""},
{"name": "product", "type": ["null", {"type": "enum", "name": "Product", "symbols": ["DNA",
"RNA"]}], "doc": ""}, {"name": "preparationMethod", "type": ["null", {"type": "enum", "name":
"PreparationMethod", "symbols": ["EDTA", "ORAGENE", "FF", "FFPE", "CD128_SORTED_CELLS",
"ASPIRATE"]}], "doc": ""}]}}], "doc": ""}, {"name": "inbreedingCoefficient", "type": ["null",
{"type": "record", "name": "InbreedingCoefficient", "doc": "", "fields": [{"name": "sampleId",
"type": "string", "doc": ""}, {"name": "program", "type": "string", "doc": ""}, {"name": "version",
"type": "string", "doc": ""}, {"name": "estimationMethod", "type": "string", "doc": ""}, {"name":
"coefficient", "type": "double", "doc": ""}, {"name": "standardError", "type": ["null", "double"],
"doc": ""}]}], "doc": ""}, {"name": "additionalInformation", "type": ["null", {"type": "map",
"values": "string"}], "doc": ""}]}}, "doc": ""}, {"name": "analysisPanels", "type": ["null",
{"type": "array", "items": {"type": "record", "name": "AnalysisPanel", "doc": "", "fields":
[{"name": "specificDisease", "type": "string", "doc": ""}, {"name": "panelName", "type": "string",
"doc": ""}, {"name": "panelVersion", "type": ["null", "string"], "doc": ""}, {"name":
"reviewOutcome", "type": "string", "doc": ""}, {"name": "multipleGeneticOrigins", "type": "string",
"doc": ""}]}}], "doc": ""}, {"name": "diseasePenetrances", "type": ["null", {"type": "array",
"items": {"type": "record", "name": "DiseasePenetrance", "doc": "", "fields": [{"name":
"specificDisease", "type": "string", "doc": ""}, {"name": "penetrance", "type": {"type": "enum",
"name": "Penetrance", "doc": "", "symbols": ["complete", "incomplete"]}, "doc": ""}]}}], "doc": ""},
{"name": "readyForAnalysis", "type": "boolean", "doc": ""}, {"name": "familyQCState", "type":
["null", {"type": "enum", "name": "FamilyQCState", "doc": "", "symbols": ["noState",
"passedMedicalReviewReadyForInterpretation", "passedMedicalReviewNotReadyForInterpretation",
"queryToGel", "queryToGMC", "failed"]}], "doc": ""}]}, "doc": ""}]}
"""
    # Parsed Avro schema object built from the literal above.
    schema = avro_parse(_schemaSource)
    # Field names that must be present for this record to validate.
    requiredFields = {
        "Family",
        "FamilyId",
        "code",
    }

    @classmethod
    def isEmbeddedType(cls, fieldName):
        """Return True if `fieldName` holds an embedded protocol type."""
        embeddedTypes = {
            'Family': Pedigree,
        }
        return fieldName in embeddedTypes

    @classmethod
    def getEmbeddedType(cls, fieldName):
        """Return the protocol class for `fieldName` (KeyError if not embedded)."""
        embeddedTypes = {
            'Family': Pedigree,
        }
        return embeddedTypes[fieldName]

    __slots__ = [
        'Family', 'FamilyId', 'code'
    ]

    def __init__(self, **kwargs):
        # Every attribute comes from kwargs; embedded records default to an
        # empty instance, scalar fields default to None.
        self.Family = kwargs.get(
            'Family', Pedigree())
        self.FamilyId = kwargs.get(
            'FamilyId', None)
        self.code = kwargs.get(
            'code', None)
class RDFamilyChangeCode(object):
    """
    Type of change applied to a rare-disease family.

    * `FamilyAdded` / `FamilyDeleted`: the family is new / should be removed.
    * `ProbandChanged`: a different family member is now the proband.
    * `ParticipantAdded` / `ParticipantRemoved`: a participant joined or left.
    * `ConsentStatusChanged`: consent status changed for one or more members.
    * `AffectionStatusChanged`: HPO terms or disorders changed for members.
    * `PanelAssignmentChanged`: the family's gene panels changed.
    * `SexChanged`: sex changed for one or more members.
    * `SampleChanged`: the sample(s) of one or more members changed.
    """

    FamilyAdded = "FamilyAdded"
    FamilyDeleted = "FamilyDeleted"
    ProbandChanged = "ProbandChanged"
    ParticipantAdded = "ParticipantAdded"
    ParticipantRemoved = "ParticipantRemoved"
    ConsentStatusChanged = "ConsentStatusChanged"
    AffectionStatusChanged = "AffectionStatusChanged"
    PanelAssignmentChanged = "PanelAssignmentChanged"
    SexChanged = "SexChanged"
    SampleChanged = "SampleChanged"

    def __hash__(self):
        # Hash via the string form so values behave consistently in sets/dicts.
        return hash(str(self))
class RareDiseaseExitQuestionnaire(ProtocolElement):
    """
    The rare disease programme exit questionnaire: reporter, event date,
    family-level questions and per-variant-group questions.
    """
    # Machine-generated Avro schema for this record. Do not edit by hand:
    # the parsed `schema` below and the embedded-type helpers must agree
    # with this literal byte-for-byte.
    _schemaSource = """
{"type": "record", "name": "RareDiseaseExitQuestionnaire", "namespace":
"org.gel.models.report.avro", "doc": "", "fields": [{"name": "eventDate", "type": "string", "doc":
""}, {"name": "reporter", "type": "string", "doc": ""}, {"name": "familyLevelQuestions", "type":
{"type": "record", "name": "FamilyLevelQuestions", "doc": "", "fields": [{"name":
"caseSolvedFamily", "type": {"type": "enum", "name": "CaseSolvedFamily", "symbols": ["yes", "no",
"partially", "unknown"]}, "doc": ""}, {"name": "segregationQuestion", "type": {"type": "enum",
"name": "SegregationQuestion", "symbols": ["yes", "no"]}, "doc": ""}, {"name": "additionalComments",
"type": "string", "doc": ""}]}, "doc": ""}, {"name": "variantGroupLevelQuestions", "type": {"type":
"array", "items": {"type": "record", "name": "VariantGroupLevelQuestions", "doc": "", "fields":
[{"name": "variantGroup", "type": "int", "doc": ""}, {"name": "variantLevelQuestions", "type":
{"type": "array", "items": {"type": "record", "name": "VariantLevelQuestions", "doc": "", "fields":
[{"name": "variantDetails", "type": "string", "doc": ""}, {"name": "confirmationDecision", "type":
{"type": "enum", "name": "ConfirmationDecision", "symbols": ["yes", "no", "na"]}, "doc": ""},
{"name": "confirmationOutcome", "type": {"type": "enum", "name": "ConfirmationOutcome", "symbols":
["yes", "no", "na"]}, "doc": ""}, {"name": "reportingQuestion", "type": {"type": "enum", "name":
"ReportingQuestion", "symbols": ["yes", "no", "na"]}, "doc": ""}, {"name": "acmgClassification",
"type": {"type": "enum", "name": "ACMGClassification", "symbols": ["pathogenic_variant",
"likely_pathogenic_variant", "variant_of_unknown_clinical_significance", "likely_benign_variant",
"benign_variant", "not_assessed"]}, "doc": ""}, {"name": "publications", "type": "string", "doc":
""}]}}, "doc": ""}, {"name": "actionability", "type": {"type": "enum", "name": "Actionability",
"symbols": ["yes", "no", "not_yet", "na"]}, "doc": ""}, {"name": "clinicalUtility", "type": {"type":
"array", "items": {"type": "enum", "name": "ClinicalUtility", "symbols": ["none",
"change_in_medication", "surgical_option", "additional_surveillance_for_proband_or_relatives",
"clinical_trial_eligibility", "informs_reproductive_choice", "unknown", "other"]}}, "doc": ""},
{"name": "phenotypesSolved", "type": {"type": "enum", "name": "PhenotypesSolved", "symbols": ["yes",
"no", "partially", "unknown"]}, "doc": ""}, {"name": "phenotypesExplained", "type": ["null",
{"type": "array", "items": "string"}], "doc": ""}]}}, "doc": ""}]}
"""
    # Parsed Avro schema object built from the literal above.
    schema = avro_parse(_schemaSource)
    # Field names that must be present for this record to validate.
    requiredFields = {
        "eventDate",
        "familyLevelQuestions",
        "reporter",
        "variantGroupLevelQuestions",
    }

    @classmethod
    def isEmbeddedType(cls, fieldName):
        """Return True if `fieldName` holds an embedded protocol type."""
        embeddedTypes = {
            'familyLevelQuestions': FamilyLevelQuestions,
            'variantGroupLevelQuestions': VariantGroupLevelQuestions,
        }
        return fieldName in embeddedTypes

    @classmethod
    def getEmbeddedType(cls, fieldName):
        """Return the protocol class for `fieldName` (KeyError if not embedded)."""
        embeddedTypes = {
            'familyLevelQuestions': FamilyLevelQuestions,
            'variantGroupLevelQuestions': VariantGroupLevelQuestions,
        }
        return embeddedTypes[fieldName]

    __slots__ = [
        'eventDate', 'familyLevelQuestions', 'reporter',
        'variantGroupLevelQuestions'
    ]

    def __init__(self, **kwargs):
        # Every attribute comes from kwargs; embedded records default to an
        # empty instance, scalar fields default to None.
        self.eventDate = kwargs.get(
            'eventDate', None)
        self.familyLevelQuestions = kwargs.get(
            'familyLevelQuestions', FamilyLevelQuestions())
        self.reporter = kwargs.get(
            'reporter', None)
        self.variantGroupLevelQuestions = kwargs.get(
            'variantGroupLevelQuestions', None)
class ReportEvent(ProtocolElement):
    """
    A report event holds all the information about why a given variant
    is relevant to report. The same variant may have several
    report events. For instance, we may have two report events from
    the tiering process when two panels are analysed, a positive
    report from a Genomic Medicine Centre (GMC) will correspond to an
    additional report event.
    """
    # Machine-generated Avro schema for this record. Do not edit by hand:
    # the parsed `schema` below and the embedded-type helpers must agree
    # with this literal byte-for-byte.
    _schemaSource = """
{"type": "record", "name": "ReportEvent", "namespace": "org.gel.models.report.avro", "doc": "",
"fields": [{"name": "reportEventId", "type": "string", "doc": ""}, {"name": "phenotypes", "type":
{"type": "array", "items": "string"}, "doc": ""}, {"name": "variantConsequences", "type": {"type":
"array", "items": {"type": "record", "name": "VariantConsequence", "doc": "", "fields": [{"name":
"id", "type": "string", "doc": ""}, {"name": "name", "type": ["null", "string"], "doc": ""}]}},
"doc": ""}, {"name": "genePanel", "type": ["null", {"type": "record", "name": "GenePanel", "doc":
"", "fields": [{"name": "panelName", "type": "string", "doc": ""}, {"name": "panelVersion", "type":
["null", "string"], "doc": ""}]}], "doc": ""}, {"name": "modeOfInheritance", "type": {"type":
"enum", "name": "ReportedModeOfInheritance", "doc": "", "symbols": ["monoallelic",
"monoallelic_not_imprinted", "monoallelic_maternally_imprinted", "monoallelic_paternally_imprinted",
"biallelic", "monoallelic_and_biallelic", "monoallelic_and_more_severe_biallelic",
"xlinked_biallelic", "xlinked_monoallelic", "mitochondrial", "unknown"]}, "doc": ""}, {"name":
"genomicEntities", "type": {"type": "array", "items": {"type": "record", "name": "GenomicEntity",
"doc": "", "fields": [{"name": "type", "type": {"type": "enum", "name": "GenomicEntityType", "doc":
"", "symbols": ["regulatory_region", "gene", "transcript", "intergenic"]}, "doc": ""}, {"name":
"ensemblId", "type": "string", "doc": ""}, {"name": "geneSymbol", "type": ["null", "string"], "doc":
""}, {"name": "otherIds", "type": ["null", {"type": "map", "values": "string"}], "doc": ""}]}},
"doc": ""}, {"name": "penetrance", "type": ["null", {"type": "enum", "name": "Penetrance",
"namespace": "org.gel.models.participant.avro", "doc": "", "symbols": ["complete", "incomplete"]}],
"doc": ""}, {"name": "score", "type": ["null", "float"], "doc": ""}, {"name":
"vendorSpecificScores", "type": ["null", {"type": "map", "values": "float"}], "doc": ""}, {"name":
"variantClassification", "type": ["null", {"type": "record", "name": "VariantClassification", "doc":
"", "fields": [{"name": "clinicalSignificance", "type": ["null", {"type": "enum", "name":
"ClinicalSignificance", "symbols": ["benign", "likely_benign", "VUS", "likely_pathogenic",
"pathogenic", "uncertain_significance"]}], "doc": ""}, {"name": "drugResponseClassification",
"type": ["null", {"type": "enum", "name": "DrugResponseClassification", "symbols": ["responsive",
"resistant", "toxicity", "indication", "contraindication", "dosing", "increased_monitoring",
"efficacy"]}], "doc": ""}, {"name": "traitAssociation", "type": ["null", {"type": "enum", "name":
"TraitAssociation", "symbols": ["established_risk_allele", "likely_risk_allele",
"uncertain_risk_allele", "protective"]}], "doc": ""}, {"name": "tumorigenesisClassification",
"type": ["null", {"type": "enum", "name": "TumorigenesisClassification", "symbols": ["driver",
"passenger", "modifier"]}], "doc": ""}, {"name": "functionalEffect", "type": ["null", {"type":
"enum", "name": "VariantFunctionalEffect", "symbols": ["dominant_negative_variant",
"gain_of_function_variant", "lethal_variant", "loss_of_function_variant", "loss_of_heterozygosity",
"null_variant"]}], "doc": ""}]}], "doc": ""}, {"name": "fullyExplainsPhenotype", "type": ["null",
"boolean"], "doc": ""}, {"name": "groupOfVariants", "type": ["null", "int"], "doc": ""}, {"name":
"eventJustification", "type": ["null", "string"], "doc": ""}, {"name": "tier", "type": ["null",
{"type": "enum", "name": "Tier", "doc": "", "symbols": ["NONE", "TIER1", "TIER2", "TIER3", "TIER4",
"TIER5"]}], "doc": ""}]}
"""
    # Parsed Avro schema object built from the literal above.
    schema = avro_parse(_schemaSource)
    # Field names that must be present for this record to validate.
    requiredFields = {
        "eventJustification",
        "fullyExplainsPhenotype",
        "genePanel",
        "genomicEntities",
        "groupOfVariants",
        "modeOfInheritance",
        "penetrance",
        "phenotypes",
        "reportEventId",
        "score",
        "tier",
        "variantClassification",
        "variantConsequences",
        "vendorSpecificScores",
    }

    @classmethod
    def isEmbeddedType(cls, fieldName):
        """Return True if `fieldName` holds an embedded protocol type."""
        embeddedTypes = {
            'genePanel': GenePanel,
            'genomicEntities': GenomicEntity,
            'variantClassification': VariantClassification,
            'variantConsequences': VariantConsequence,
        }
        return fieldName in embeddedTypes

    @classmethod
    def getEmbeddedType(cls, fieldName):
        """Return the protocol class for `fieldName` (KeyError if not embedded)."""
        embeddedTypes = {
            'genePanel': GenePanel,
            'genomicEntities': GenomicEntity,
            'variantClassification': VariantClassification,
            'variantConsequences': VariantConsequence,
        }
        return embeddedTypes[fieldName]

    __slots__ = [
        'eventJustification', 'fullyExplainsPhenotype', 'genePanel',
        'genomicEntities', 'groupOfVariants', 'modeOfInheritance',
        'penetrance', 'phenotypes', 'reportEventId', 'score', 'tier',
        'variantClassification', 'variantConsequences',
        'vendorSpecificScores'
    ]

    def __init__(self, **kwargs):
        # Every attribute comes from kwargs and defaults to None.
        self.eventJustification = kwargs.get(
            'eventJustification', None)
        self.fullyExplainsPhenotype = kwargs.get(
            'fullyExplainsPhenotype', None)
        self.genePanel = kwargs.get(
            'genePanel', None)
        self.genomicEntities = kwargs.get(
            'genomicEntities', None)
        self.groupOfVariants = kwargs.get(
            'groupOfVariants', None)
        self.modeOfInheritance = kwargs.get(
            'modeOfInheritance', None)
        self.penetrance = kwargs.get(
            'penetrance', None)
        self.phenotypes = kwargs.get(
            'phenotypes', None)
        self.reportEventId = kwargs.get(
            'reportEventId', None)
        self.score = kwargs.get(
            'score', None)
        self.tier = kwargs.get(
            'tier', None)
        self.variantClassification = kwargs.get(
            'variantClassification', None)
        self.variantConsequences = kwargs.get(
            'variantConsequences', None)
        self.vendorSpecificScores = kwargs.get(
            'vendorSpecificScores', None)
class ReportEventCancer(ProtocolElement):
    """
    A report event holds all the information about why a given variant
    is relevant to report. This is the report event corresponding
    to the cancer program
    """
    # Machine-generated Avro schema for this record. Do not edit by hand:
    # the parsed `schema` below and the embedded-type helpers must agree
    # with this literal byte-for-byte.
    _schemaSource = """
{"type": "record", "name": "ReportEventCancer", "namespace": "org.gel.models.report.avro", "doc":
"", "fields": [{"name": "reportEventId", "type": "string", "doc": ""}, {"name": "genomicEntities",
"type": {"type": "array", "items": {"type": "record", "name": "GenomicEntity", "doc": "", "fields":
[{"name": "type", "type": {"type": "enum", "name": "GenomicEntityType", "doc": "", "symbols":
["regulatory_region", "gene", "transcript", "intergenic"]}, "doc": ""}, {"name": "ensemblId",
"type": "string", "doc": ""}, {"name": "geneSymbol", "type": ["null", "string"], "doc": ""},
{"name": "otherIds", "type": ["null", {"type": "map", "values": "string"}], "doc": ""}]}}, "doc":
""}, {"name": "variantConsequences", "type": {"type": "array", "items": {"type": "record", "name":
"VariantConsequence", "doc": "", "fields": [{"name": "id", "type": "string", "doc": ""}, {"name":
"name", "type": ["null", "string"], "doc": ""}]}}, "doc": ""}, {"name": "actions", "type": ["null",
{"type": "array", "items": {"type": "record", "name": "Action", "doc": "", "fields": [{"name":
"actionType", "type": ["null", {"type": "enum", "name": "ActionType", "doc": "", "symbols":
["therapy", "therapeutic", "prognosis", "diagnosis"]}], "doc": ""}, {"name": "references", "type":
["null", {"type": "array", "items": "string"}], "doc": ""}, {"name": "status", "type": ["null",
{"type": "enum", "name": "ActionStatus", "doc": "", "symbols": ["clinical", "pre_clinical"]}],
"doc": ""}, {"name": "variantActionable", "type": "boolean", "doc": ""}, {"name": "url", "type":
["null", "string"], "doc": ""}, {"name": "evidenceType", "type": ["null", "string"], "doc": ""},
{"name": "source", "type": "string", "doc": ""}]}}], "doc": ""}, {"name": "groupOfVariants", "type":
["null", "int"], "doc": ""}, {"name": "eventJustification", "type": ["null", "string"], "doc": ""},
{"name": "score", "type": ["null", "float"], "doc": ""}, {"name": "vendorSpecificScores", "type":
["null", {"type": "map", "values": "float"}], "doc": ""}, {"name": "variantClassification", "type":
["null", {"type": "record", "name": "VariantClassification", "doc": "", "fields": [{"name":
"clinicalSignificance", "type": ["null", {"type": "enum", "name": "ClinicalSignificance", "symbols":
["benign", "likely_benign", "VUS", "likely_pathogenic", "pathogenic", "uncertain_significance"]}],
"doc": ""}, {"name": "drugResponseClassification", "type": ["null", {"type": "enum", "name":
"DrugResponseClassification", "symbols": ["responsive", "resistant", "toxicity", "indication",
"contraindication", "dosing", "increased_monitoring", "efficacy"]}], "doc": ""}, {"name":
"traitAssociation", "type": ["null", {"type": "enum", "name": "TraitAssociation", "symbols":
["established_risk_allele", "likely_risk_allele", "uncertain_risk_allele", "protective"]}], "doc":
""}, {"name": "tumorigenesisClassification", "type": ["null", {"type": "enum", "name":
"TumorigenesisClassification", "symbols": ["driver", "passenger", "modifier"]}], "doc": ""},
{"name": "functionalEffect", "type": ["null", {"type": "enum", "name": "VariantFunctionalEffect",
"symbols": ["dominant_negative_variant", "gain_of_function_variant", "lethal_variant",
"loss_of_function_variant", "loss_of_heterozygosity", "null_variant"]}], "doc": ""}]}], "doc": ""},
{"name": "roleInCancer", "type": ["null", {"type": "array", "items": {"type": "enum", "name":
"RoleInCancer", "doc": "", "symbols": ["oncogene", "tumor_suppressor_gene", "both"]}}], "doc": ""},
{"name": "tier", "type": ["null", {"type": "enum", "name": "Tier", "doc": "", "symbols": ["NONE",
"TIER1", "TIER2", "TIER3", "TIER4", "TIER5"]}], "doc": ""}]}
"""
    # Parsed Avro schema object built from the literal above.
    schema = avro_parse(_schemaSource)
    # Field names that must be present for this record to validate.
    requiredFields = {
        "actions",
        "eventJustification",
        "genomicEntities",
        "groupOfVariants",
        "reportEventId",
        "roleInCancer",
        "score",
        "tier",
        "variantClassification",
        "variantConsequences",
        "vendorSpecificScores",
    }

    @classmethod
    def isEmbeddedType(cls, fieldName):
        """Return True if `fieldName` holds an embedded protocol type."""
        embeddedTypes = {
            'actions': Action,
            'genomicEntities': GenomicEntity,
            'variantClassification': VariantClassification,
            'variantConsequences': VariantConsequence,
        }
        return fieldName in embeddedTypes

    @classmethod
    def getEmbeddedType(cls, fieldName):
        """Return the protocol class for `fieldName` (KeyError if not embedded)."""
        embeddedTypes = {
            'actions': Action,
            'genomicEntities': GenomicEntity,
            'variantClassification': VariantClassification,
            'variantConsequences': VariantConsequence,
        }
        return embeddedTypes[fieldName]

    __slots__ = [
        'actions', 'eventJustification', 'genomicEntities',
        'groupOfVariants', 'reportEventId', 'roleInCancer', 'score',
        'tier', 'variantClassification', 'variantConsequences',
        'vendorSpecificScores'
    ]

    def __init__(self, **kwargs):
        # Every attribute comes from kwargs and defaults to None.
        self.actions = kwargs.get(
            'actions', None)
        self.eventJustification = kwargs.get(
            'eventJustification', None)
        self.genomicEntities = kwargs.get(
            'genomicEntities', None)
        self.groupOfVariants = kwargs.get(
            'groupOfVariants', None)
        self.reportEventId = kwargs.get(
            'reportEventId', None)
        self.roleInCancer = kwargs.get(
            'roleInCancer', None)
        self.score = kwargs.get(
            'score', None)
        self.tier = kwargs.get(
            'tier', None)
        self.variantClassification = kwargs.get(
            'variantClassification', None)
        self.variantConsequences = kwargs.get(
            'variantConsequences', None)
        self.vendorSpecificScores = kwargs.get(
            'vendorSpecificScores', None)
class ReportVersionControl(ProtocolElement):
    """
    Version-control marker for generated reports; holds the model
    version string (default "5.0.0") in `gitVersionControl`.
    """
    # Machine-generated Avro schema for this record; do not edit by hand.
    _schemaSource = """
{"type": "record", "name": "ReportVersionControl", "namespace": "org.gel.models.report.avro",
"fields": [{"name": "gitVersionControl", "type": "string", "doc": "", "default": "5.0.0"}]}
"""
    schema = avro_parse(_schemaSource)
    requiredFields = {}

    @classmethod
    def isEmbeddedType(cls, fieldName):
        """This record embeds no protocol types, so membership is always False."""
        return fieldName in {}

    @classmethod
    def getEmbeddedType(cls, fieldName):
        """No embedded types exist; any lookup raises KeyError(fieldName)."""
        return {}[fieldName]

    __slots__ = [
        'gitVersionControl'
    ]

    def __init__(self, **kwargs):
        # Single field, defaulting to the schema's declared default.
        self.gitVersionControl = kwargs.get('gitVersionControl', '5.0.0')
class ReportedModeOfInheritance(object):
    """
    Modes of inheritance reported for a variant:

    * `monoallelic`: monoallelic, autosomal or pseudoautosomal, imprinted
      status unknown
    * `monoallelic_not_imprinted`: monoallelic, not imprinted
    * `monoallelic_maternally_imprinted`: monoallelic, maternally imprinted
      (paternal allele expressed)
    * `monoallelic_paternally_imprinted`: monoallelic, paternally imprinted
      (maternal allele expressed)
    * `biallelic`: biallelic, autosomal or pseudoautosomal
    * `monoallelic_and_biallelic`: both monoallelic and biallelic
    * `monoallelic_and_more_severe_biallelic`: both, with biallelic
      mutations causing a more severe disease form
    * `xlinked_biallelic`: X-linked, hemizygous in males, biallelic in
      females
    * `xlinked_monoallelic`: X-linked, hemizygous in males; monoallelic
      mutations in females may cause (possibly milder, later-onset) disease
    * `mitochondrial`: mitochondrial
    * `unknown`: unknown
    """

    monoallelic = "monoallelic"
    monoallelic_not_imprinted = "monoallelic_not_imprinted"
    monoallelic_maternally_imprinted = "monoallelic_maternally_imprinted"
    monoallelic_paternally_imprinted = "monoallelic_paternally_imprinted"
    biallelic = "biallelic"
    monoallelic_and_biallelic = "monoallelic_and_biallelic"
    monoallelic_and_more_severe_biallelic = "monoallelic_and_more_severe_biallelic"
    xlinked_biallelic = "xlinked_biallelic"
    xlinked_monoallelic = "xlinked_monoallelic"
    mitochondrial = "mitochondrial"
    unknown = "unknown"

    def __hash__(self):
        # Hash via the string form so values behave consistently in sets/dicts.
        return hash(str(self))
class ReportedVariant(ProtocolElement):
    """
    A reported variant
    """
    # Embedded Avro schema (JSON) for this record; parsed into `schema`
    # below. Generated content - keep byte-for-byte as emitted.
    _schemaSource = """
{"type": "record", "name": "ReportedVariant", "namespace": "org.gel.models.report.avro", "doc": "",
"fields": [{"name": "variantCoordinates", "type": {"type": "record", "name": "VariantCoordinates",
"doc": "", "fields": [{"name": "chromosome", "type": "string", "doc": ""}, {"name": "position",
"type": "int", "doc": ""}, {"name": "reference", "type": "string", "doc": ""}, {"name": "alternate",
"type": "string", "doc": ""}, {"name": "assembly", "type": {"type": "enum", "name": "Assembly",
"doc": "", "symbols": ["GRCh38", "GRCh37"]}, "doc": ""}]}, "doc": ""}, {"name": "dbSnpId", "type":
["null", "string"], "doc": ""}, {"name": "cosmicIds", "type": ["null", {"type": "array", "items":
"string"}], "doc": ""}, {"name": "clinVarIds", "type": ["null", {"type": "array", "items":
"string"}], "doc": ""}, {"name": "genomicChanges", "type": ["null", {"type": "array", "items":
"string"}], "doc": ""}, {"name": "cdnaChanges", "type": ["null", {"type": "array", "items":
"string"}], "doc": ""}, {"name": "proteinChanges", "type": ["null", {"type": "array", "items":
"string"}], "doc": ""}, {"name": "variantCalls", "type": {"type": "array", "items": {"type":
"record", "name": "VariantCall", "doc": "", "fields": [{"name": "participantId", "type": "string",
"doc": ""}, {"name": "sampleId", "type": "string", "doc": ""}, {"name": "zygosity", "type": {"type":
"enum", "name": "Zygosity", "doc": "", "symbols": ["reference_homozygous", "heterozygous",
"alternate_homozygous", "missing", "half_missing_reference", "half_missing_alternate",
"alternate_hemizigous", "reference_hemizigous", "unk", "na"]}, "doc": ""}, {"name": "phaseSet",
"type": ["null", "int"], "doc": ""}, {"name": "vaf", "type": ["null", "double"], "doc": ""},
{"name": "depthReference", "type": ["null", "int"], "doc": ""}, {"name": "depthAlternate", "type":
["null", "int"], "doc": ""}, {"name": "alleleOrigins", "type": {"type": "array", "items": {"type":
"enum", "name": "AlleleOrigin", "doc": "", "symbols": ["de_novo_variant", "germline_variant",
"maternal_variant", "paternal_variant", "pedigree_specific_variant", "population_specific_variant",
"somatic_variant"]}}, "doc": ""}]}}, "doc": ""}, {"name": "reportEvents", "type": {"type": "array",
"items": {"type": "record", "name": "ReportEvent", "doc": "", "fields": [{"name": "reportEventId",
"type": "string", "doc": ""}, {"name": "phenotypes", "type": {"type": "array", "items": "string"},
"doc": ""}, {"name": "variantConsequences", "type": {"type": "array", "items": {"type": "record",
"name": "VariantConsequence", "doc": "", "fields": [{"name": "id", "type": "string", "doc": ""},
{"name": "name", "type": ["null", "string"], "doc": ""}]}}, "doc": ""}, {"name": "genePanel",
"type": ["null", {"type": "record", "name": "GenePanel", "doc": "", "fields": [{"name": "panelName",
"type": "string", "doc": ""}, {"name": "panelVersion", "type": ["null", "string"], "doc": ""}]}],
"doc": ""}, {"name": "modeOfInheritance", "type": {"type": "enum", "name":
"ReportedModeOfInheritance", "doc": "", "symbols": ["monoallelic", "monoallelic_not_imprinted",
"monoallelic_maternally_imprinted", "monoallelic_paternally_imprinted", "biallelic",
"monoallelic_and_biallelic", "monoallelic_and_more_severe_biallelic", "xlinked_biallelic",
"xlinked_monoallelic", "mitochondrial", "unknown"]}, "doc": ""}, {"name": "genomicEntities", "type":
{"type": "array", "items": {"type": "record", "name": "GenomicEntity", "doc": "", "fields":
[{"name": "type", "type": {"type": "enum", "name": "GenomicEntityType", "doc": "", "symbols":
["regulatory_region", "gene", "transcript", "intergenic"]}, "doc": ""}, {"name": "ensemblId",
"type": "string", "doc": ""}, {"name": "geneSymbol", "type": ["null", "string"], "doc": ""},
{"name": "otherIds", "type": ["null", {"type": "map", "values": "string"}], "doc": ""}]}}, "doc":
""}, {"name": "penetrance", "type": ["null", {"type": "enum", "name": "Penetrance", "namespace":
"org.gel.models.participant.avro", "doc": "", "symbols": ["complete", "incomplete"]}], "doc": ""},
{"name": "score", "type": ["null", "float"], "doc": ""}, {"name": "vendorSpecificScores", "type":
["null", {"type": "map", "values": "float"}], "doc": ""}, {"name": "variantClassification", "type":
["null", {"type": "record", "name": "VariantClassification", "doc": "", "fields": [{"name":
"clinicalSignificance", "type": ["null", {"type": "enum", "name": "ClinicalSignificance", "symbols":
["benign", "likely_benign", "VUS", "likely_pathogenic", "pathogenic", "uncertain_significance"]}],
"doc": ""}, {"name": "drugResponseClassification", "type": ["null", {"type": "enum", "name":
"DrugResponseClassification", "symbols": ["responsive", "resistant", "toxicity", "indication",
"contraindication", "dosing", "increased_monitoring", "efficacy"]}], "doc": ""}, {"name":
"traitAssociation", "type": ["null", {"type": "enum", "name": "TraitAssociation", "symbols":
["established_risk_allele", "likely_risk_allele", "uncertain_risk_allele", "protective"]}], "doc":
""}, {"name": "tumorigenesisClassification", "type": ["null", {"type": "enum", "name":
"TumorigenesisClassification", "symbols": ["driver", "passenger", "modifier"]}], "doc": ""},
{"name": "functionalEffect", "type": ["null", {"type": "enum", "name": "VariantFunctionalEffect",
"symbols": ["dominant_negative_variant", "gain_of_function_variant", "lethal_variant",
"loss_of_function_variant", "loss_of_heterozygosity", "null_variant"]}], "doc": ""}]}], "doc": ""},
{"name": "fullyExplainsPhenotype", "type": ["null", "boolean"], "doc": ""}, {"name":
"groupOfVariants", "type": ["null", "int"], "doc": ""}, {"name": "eventJustification", "type":
["null", "string"], "doc": ""}, {"name": "tier", "type": ["null", {"type": "enum", "name": "Tier",
"doc": "", "symbols": ["NONE", "TIER1", "TIER2", "TIER3", "TIER4", "TIER5"]}], "doc": ""}]}}, "doc":
""}, {"name": "additionalTextualVariantAnnotations", "type": ["null", {"type": "map", "values":
"string"}], "doc": ""}, {"name": "references", "type": ["null", {"type": "map", "values":
"string"}], "doc": ""}, {"name": "variantAttributes", "type": ["null", {"type": "record", "name":
"VariantAttributes", "doc": "", "fields": [{"name": "ihp", "type": ["null", "int"], "doc": ""},
{"name": "recurrentlyReported", "type": ["null", "boolean"], "doc": ""}, {"name": "fdp50", "type":
["null", "string"], "doc": ""}, {"name": "others", "type": ["null", {"type": "map", "values":
"string"}], "doc": ""}]}], "doc": ""}, {"name": "alleleFrequencies", "type": ["null", {"type":
"array", "items": {"type": "record", "name": "AlleleFrequency", "doc": "", "fields": [{"name":
"study", "type": "string", "doc": ""}, {"name": "population", "type": "string", "doc": ""}, {"name":
"alternateFrequency", "type": "float", "doc": ""}]}}], "doc": ""}, {"name":
"additionalNumericVariantAnnotations", "type": ["null", {"type": "map", "values": "float"}], "doc":
""}, {"name": "comments", "type": ["null", {"type": "array", "items": "string"}], "doc": ""},
{"name": "alleleOrigins", "type": {"type": "array", "items": "AlleleOrigin"}, "doc": ""}]}
"""
    # Parsed Avro schema object built from the JSON source above.
    schema = avro_parse(_schemaSource)
    # Field names that validation treats as required for this record.
    requiredFields = {
        "additionalNumericVariantAnnotations",
        "additionalTextualVariantAnnotations",
        "alleleFrequencies",
        "alleleOrigins",
        "cdnaChanges",
        "clinVarIds",
        "comments",
        "cosmicIds",
        "dbSnpId",
        "genomicChanges",
        "proteinChanges",
        "references",
        "reportEvents",
        "variantAttributes",
        "variantCalls",
        "variantCoordinates",
    }
    @classmethod
    def isEmbeddedType(cls, fieldName):
        """
        Return True when `fieldName` maps to an embedded protocol
        element class for this record.
        """
        embeddedTypes = {
            'alleleFrequencies': AlleleFrequency,
            'reportEvents': ReportEvent,
            'variantAttributes': VariantAttributes,
            'variantCalls': VariantCall,
            'variantCoordinates': VariantCoordinates,
        }
        return fieldName in embeddedTypes
    @classmethod
    def getEmbeddedType(cls, fieldName):
        """
        Return the embedded protocol element class for `fieldName`.
        Raises KeyError when the field is not an embedded type.
        """
        embeddedTypes = {
            'alleleFrequencies': AlleleFrequency,
            'reportEvents': ReportEvent,
            'variantAttributes': VariantAttributes,
            'variantCalls': VariantCall,
            'variantCoordinates': VariantCoordinates,
        }
        return embeddedTypes[fieldName]
    # Restrict instances to the record's fields (saves per-instance dicts).
    __slots__ = [
        'additionalNumericVariantAnnotations',
        'additionalTextualVariantAnnotations', 'alleleFrequencies',
        'alleleOrigins', 'cdnaChanges', 'clinVarIds', 'comments',
        'cosmicIds', 'dbSnpId', 'genomicChanges', 'proteinChanges',
        'references', 'reportEvents', 'variantAttributes',
        'variantCalls', 'variantCoordinates'
    ]
    def __init__(self, **kwargs):
        """
        Build the record from keyword arguments. Unspecified fields
        default to None, except `variantCoordinates`, which defaults to
        an empty VariantCoordinates instance.
        """
        self.additionalNumericVariantAnnotations = kwargs.get(
            'additionalNumericVariantAnnotations', None)
        self.additionalTextualVariantAnnotations = kwargs.get(
            'additionalTextualVariantAnnotations', None)
        self.alleleFrequencies = kwargs.get(
            'alleleFrequencies', None)
        self.alleleOrigins = kwargs.get(
            'alleleOrigins', None)
        self.cdnaChanges = kwargs.get(
            'cdnaChanges', None)
        self.clinVarIds = kwargs.get(
            'clinVarIds', None)
        self.comments = kwargs.get(
            'comments', None)
        self.cosmicIds = kwargs.get(
            'cosmicIds', None)
        self.dbSnpId = kwargs.get(
            'dbSnpId', None)
        self.genomicChanges = kwargs.get(
            'genomicChanges', None)
        self.proteinChanges = kwargs.get(
            'proteinChanges', None)
        self.references = kwargs.get(
            'references', None)
        self.reportEvents = kwargs.get(
            'reportEvents', None)
        self.variantAttributes = kwargs.get(
            'variantAttributes', None)
        self.variantCalls = kwargs.get(
            'variantCalls', None)
        self.variantCoordinates = kwargs.get(
            'variantCoordinates', VariantCoordinates())
class ReportedVariantCancer(ProtocolElement):
    """
    A reported variant in the cancer program
    """
    # Embedded Avro schema (JSON) for this record; parsed into `schema`
    # below. Generated content - keep byte-for-byte as emitted.
    _schemaSource = """
{"type": "record", "name": "ReportedVariantCancer", "namespace": "org.gel.models.report.avro",
"doc": "", "fields": [{"name": "variantCoordinates", "type": {"type": "record", "name":
"VariantCoordinates", "doc": "", "fields": [{"name": "chromosome", "type": "string", "doc": ""},
{"name": "position", "type": "int", "doc": ""}, {"name": "reference", "type": "string", "doc": ""},
{"name": "alternate", "type": "string", "doc": ""}, {"name": "assembly", "type": {"type": "enum",
"name": "Assembly", "doc": "", "symbols": ["GRCh38", "GRCh37"]}, "doc": ""}]}, "doc": ""}, {"name":
"dbSnpId", "type": ["null", "string"], "doc": ""}, {"name": "cosmicIds", "type": ["null", {"type":
"array", "items": "string"}], "doc": ""}, {"name": "clinVarIds", "type": ["null", {"type": "array",
"items": "string"}], "doc": ""}, {"name": "genomicChanges", "type": ["null", {"type": "array",
"items": "string"}], "doc": ""}, {"name": "cdnaChanges", "type": ["null", {"type": "array", "items":
"string"}], "doc": ""}, {"name": "proteinChanges", "type": ["null", {"type": "array", "items":
"string"}], "doc": ""}, {"name": "variantCalls", "type": {"type": "array", "items": {"type":
"record", "name": "VariantCall", "doc": "", "fields": [{"name": "participantId", "type": "string",
"doc": ""}, {"name": "sampleId", "type": "string", "doc": ""}, {"name": "zygosity", "type": {"type":
"enum", "name": "Zygosity", "doc": "", "symbols": ["reference_homozygous", "heterozygous",
"alternate_homozygous", "missing", "half_missing_reference", "half_missing_alternate",
"alternate_hemizigous", "reference_hemizigous", "unk", "na"]}, "doc": ""}, {"name": "phaseSet",
"type": ["null", "int"], "doc": ""}, {"name": "vaf", "type": ["null", "double"], "doc": ""},
{"name": "depthReference", "type": ["null", "int"], "doc": ""}, {"name": "depthAlternate", "type":
["null", "int"], "doc": ""}, {"name": "alleleOrigins", "type": {"type": "array", "items": {"type":
"enum", "name": "AlleleOrigin", "doc": "", "symbols": ["de_novo_variant", "germline_variant",
"maternal_variant", "paternal_variant", "pedigree_specific_variant", "population_specific_variant",
"somatic_variant"]}}, "doc": ""}]}}, "doc": ""}, {"name": "reportEvents", "type": {"type": "array",
"items": {"type": "record", "name": "ReportEventCancer", "doc": "", "fields": [{"name":
"reportEventId", "type": "string", "doc": ""}, {"name": "genomicEntities", "type": {"type": "array",
"items": {"type": "record", "name": "GenomicEntity", "doc": "", "fields": [{"name": "type", "type":
{"type": "enum", "name": "GenomicEntityType", "doc": "", "symbols": ["regulatory_region", "gene",
"transcript", "intergenic"]}, "doc": ""}, {"name": "ensemblId", "type": "string", "doc": ""},
{"name": "geneSymbol", "type": ["null", "string"], "doc": ""}, {"name": "otherIds", "type": ["null",
{"type": "map", "values": "string"}], "doc": ""}]}}, "doc": ""}, {"name": "variantConsequences",
"type": {"type": "array", "items": {"type": "record", "name": "VariantConsequence", "doc": "",
"fields": [{"name": "id", "type": "string", "doc": ""}, {"name": "name", "type": ["null", "string"],
"doc": ""}]}}, "doc": ""}, {"name": "actions", "type": ["null", {"type": "array", "items": {"type":
"record", "name": "Action", "doc": "", "fields": [{"name": "actionType", "type": ["null", {"type":
"enum", "name": "ActionType", "doc": "", "symbols": ["therapy", "therapeutic", "prognosis",
"diagnosis"]}], "doc": ""}, {"name": "references", "type": ["null", {"type": "array", "items":
"string"}], "doc": ""}, {"name": "status", "type": ["null", {"type": "enum", "name": "ActionStatus",
"doc": "", "symbols": ["clinical", "pre_clinical"]}], "doc": ""}, {"name": "variantActionable",
"type": "boolean", "doc": ""}, {"name": "url", "type": ["null", "string"], "doc": ""}, {"name":
"evidenceType", "type": ["null", "string"], "doc": ""}, {"name": "source", "type": "string", "doc":
""}]}}], "doc": ""}, {"name": "groupOfVariants", "type": ["null", "int"], "doc": ""}, {"name":
"eventJustification", "type": ["null", "string"], "doc": ""}, {"name": "score", "type": ["null",
"float"], "doc": ""}, {"name": "vendorSpecificScores", "type": ["null", {"type": "map", "values":
"float"}], "doc": ""}, {"name": "variantClassification", "type": ["null", {"type": "record", "name":
"VariantClassification", "doc": "", "fields": [{"name": "clinicalSignificance", "type": ["null",
{"type": "enum", "name": "ClinicalSignificance", "symbols": ["benign", "likely_benign", "VUS",
"likely_pathogenic", "pathogenic", "uncertain_significance"]}], "doc": ""}, {"name":
"drugResponseClassification", "type": ["null", {"type": "enum", "name":
"DrugResponseClassification", "symbols": ["responsive", "resistant", "toxicity", "indication",
"contraindication", "dosing", "increased_monitoring", "efficacy"]}], "doc": ""}, {"name":
"traitAssociation", "type": ["null", {"type": "enum", "name": "TraitAssociation", "symbols":
["established_risk_allele", "likely_risk_allele", "uncertain_risk_allele", "protective"]}], "doc":
""}, {"name": "tumorigenesisClassification", "type": ["null", {"type": "enum", "name":
"TumorigenesisClassification", "symbols": ["driver", "passenger", "modifier"]}], "doc": ""},
{"name": "functionalEffect", "type": ["null", {"type": "enum", "name": "VariantFunctionalEffect",
"symbols": ["dominant_negative_variant", "gain_of_function_variant", "lethal_variant",
"loss_of_function_variant", "loss_of_heterozygosity", "null_variant"]}], "doc": ""}]}], "doc": ""},
{"name": "roleInCancer", "type": ["null", {"type": "array", "items": {"type": "enum", "name":
"RoleInCancer", "doc": "", "symbols": ["oncogene", "tumor_suppressor_gene", "both"]}}], "doc": ""},
{"name": "tier", "type": ["null", {"type": "enum", "name": "Tier", "doc": "", "symbols": ["NONE",
"TIER1", "TIER2", "TIER3", "TIER4", "TIER5"]}], "doc": ""}]}}, "doc": ""}, {"name":
"additionalTextualVariantAnnotations", "type": ["null", {"type": "map", "values": "string"}], "doc":
""}, {"name": "references", "type": ["null", {"type": "map", "values": "string"}], "doc": ""},
{"name": "variantAttributes", "type": ["null", {"type": "record", "name": "VariantAttributes",
"doc": "", "fields": [{"name": "ihp", "type": ["null", "int"], "doc": ""}, {"name":
"recurrentlyReported", "type": ["null", "boolean"], "doc": ""}, {"name": "fdp50", "type": ["null",
"string"], "doc": ""}, {"name": "others", "type": ["null", {"type": "map", "values": "string"}],
"doc": ""}]}], "doc": ""}, {"name": "alleleFrequencies", "type": ["null", {"type": "array", "items":
{"type": "record", "name": "AlleleFrequency", "doc": "", "fields": [{"name": "study", "type":
"string", "doc": ""}, {"name": "population", "type": "string", "doc": ""}, {"name":
"alternateFrequency", "type": "float", "doc": ""}]}}], "doc": ""}, {"name":
"additionalNumericVariantAnnotations", "type": ["null", {"type": "map", "values": "float"}], "doc":
""}, {"name": "comments", "type": ["null", {"type": "array", "items": "string"}], "doc": ""},
{"name": "alleleOrigins", "type": {"type": "array", "items": "AlleleOrigin"}, "doc": ""}]}
"""
    # Parsed Avro schema object built from the JSON source above.
    schema = avro_parse(_schemaSource)
    # Field names that validation treats as required for this record.
    requiredFields = {
        "additionalNumericVariantAnnotations",
        "additionalTextualVariantAnnotations",
        "alleleFrequencies",
        "alleleOrigins",
        "cdnaChanges",
        "clinVarIds",
        "comments",
        "cosmicIds",
        "dbSnpId",
        "genomicChanges",
        "proteinChanges",
        "references",
        "reportEvents",
        "variantAttributes",
        "variantCalls",
        "variantCoordinates",
    }
    @classmethod
    def isEmbeddedType(cls, fieldName):
        """
        Return True when `fieldName` maps to an embedded protocol
        element class for this record. Note `reportEvents` embeds
        ReportEventCancer (unlike ReportedVariant's ReportEvent).
        """
        embeddedTypes = {
            'alleleFrequencies': AlleleFrequency,
            'reportEvents': ReportEventCancer,
            'variantAttributes': VariantAttributes,
            'variantCalls': VariantCall,
            'variantCoordinates': VariantCoordinates,
        }
        return fieldName in embeddedTypes
    @classmethod
    def getEmbeddedType(cls, fieldName):
        """
        Return the embedded protocol element class for `fieldName`.
        Raises KeyError when the field is not an embedded type.
        """
        embeddedTypes = {
            'alleleFrequencies': AlleleFrequency,
            'reportEvents': ReportEventCancer,
            'variantAttributes': VariantAttributes,
            'variantCalls': VariantCall,
            'variantCoordinates': VariantCoordinates,
        }
        return embeddedTypes[fieldName]
    # Restrict instances to the record's fields (saves per-instance dicts).
    __slots__ = [
        'additionalNumericVariantAnnotations',
        'additionalTextualVariantAnnotations', 'alleleFrequencies',
        'alleleOrigins', 'cdnaChanges', 'clinVarIds', 'comments',
        'cosmicIds', 'dbSnpId', 'genomicChanges', 'proteinChanges',
        'references', 'reportEvents', 'variantAttributes',
        'variantCalls', 'variantCoordinates'
    ]
    def __init__(self, **kwargs):
        """
        Build the record from keyword arguments. Unspecified fields
        default to None, except `variantCoordinates`, which defaults to
        an empty VariantCoordinates instance.
        """
        self.additionalNumericVariantAnnotations = kwargs.get(
            'additionalNumericVariantAnnotations', None)
        self.additionalTextualVariantAnnotations = kwargs.get(
            'additionalTextualVariantAnnotations', None)
        self.alleleFrequencies = kwargs.get(
            'alleleFrequencies', None)
        self.alleleOrigins = kwargs.get(
            'alleleOrigins', None)
        self.cdnaChanges = kwargs.get(
            'cdnaChanges', None)
        self.clinVarIds = kwargs.get(
            'clinVarIds', None)
        self.comments = kwargs.get(
            'comments', None)
        self.cosmicIds = kwargs.get(
            'cosmicIds', None)
        self.dbSnpId = kwargs.get(
            'dbSnpId', None)
        self.genomicChanges = kwargs.get(
            'genomicChanges', None)
        self.proteinChanges = kwargs.get(
            'proteinChanges', None)
        self.references = kwargs.get(
            'references', None)
        self.reportEvents = kwargs.get(
            'reportEvents', None)
        self.variantAttributes = kwargs.get(
            'variantAttributes', None)
        self.variantCalls = kwargs.get(
            'variantCalls', None)
        self.variantCoordinates = kwargs.get(
            'variantCoordinates', VariantCoordinates())
class ReportingQuestion(object):
    """
    Enumeration holder with the values: `yes`, `no`, `na`.
    """
    yes = "yes"
    no = "no"
    na = "na"

    def __hash__(self):
        # Hash via the string representation (module-wide convention).
        return hash(str(self))
class ReviewedParts(object):
    """
    Which parts of the WGA were reviewed:

    * `domain_1`: Domain 1 only
    * `domain_1_and_2`: Domains 1 and 2
    * `domain_1_2_and_suplementary`: Domains 1, 2 and supplementary
      analysis
    """
    domain_1 = "domain_1"
    domain_1_and_2 = "domain_1_and_2"
    domain_1_2_and_suplementary = "domain_1_2_and_suplementary"

    def __hash__(self):
        # Hash via the string representation (module-wide convention).
        return hash(str(self))
class RoleInCancer(object):
    """
    The role of a given genomic feature in cancer.

    * `oncogene`: a mutated (changed) form of a gene involved in normal
      cell growth; oncogenes may cause the growth of cancer cells.
      Mutations in genes that become oncogenes can be inherited or
      caused by exposure to cancer-causing substances in the
      environment. http://purl.obolibrary.org/obo/NCIT_C16936
    * `tumor_suppressor_gene`: a gene that makes a tumor suppressor
      protein that helps control cell growth; mutations in
      antioncogenes may lead to cancer.
      http://purl.obolibrary.org/obo/NCIT_C17362
    * `both`
    """
    oncogene = "oncogene"
    tumor_suppressor_gene = "tumor_suppressor_gene"
    both = "both"

    def __hash__(self):
        # Hash via the string representation (module-wide convention).
        return hash(str(self))
class Sample(ProtocolElement):
    """
    No documentation
    """
    # Embedded Avro schema (JSON) for this record; parsed into `schema`
    # below. Generated content - keep byte-for-byte as emitted.
    _schemaSource = """
{"type": "record", "name": "Sample", "namespace": "org.gel.models.participant.avro", "fields":
[{"name": "sampleId", "type": "string", "doc": ""}, {"name": "labSampleId", "type": "int", "doc":
""}, {"name": "source", "type": ["null", {"type": "enum", "name": "SampleSource", "doc": "",
"symbols": ["TUMOUR", "BONE_MARROW_ASPIRATE_TUMOUR_SORTED_CELLS",
"BONE_MARROW_ASPIRATE_TUMOUR_CELLS", "BLOOD", "SALIVA", "FIBROBLAST", "TISSUE"]}], "doc": ""},
{"name": "product", "type": ["null", {"type": "enum", "name": "Product", "symbols": ["DNA",
"RNA"]}], "doc": ""}, {"name": "preparationMethod", "type": ["null", {"type": "enum", "name":
"PreparationMethod", "symbols": ["EDTA", "ORAGENE", "FF", "FFPE", "CD128_SORTED_CELLS",
"ASPIRATE"]}], "doc": ""}]}
"""
    # Parsed Avro schema object built from the JSON source above.
    schema = avro_parse(_schemaSource)
    # Field names that validation treats as required for this record.
    requiredFields = {
        "labSampleId",
        "preparationMethod",
        "product",
        "sampleId",
        "source",
    }
    @classmethod
    def isEmbeddedType(cls, fieldName):
        """
        Return True when `fieldName` maps to an embedded protocol
        element class; this record embeds none, so always False.
        """
        embeddedTypes = {}
        return fieldName in embeddedTypes
    @classmethod
    def getEmbeddedType(cls, fieldName):
        """
        Return the embedded type for `fieldName`; this record embeds no
        protocol elements, so any lookup raises KeyError.
        """
        embeddedTypes = {}
        return embeddedTypes[fieldName]
    # Restrict instances to the record's fields (saves per-instance dicts).
    __slots__ = [
        'labSampleId', 'preparationMethod', 'product', 'sampleId',
        'source'
    ]
    def __init__(self, **kwargs):
        """
        Build the record from keyword arguments; unspecified fields
        default to None.
        """
        self.labSampleId = kwargs.get(
            'labSampleId', None)
        self.preparationMethod = kwargs.get(
            'preparationMethod', None)
        self.product = kwargs.get(
            'product', None)
        self.sampleId = kwargs.get(
            'sampleId', None)
        self.source = kwargs.get(
            'source', None)
class SampleSource(object):
    """
    The source of the sample
    """
    TUMOUR = "TUMOUR"
    BONE_MARROW_ASPIRATE_TUMOUR_SORTED_CELLS = "BONE_MARROW_ASPIRATE_TUMOUR_SORTED_CELLS"
    BONE_MARROW_ASPIRATE_TUMOUR_CELLS = "BONE_MARROW_ASPIRATE_TUMOUR_CELLS"
    BLOOD = "BLOOD"
    SALIVA = "SALIVA"
    FIBROBLAST = "FIBROBLAST"
    TISSUE = "TISSUE"

    def __hash__(self):
        # Hash via the string representation (module-wide convention).
        return hash(str(self))
class SegregationQuestion(object):
    """
    Enumeration holder with the values: `yes`, `no`.
    """
    yes = "yes"
    no = "no"

    def __hash__(self):
        # Hash via the string representation (module-wide convention).
        return hash(str(self))
class SensitiveInformation(ProtocolElement):
    """
    No documentation
    """
    # Embedded Avro schema (JSON) for this record; parsed into `schema`
    # below. Generated content - keep byte-for-byte as emitted.
    _schemaSource = """
{"type": "record", "name": "SensitiveInformation", "namespace": "org.gel.models.participant.avro",
"fields": [{"name": "versionControl", "type": {"type": "record", "name": "VersionControl", "fields":
[{"name": "GitVersionControl", "type": "string", "doc": "", "default": "1.1.0"}]}, "doc": ""},
{"name": "gelID", "type": "string"}, {"name": "externalIds", "type": ["null", {"type": "array",
"items": "string"}]}, {"name": "genomicMedicineCenter", "type": ["null", "string"]}, {"name":
"fullNameOfResponsibleConsultant", "type": ["null", "string"]}, {"name": "contactNumber", "type":
["null", "string"]}, {"name": "hospitalOfResponsibleConsultant", "type": ["null", "string"]},
{"name": "centerSampleId", "type": ["null", "string"]}, {"name": "originatingCenter", "type":
["null", "string"]}, {"name": "centerPatientId", "type": ["null", "string"]}]}
"""
    # Parsed Avro schema object built from the JSON source above.
    schema = avro_parse(_schemaSource)
    # Field names that validation treats as required for this record.
    requiredFields = {
        "centerPatientId",
        "centerSampleId",
        "contactNumber",
        "externalIds",
        "fullNameOfResponsibleConsultant",
        "gelID",
        "genomicMedicineCenter",
        "hospitalOfResponsibleConsultant",
        "originatingCenter",
        "versionControl",
    }
    @classmethod
    def isEmbeddedType(cls, fieldName):
        """
        Return True when `fieldName` maps to an embedded protocol
        element class (only `versionControl` here).
        """
        embeddedTypes = {
            'versionControl': VersionControl,
        }
        return fieldName in embeddedTypes
    @classmethod
    def getEmbeddedType(cls, fieldName):
        """
        Return the embedded protocol element class for `fieldName`.
        Raises KeyError when the field is not an embedded type.
        """
        embeddedTypes = {
            'versionControl': VersionControl,
        }
        return embeddedTypes[fieldName]
    # Restrict instances to the record's fields (saves per-instance dicts).
    __slots__ = [
        'centerPatientId', 'centerSampleId', 'contactNumber',
        'externalIds', 'fullNameOfResponsibleConsultant', 'gelID',
        'genomicMedicineCenter', 'hospitalOfResponsibleConsultant',
        'originatingCenter', 'versionControl'
    ]
    def __init__(self, **kwargs):
        """
        Build the record from keyword arguments. Unspecified fields
        default to None, except `versionControl`, which defaults to an
        empty VersionControl instance.
        """
        self.centerPatientId = kwargs.get(
            'centerPatientId', None)
        self.centerSampleId = kwargs.get(
            'centerSampleId', None)
        self.contactNumber = kwargs.get(
            'contactNumber', None)
        self.externalIds = kwargs.get(
            'externalIds', None)
        self.fullNameOfResponsibleConsultant = kwargs.get(
            'fullNameOfResponsibleConsultant', None)
        self.gelID = kwargs.get(
            'gelID', None)
        self.genomicMedicineCenter = kwargs.get(
            'genomicMedicineCenter', None)
        self.hospitalOfResponsibleConsultant = kwargs.get(
            'hospitalOfResponsibleConsultant', None)
        self.originatingCenter = kwargs.get(
            'originatingCenter', None)
        self.versionControl = kwargs.get(
            'versionControl', VersionControl())
class Severity(object):
    """
    Enumeration holder with the values: BORDERLINE, MILD, MODERATE,
    SEVERE, PROFOUND.
    """
    BORDERLINE = "BORDERLINE"
    MILD = "MILD"
    MODERATE = "MODERATE"
    SEVERE = "SEVERE"
    PROFOUND = "PROFOUND"

    def __hash__(self):
        # Hash via the string representation (module-wide convention).
        return hash(str(self))
class Sex(object):
    """
    Sex
    """
    MALE = "MALE"
    FEMALE = "FEMALE"
    UNKNOWN = "UNKNOWN"

    def __hash__(self):
        # Hash via the string representation (module-wide convention).
        return hash(str(self))
class SpatialPattern(object):
    """
    Enumeration holder with the values: DISTAL, GENERALIZED, LOCALIZED,
    PROXIMAL.
    """
    DISTAL = "DISTAL"
    GENERALIZED = "GENERALIZED"
    LOCALIZED = "LOCALIZED"
    PROXIMAL = "PROXIMAL"

    def __hash__(self):
        # Hash via the string representation (module-wide convention).
        return hash(str(self))
class SupportingEvidences(ProtocolElement):
    """
    No documentation
    """
    # Embedded Avro schema (JSON) for this record; parsed into `schema`
    # below. Generated content - keep byte-for-byte as emitted.
    _schemaSource = """
{"type": "record", "name": "SupportingEvidences", "namespace": "org.gel.models.report.avro",
"fields": [{"name": "previousSupportingEvidences", "type": {"type": "array", "items": "string"}},
{"name": "modifiedSupportingEvidences", "type": {"type": "array", "items": "string"}}]}
"""
    # Parsed Avro schema object built from the JSON source above.
    schema = avro_parse(_schemaSource)
    # Field names that validation treats as required for this record.
    requiredFields = {
        "modifiedSupportingEvidences",
        "previousSupportingEvidences",
    }
    @classmethod
    def isEmbeddedType(cls, fieldName):
        """
        Return True when `fieldName` maps to an embedded protocol
        element class; this record embeds none, so always False.
        """
        embeddedTypes = {}
        return fieldName in embeddedTypes
    @classmethod
    def getEmbeddedType(cls, fieldName):
        """
        Return the embedded type for `fieldName`; this record embeds no
        protocol elements, so any lookup raises KeyError.
        """
        embeddedTypes = {}
        return embeddedTypes[fieldName]
    # Restrict instances to the record's fields (saves per-instance dicts).
    __slots__ = [
        'modifiedSupportingEvidences', 'previousSupportingEvidences'
    ]
    def __init__(self, **kwargs):
        """
        Build the record from keyword arguments; unspecified fields
        default to None.
        """
        self.modifiedSupportingEvidences = kwargs.get(
            'modifiedSupportingEvidences', None)
        self.previousSupportingEvidences = kwargs.get(
            'previousSupportingEvidences', None)
class TernaryOption(object):
    """
    This defines a yes/no/unknown case
    """
    yes = "yes"
    no = "no"
    unknown = "unknown"

    def __hash__(self):
        # Hash via the string representation (module-wide convention).
        return hash(str(self))
class Tier(object):
    """
    Variant tiers as defined by Genomics England
    """
    NONE = "NONE"
    TIER1 = "TIER1"
    TIER2 = "TIER2"
    TIER3 = "TIER3"
    TIER4 = "TIER4"
    TIER5 = "TIER5"

    def __hash__(self):
        # Hash via the string representation (module-wide convention).
        return hash(str(self))
class TissueSource(object):
    """
    Enumeration holder for tissue-source values (biopsy / excision /
    resection methods).
    """
    BMA_TUMOUR_SORTED_CELLS = "BMA_TUMOUR_SORTED_CELLS"
    CT_GUIDED_BIOPSY = "CT_GUIDED_BIOPSY"
    ENDOSCOPIC_BIOPSY = "ENDOSCOPIC_BIOPSY"
    ENDOSCOPIC_ULTRASOUND_GUIDED_BIOPSY = "ENDOSCOPIC_ULTRASOUND_GUIDED_BIOPSY"
    ENDOSCOPIC_ULTRASOUND_GUIDED_FNA = "ENDOSCOPIC_ULTRASOUND_GUIDED_FNA"
    LAPAROSCOPIC_BIOPSY = "LAPAROSCOPIC_BIOPSY"
    LAPAROSCOPIC_EXCISION = "LAPAROSCOPIC_EXCISION"
    MRI_GUIDED_BIOPSY = "MRI_GUIDED_BIOPSY"
    NON_GUIDED_BIOPSY = "NON_GUIDED_BIOPSY"
    SURGICAL_RESECTION = "SURGICAL_RESECTION"
    STEREOTACTICALLY_GUIDED_BIOPSY = "STEREOTACTICALLY_GUIDED_BIOPSY"
    USS_GUIDED_BIOPSY = "USS_GUIDED_BIOPSY"
    NON_STANDARD_BIOPSY = "NON_STANDARD_BIOPSY"

    def __hash__(self):
        # Hash via the string representation (module-wide convention).
        return hash(str(self))
class TraitAssociation(object):
    """
    Enumeration holder with the values: established_risk_allele,
    likely_risk_allele, uncertain_risk_allele, protective.
    """
    established_risk_allele = "established_risk_allele"
    likely_risk_allele = "likely_risk_allele"
    uncertain_risk_allele = "uncertain_risk_allele"
    protective = "protective"

    def __hash__(self):
        # Hash via the string representation (module-wide convention).
        return hash(str(self))
class TumorigenesisClassification(object):
    """
    Enumeration holder with the values: driver, passenger, modifier.
    """
    driver = "driver"
    passenger = "passenger"
    modifier = "modifier"

    def __hash__(self):
        # Hash via the string representation (module-wide convention).
        return hash(str(self))
class TumourContent(object):
    """
    Enumeration holder with the values: High, Medium, Low.
    """
    High = "High"
    Medium = "Medium"
    Low = "Low"

    def __hash__(self):
        # Hash via the string representation (module-wide convention).
        return hash(str(self))
class TumourSample(ProtocolElement):
    """
    A tumour sample
    """
    # Embedded Avro schema (JSON) for this record; parsed into `schema`
    # below. Generated content - keep byte-for-byte as emitted.
    _schemaSource = """
{"type": "record", "name": "TumourSample", "namespace": "org.gel.models.participant.avro", "doc":
"", "fields": [{"name": "sampleId", "type": "string", "doc": ""}, {"name": "labSampleId", "type":
"int", "doc": ""}, {"name": "LDPCode", "type": "string", "doc": ""}, {"name": "tumourId", "type":
"string", "doc": ""}, {"name": "programmePhase", "type": ["null", {"type": "enum", "name":
"ProgrammePhase", "symbols": ["CRUK", "OXFORD", "CLL", "IIP", "MAIN", "EXPT"]}], "doc": ""},
{"name": "diseaseType", "type": ["null", {"type": "enum", "name": "diseaseType", "symbols":
["ADULT_GLIOMA", "BLADDER", "BREAST", "CARCINOMA_OF_UNKNOWN_PRIMARY", "CHILDHOOD", "COLORECTAL",
"ENDOMETRIAL_CARCINOMA", "HAEMONC", "HEPATOPANCREATOBILIARY", "LUNG", "MALIGNANT_MELANOMA",
"NASOPHARYNGEAL", "ORAL_OROPHARYNGEAL", "OVARIAN", "PROSTATE", "RENAL", "SARCOMA", "SINONASAL",
"TESTICULAR_GERM_CELL_TUMOURS", "UPPER_GASTROINTESTINAL",
"NON_HODGKINS_B_CELL_LYMPHOMA_LOW_MOD_GRADE", "CLASSICAL_HODGKINS",
"NODULAR_LYMPHOCYTE_PREDOMINANT_HODGKINS", "T_CELL_LYMPHOMA"]}], "doc": ""}, {"name":
"diseaseSubType", "type": ["null", "string"], "doc": ""}, {"name": "clinicalSampleDateTime", "type":
["null", "string"], "doc": ""}, {"name": "tumourType", "type": ["null", {"type": "enum", "name":
"TumourType", "symbols": ["PRIMARY", "METASTATIC_RECURRENCE", "RECURRENCE_OF_PRIMARY_TUMOUR",
"METASTASES"]}], "doc": ""}, {"name": "tumourContent", "type": ["null", {"type": "enum", "name":
"TumourContent", "symbols": ["High", "Medium", "Low"]}], "doc": ""}, {"name": "source", "type":
["null", {"type": "enum", "name": "SampleSource", "doc": "", "symbols": ["TUMOUR",
"BONE_MARROW_ASPIRATE_TUMOUR_SORTED_CELLS", "BONE_MARROW_ASPIRATE_TUMOUR_CELLS", "BLOOD", "SALIVA",
"FIBROBLAST", "TISSUE"]}], "doc": ""}, {"name": "preparationMethod", "type": ["null", {"type":
"enum", "name": "PreparationMethod", "symbols": ["EDTA", "ORAGENE", "FF", "FFPE",
"CD128_SORTED_CELLS", "ASPIRATE"]}], "doc": ""}, {"name": "tissueSource", "type": ["null", {"type":
"enum", "name": "TissueSource", "symbols": ["BMA_TUMOUR_SORTED_CELLS", "CT_GUIDED_BIOPSY",
"ENDOSCOPIC_BIOPSY", "ENDOSCOPIC_ULTRASOUND_GUIDED_BIOPSY", "ENDOSCOPIC_ULTRASOUND_GUIDED_FNA",
"LAPAROSCOPIC_BIOPSY", "LAPAROSCOPIC_EXCISION", "MRI_GUIDED_BIOPSY", "NON_GUIDED_BIOPSY",
"SURGICAL_RESECTION", "STEREOTACTICALLY_GUIDED_BIOPSY", "USS_GUIDED_BIOPSY",
"NON_STANDARD_BIOPSY"]}], "doc": ""}, {"name": "product", "type": ["null", {"type": "enum", "name":
"Product", "symbols": ["DNA", "RNA"]}], "doc": ""}, {"name": "morphologyICD", "type": ["null",
"string"], "doc": ""}, {"name": "morphologySnomedCT", "type": ["null", "string"], "doc": ""},
{"name": "morphologySnomedRT", "type": ["null", "string"], "doc": ""}, {"name": "topographyICD",
"type": ["null", "string"], "doc": ""}, {"name": "topographySnomedCT", "type": ["null", "string"],
"doc": ""}, {"name": "topographySnomedRT", "type": ["null", "string"], "doc": ""}]}
"""
    # Parsed Avro schema object built from the JSON source above.
    schema = avro_parse(_schemaSource)
    # Field names that validation treats as required for this record.
    requiredFields = {
        "LDPCode",
        "clinicalSampleDateTime",
        "diseaseSubType",
        "diseaseType",
        "labSampleId",
        "morphologyICD",
        "morphologySnomedCT",
        "morphologySnomedRT",
        "preparationMethod",
        "product",
        "programmePhase",
        "sampleId",
        "source",
        "tissueSource",
        "topographyICD",
        "topographySnomedCT",
        "topographySnomedRT",
        "tumourContent",
        "tumourId",
        "tumourType",
    }
    @classmethod
    def isEmbeddedType(cls, fieldName):
        """
        Return True when `fieldName` maps to an embedded protocol
        element class; this record embeds none, so always False.
        """
        embeddedTypes = {}
        return fieldName in embeddedTypes
    @classmethod
    def getEmbeddedType(cls, fieldName):
        """
        Return the embedded type for `fieldName`; this record embeds no
        protocol elements, so any lookup raises KeyError.
        """
        embeddedTypes = {}
        return embeddedTypes[fieldName]
    # Restrict instances to the record's fields (saves per-instance dicts).
    __slots__ = [
        'LDPCode', 'clinicalSampleDateTime', 'diseaseSubType',
        'diseaseType', 'labSampleId', 'morphologyICD',
        'morphologySnomedCT', 'morphologySnomedRT',
        'preparationMethod', 'product', 'programmePhase', 'sampleId',
        'source', 'tissueSource', 'topographyICD',
        'topographySnomedCT', 'topographySnomedRT', 'tumourContent',
        'tumourId', 'tumourType'
    ]
    def __init__(self, **kwargs):
        """
        Build the record from keyword arguments; unspecified fields
        default to None.
        """
        self.LDPCode = kwargs.get(
            'LDPCode', None)
        self.clinicalSampleDateTime = kwargs.get(
            'clinicalSampleDateTime', None)
        self.diseaseSubType = kwargs.get(
            'diseaseSubType', None)
        self.diseaseType = kwargs.get(
            'diseaseType', None)
        self.labSampleId = kwargs.get(
            'labSampleId', None)
        self.morphologyICD = kwargs.get(
            'morphologyICD', None)
        self.morphologySnomedCT = kwargs.get(
            'morphologySnomedCT', None)
        self.morphologySnomedRT = kwargs.get(
            'morphologySnomedRT', None)
        self.preparationMethod = kwargs.get(
            'preparationMethod', None)
        self.product = kwargs.get(
            'product', None)
        self.programmePhase = kwargs.get(
            'programmePhase', None)
        self.sampleId = kwargs.get(
            'sampleId', None)
        self.source = kwargs.get(
            'source', None)
        self.tissueSource = kwargs.get(
            'tissueSource', None)
        self.topographyICD = kwargs.get(
            'topographyICD', None)
        self.topographySnomedCT = kwargs.get(
            'topographySnomedCT', None)
        self.topographySnomedRT = kwargs.get(
            'topographySnomedRT', None)
        self.tumourContent = kwargs.get(
            'tumourContent', None)
        self.tumourId = kwargs.get(
            'tumourId', None)
        self.tumourType = kwargs.get(
            'tumourType', None)
class TumourType(object):
    """Protocol enum of tumour type constants (auto-generated)."""

    PRIMARY = "PRIMARY"
    METASTATIC_RECURRENCE = "METASTATIC_RECURRENCE"
    RECURRENCE_OF_PRIMARY_TUMOUR = "RECURRENCE_OF_PRIMARY_TUMOUR"
    METASTASES = "METASTASES"

    def __hash__(self):
        # Hash instances via their string form, matching the other enums.
        return hash(str(self))
class VariantAttributes(ProtocolElement):
    """Additional attributes attached to a variant (all fields optional)."""

    _schemaSource = """
{"type": "record", "name": "VariantAttributes", "namespace": "org.gel.models.report.avro", "doc":
"", "fields": [{"name": "ihp", "type": ["null", "int"], "doc": ""}, {"name": "recurrentlyReported",
"type": ["null", "boolean"], "doc": ""}, {"name": "fdp50", "type": ["null", "string"], "doc": ""},
{"name": "others", "type": ["null", {"type": "map", "values": "string"}], "doc": ""}]}
"""
    schema = avro_parse(_schemaSource)
    requiredFields = {"fdp50", "ihp", "others", "recurrentlyReported"}

    @classmethod
    def isEmbeddedType(cls, fieldName):
        """Return True when *fieldName* holds an embedded protocol record (none here)."""
        return fieldName in {}

    @classmethod
    def getEmbeddedType(cls, fieldName):
        """Look up the embedded type for *fieldName*; always raises KeyError here."""
        return {}[fieldName]

    __slots__ = ['fdp50', 'ihp', 'others', 'recurrentlyReported']

    def __init__(self, **kwargs):
        """Populate every slot from *kwargs*; missing keys default to None."""
        for attr in self.__slots__:
            setattr(self, attr, kwargs.get(attr, None))
class VariantCall(ProtocolElement):
    """
    A specific variant observation in one sample: zygosity, phase, depth
    of coverage, variant allele frequency and allele origins.
    """

    _schemaSource = """
{"type": "record", "name": "VariantCall", "namespace": "org.gel.models.report.avro", "doc": "",
"fields": [{"name": "participantId", "type": "string", "doc": ""}, {"name": "sampleId", "type":
"string", "doc": ""}, {"name": "zygosity", "type": {"type": "enum", "name": "Zygosity", "doc": "",
"symbols": ["reference_homozygous", "heterozygous", "alternate_homozygous", "missing",
"half_missing_reference", "half_missing_alternate", "alternate_hemizigous", "reference_hemizigous",
"unk", "na"]}, "doc": ""}, {"name": "phaseSet", "type": ["null", "int"], "doc": ""}, {"name": "vaf",
"type": ["null", "double"], "doc": ""}, {"name": "depthReference", "type": ["null", "int"], "doc":
""}, {"name": "depthAlternate", "type": ["null", "int"], "doc": ""}, {"name": "alleleOrigins",
"type": {"type": "array", "items": {"type": "enum", "name": "AlleleOrigin", "doc": "", "symbols":
["de_novo_variant", "germline_variant", "maternal_variant", "paternal_variant",
"pedigree_specific_variant", "population_specific_variant", "somatic_variant"]}}, "doc": ""}]}
"""
    schema = avro_parse(_schemaSource)
    requiredFields = {
        "alleleOrigins", "depthAlternate", "depthReference", "participantId",
        "phaseSet", "sampleId", "vaf", "zygosity",
    }

    @classmethod
    def isEmbeddedType(cls, fieldName):
        """Return True when *fieldName* holds an embedded protocol record (none here)."""
        return fieldName in {}

    @classmethod
    def getEmbeddedType(cls, fieldName):
        """Look up the embedded type for *fieldName*; always raises KeyError here."""
        return {}[fieldName]

    __slots__ = [
        'alleleOrigins', 'depthAlternate', 'depthReference',
        'participantId', 'phaseSet', 'sampleId', 'vaf', 'zygosity'
    ]

    def __init__(self, **kwargs):
        """Populate every slot from *kwargs*; missing keys default to None."""
        for attr in self.__slots__:
            setattr(self, attr, kwargs.get(attr, None))
class VariantClassification(ProtocolElement):
    """Classification of a variant along several independent axes."""

    _schemaSource = """
{"type": "record", "name": "VariantClassification", "namespace": "org.gel.models.report.avro",
"doc": "", "fields": [{"name": "clinicalSignificance", "type": ["null", {"type": "enum", "name":
"ClinicalSignificance", "symbols": ["benign", "likely_benign", "VUS", "likely_pathogenic",
"pathogenic", "uncertain_significance"]}], "doc": ""}, {"name": "drugResponseClassification",
"type": ["null", {"type": "enum", "name": "DrugResponseClassification", "symbols": ["responsive",
"resistant", "toxicity", "indication", "contraindication", "dosing", "increased_monitoring",
"efficacy"]}], "doc": ""}, {"name": "traitAssociation", "type": ["null", {"type": "enum", "name":
"TraitAssociation", "symbols": ["established_risk_allele", "likely_risk_allele",
"uncertain_risk_allele", "protective"]}], "doc": ""}, {"name": "tumorigenesisClassification",
"type": ["null", {"type": "enum", "name": "TumorigenesisClassification", "symbols": ["driver",
"passenger", "modifier"]}], "doc": ""}, {"name": "functionalEffect", "type": ["null", {"type":
"enum", "name": "VariantFunctionalEffect", "symbols": ["dominant_negative_variant",
"gain_of_function_variant", "lethal_variant", "loss_of_function_variant", "loss_of_heterozygosity",
"null_variant"]}], "doc": ""}]}
"""
    schema = avro_parse(_schemaSource)
    requiredFields = {
        "clinicalSignificance", "drugResponseClassification",
        "functionalEffect", "traitAssociation", "tumorigenesisClassification",
    }

    @classmethod
    def isEmbeddedType(cls, fieldName):
        """Return True when *fieldName* holds an embedded protocol record (none here)."""
        return fieldName in {}

    @classmethod
    def getEmbeddedType(cls, fieldName):
        """Look up the embedded type for *fieldName*; always raises KeyError here."""
        return {}[fieldName]

    __slots__ = [
        'clinicalSignificance', 'drugResponseClassification',
        'functionalEffect', 'traitAssociation',
        'tumorigenesisClassification'
    ]

    def __init__(self, **kwargs):
        """Populate every slot from *kwargs*; missing keys default to None."""
        for attr in self.__slots__:
            setattr(self, attr, kwargs.get(attr, None))
class VariantConsequence(ProtocolElement):
    """
    A Sequence Ontology (SO) variant consequence, e.g. id = SO:0001816,
    name = non synonymous.  Kept separate from OpenCB's `ConsequenceType`
    to avoid naming collisions.
    """

    _schemaSource = """
{"type": "record", "name": "VariantConsequence", "namespace": "org.gel.models.report.avro", "doc":
"", "fields": [{"name": "id", "type": "string", "doc": ""}, {"name": "name", "type": ["null",
"string"], "doc": ""}]}
"""
    schema = avro_parse(_schemaSource)
    requiredFields = {"id", "name"}

    @classmethod
    def isEmbeddedType(cls, fieldName):
        """Return True when *fieldName* holds an embedded protocol record (none here)."""
        return fieldName in {}

    @classmethod
    def getEmbeddedType(cls, fieldName):
        """Look up the embedded type for *fieldName*; always raises KeyError here."""
        return {}[fieldName]

    __slots__ = ['id', 'name']

    def __init__(self, **kwargs):
        """Populate every slot from *kwargs*; missing keys default to None."""
        for attr in self.__slots__:
            setattr(self, attr, kwargs.get(attr, None))
class VariantCoordinates(ProtocolElement):
    """
    Coordinates uniquely identifying a small variant.  Multi-allelic
    variants are not supported: `alternate` is a single alternate allele.
    """

    _schemaSource = """
{"type": "record", "name": "VariantCoordinates", "namespace": "org.gel.models.report.avro", "doc":
"", "fields": [{"name": "chromosome", "type": "string", "doc": ""}, {"name": "position", "type":
"int", "doc": ""}, {"name": "reference", "type": "string", "doc": ""}, {"name": "alternate", "type":
"string", "doc": ""}, {"name": "assembly", "type": {"type": "enum", "name": "Assembly", "doc": "",
"symbols": ["GRCh38", "GRCh37"]}, "doc": ""}]}
"""
    schema = avro_parse(_schemaSource)
    requiredFields = {"alternate", "assembly", "chromosome", "position", "reference"}

    @classmethod
    def isEmbeddedType(cls, fieldName):
        """Return True when *fieldName* holds an embedded protocol record (none here)."""
        return fieldName in {}

    @classmethod
    def getEmbeddedType(cls, fieldName):
        """Look up the embedded type for *fieldName*; always raises KeyError here."""
        return {}[fieldName]

    __slots__ = ['alternate', 'assembly', 'chromosome', 'position', 'reference']

    def __init__(self, **kwargs):
        """Populate every slot from *kwargs*; missing keys default to None."""
        for attr in self.__slots__:
            setattr(self, attr, kwargs.get(attr, None))
class VariantFunctionalEffect(object):
    """Protocol enum of variant functional effect constants (auto-generated)."""

    dominant_negative_variant = "dominant_negative_variant"
    gain_of_function_variant = "gain_of_function_variant"
    lethal_variant = "lethal_variant"
    loss_of_function_variant = "loss_of_function_variant"
    loss_of_heterozygosity = "loss_of_heterozygosity"
    null_variant = "null_variant"

    def __hash__(self):
        # Hash instances via their string form, matching the other enums.
        return hash(str(self))
class VariantGroupLevelQuestions(ProtocolElement):
    """Questionnaire answers collected at the variant-group level."""

    _schemaSource = """
{"type": "record", "name": "VariantGroupLevelQuestions", "namespace": "org.gel.models.report.avro",
"doc": "", "fields": [{"name": "variantGroup", "type": "int", "doc": ""}, {"name":
"variantLevelQuestions", "type": {"type": "array", "items": {"type": "record", "name":
"VariantLevelQuestions", "doc": "", "fields": [{"name": "variantDetails", "type": "string", "doc":
""}, {"name": "confirmationDecision", "type": {"type": "enum", "name": "ConfirmationDecision",
"symbols": ["yes", "no", "na"]}, "doc": ""}, {"name": "confirmationOutcome", "type": {"type":
"enum", "name": "ConfirmationOutcome", "symbols": ["yes", "no", "na"]}, "doc": ""}, {"name":
"reportingQuestion", "type": {"type": "enum", "name": "ReportingQuestion", "symbols": ["yes", "no",
"na"]}, "doc": ""}, {"name": "acmgClassification", "type": {"type": "enum", "name":
"ACMGClassification", "symbols": ["pathogenic_variant", "likely_pathogenic_variant",
"variant_of_unknown_clinical_significance", "likely_benign_variant", "benign_variant",
"not_assessed"]}, "doc": ""}, {"name": "publications", "type": "string", "doc": ""}]}}, "doc": ""},
{"name": "actionability", "type": {"type": "enum", "name": "Actionability", "symbols": ["yes", "no",
"not_yet", "na"]}, "doc": ""}, {"name": "clinicalUtility", "type": {"type": "array", "items":
{"type": "enum", "name": "ClinicalUtility", "symbols": ["none", "change_in_medication",
"surgical_option", "additional_surveillance_for_proband_or_relatives", "clinical_trial_eligibility",
"informs_reproductive_choice", "unknown", "other"]}}, "doc": ""}, {"name": "phenotypesSolved",
"type": {"type": "enum", "name": "PhenotypesSolved", "symbols": ["yes", "no", "partially",
"unknown"]}, "doc": ""}, {"name": "phenotypesExplained", "type": ["null", {"type": "array", "items":
"string"}], "doc": ""}]}
"""
    schema = avro_parse(_schemaSource)
    requiredFields = {
        "actionability", "clinicalUtility", "phenotypesExplained",
        "phenotypesSolved", "variantGroup", "variantLevelQuestions",
    }

    @classmethod
    def isEmbeddedType(cls, fieldName):
        """Return True when *fieldName* holds an embedded protocol record."""
        # VariantLevelQuestions is resolved at call time (it is defined later
        # in this module).
        return fieldName in {'variantLevelQuestions': VariantLevelQuestions}

    @classmethod
    def getEmbeddedType(cls, fieldName):
        """Return the protocol class embedded under *fieldName* (KeyError otherwise)."""
        return {'variantLevelQuestions': VariantLevelQuestions}[fieldName]

    __slots__ = [
        'actionability', 'clinicalUtility', 'phenotypesExplained',
        'phenotypesSolved', 'variantGroup', 'variantLevelQuestions'
    ]

    def __init__(self, **kwargs):
        """Populate every slot from *kwargs*; missing keys default to None."""
        for attr in self.__slots__:
            setattr(self, attr, kwargs.get(attr, None))
class VariantLevelQuestions(ProtocolElement):
    """Questionnaire answers collected for an individual variant."""

    _schemaSource = """
{"type": "record", "name": "VariantLevelQuestions", "namespace": "org.gel.models.report.avro",
"doc": "", "fields": [{"name": "variantDetails", "type": "string", "doc": ""}, {"name":
"confirmationDecision", "type": {"type": "enum", "name": "ConfirmationDecision", "symbols": ["yes",
"no", "na"]}, "doc": ""}, {"name": "confirmationOutcome", "type": {"type": "enum", "name":
"ConfirmationOutcome", "symbols": ["yes", "no", "na"]}, "doc": ""}, {"name": "reportingQuestion",
"type": {"type": "enum", "name": "ReportingQuestion", "symbols": ["yes", "no", "na"]}, "doc": ""},
{"name": "acmgClassification", "type": {"type": "enum", "name": "ACMGClassification", "symbols":
["pathogenic_variant", "likely_pathogenic_variant", "variant_of_unknown_clinical_significance",
"likely_benign_variant", "benign_variant", "not_assessed"]}, "doc": ""}, {"name": "publications",
"type": "string", "doc": ""}]}
"""
    schema = avro_parse(_schemaSource)
    requiredFields = {
        "acmgClassification", "confirmationDecision", "confirmationOutcome",
        "publications", "reportingQuestion", "variantDetails",
    }

    @classmethod
    def isEmbeddedType(cls, fieldName):
        """Return True when *fieldName* holds an embedded protocol record (none here)."""
        return fieldName in {}

    @classmethod
    def getEmbeddedType(cls, fieldName):
        """Look up the embedded type for *fieldName*; always raises KeyError here."""
        return {}[fieldName]

    __slots__ = [
        'acmgClassification', 'confirmationDecision',
        'confirmationOutcome', 'publications', 'reportingQuestion',
        'variantDetails'
    ]

    def __init__(self, **kwargs):
        """Populate every slot from *kwargs*; missing keys default to None."""
        for attr in self.__slots__:
            setattr(self, attr, kwargs.get(attr, None))
class VersionControl(ProtocolElement):
    """Version marker record for the participant model schema."""

    _schemaSource = """
{"type": "record", "name": "VersionControl", "namespace": "org.gel.models.participant.avro",
"fields": [{"name": "GitVersionControl", "type": "string", "doc": "", "default": "1.1.0"}]}
"""
    schema = avro_parse(_schemaSource)
    # Generated as an empty dict here (sibling classes use a set literal);
    # membership tests behave identically on an empty container.
    requiredFields = {}

    @classmethod
    def isEmbeddedType(cls, fieldName):
        """Return True when *fieldName* holds an embedded protocol record (none here)."""
        return fieldName in {}

    @classmethod
    def getEmbeddedType(cls, fieldName):
        """Look up the embedded type for *fieldName*; always raises KeyError here."""
        return {}[fieldName]

    __slots__ = ['GitVersionControl']

    def __init__(self, **kwargs):
        """Populate GitVersionControl from *kwargs*, defaulting to '1.1.0'."""
        self.GitVersionControl = kwargs.get('GitVersionControl', '1.1.0')
class Zygosity(object):
    """
    Protocol enum representing zygosity:
    `reference_homozygous`: 0/0, 0|0; `heterozygous`: 0/1, 1/0, 1|0, 0|1;
    `alternate_homozygous`: 1/1, 1|1; `missing`: ./., .|.;
    `half_missing_reference`: ./0, 0/., 0|., .|0;
    `half_missing_alternate`: ./1, 1/., 1|., .|1;
    `alternate_hemizigous`: 1; `reference_hemizigous`: 0;
    `unk`: anything unexpected.
    """

    reference_homozygous = "reference_homozygous"
    heterozygous = "heterozygous"
    alternate_homozygous = "alternate_homozygous"
    missing = "missing"
    half_missing_reference = "half_missing_reference"
    half_missing_alternate = "half_missing_alternate"
    alternate_hemizigous = "alternate_hemizigous"
    reference_hemizigous = "reference_hemizigous"
    unk = "unk"
    na = "na"

    def __hash__(self):
        # Hash instances via their string form, matching the other enums.
        return hash(str(self))
class diseaseType(object):
    """Protocol enum of cancer disease type constants (auto-generated)."""

    ADULT_GLIOMA = "ADULT_GLIOMA"
    BLADDER = "BLADDER"
    BREAST = "BREAST"
    CARCINOMA_OF_UNKNOWN_PRIMARY = "CARCINOMA_OF_UNKNOWN_PRIMARY"
    CHILDHOOD = "CHILDHOOD"
    COLORECTAL = "COLORECTAL"
    ENDOMETRIAL_CARCINOMA = "ENDOMETRIAL_CARCINOMA"
    HAEMONC = "HAEMONC"
    HEPATOPANCREATOBILIARY = "HEPATOPANCREATOBILIARY"
    LUNG = "LUNG"
    MALIGNANT_MELANOMA = "MALIGNANT_MELANOMA"
    NASOPHARYNGEAL = "NASOPHARYNGEAL"
    ORAL_OROPHARYNGEAL = "ORAL_OROPHARYNGEAL"
    OVARIAN = "OVARIAN"
    PROSTATE = "PROSTATE"
    RENAL = "RENAL"
    SARCOMA = "SARCOMA"
    SINONASAL = "SINONASAL"
    TESTICULAR_GERM_CELL_TUMOURS = "TESTICULAR_GERM_CELL_TUMOURS"
    UPPER_GASTROINTESTINAL = "UPPER_GASTROINTESTINAL"
    NON_HODGKINS_B_CELL_LYMPHOMA_LOW_MOD_GRADE = "NON_HODGKINS_B_CELL_LYMPHOMA_LOW_MOD_GRADE"
    CLASSICAL_HODGKINS = "CLASSICAL_HODGKINS"
    NODULAR_LYMPHOCYTE_PREDOMINANT_HODGKINS = "NODULAR_LYMPHOCYTE_PREDOMINANT_HODGKINS"
    T_CELL_LYMPHOMA = "T_CELL_LYMPHOMA"

    def __hash__(self):
        # Hash instances via their string form, matching the other enums.
        return hash(str(self))
/42Points-1.2.7-py3-none-any.whl/ftptsgame/expr_utils.py |
import ast
import itertools
from copy import deepcopy
from fractions import Fraction
class Node(object):
    """An expression tree.

    A node is either a number leaf (``type == NODE_TYPE_NUMBER``) or an
    operator node (``type == NODE_TYPE_OPERATOR``) with two children.
    ``value`` always holds the exact numeric result of the subtree
    (int or Fraction, so division is exact).
    """
    # Discriminators for the two node kinds.
    NODE_TYPE_NUMBER = 0
    NODE_TYPE_OPERATOR = 1
    def __init__(self, _type=NODE_TYPE_NUMBER, ch=None, left=None, right=None):
        """Initialize the node.

        Args:
            _type: NODE_TYPE_NUMBER for a leaf, NODE_TYPE_OPERATOR otherwise.
            ch: operator character ('+', '-', '*', '/') for operator nodes,
                or the numeric literal for leaves.
            left: left child (operator nodes only).
            right: right child (operator nodes only).
        """
        self.type = _type
        self.left = left
        self.right = right
        if self.type == Node.NODE_TYPE_OPERATOR:
            # Value is computed eagerly from the (already evaluated) children.
            self.value = Node.operation(ch, self.left.value, self.right.value)
            self.ch = ch
        else:
            # '#' marks "no operator" on number leaves.
            self.value = int(ch)
            self.ch = '#'
    @staticmethod
    def operation(opt, x, y):
        """Basic arithmetic operation between two numbers.

        Division uses Fraction for exact results; dividing by zero raises
        ArithmeticError instead of ZeroDivisionError.
        """
        if opt == '/' and y == 0:
            raise ArithmeticError('x/0')
        operation_list = {
            '+': lambda x, y: x + y,
            '-': lambda x, y: x - y,
            '*': lambda x, y: x * y,
            '/': lambda x, y: Fraction(x, y)
        }
        return operation_list[opt](x, y)
    def node_list(self) -> list:
        """Get the list of a node (in-order traversal of the subtree)."""
        if self.type == Node.NODE_TYPE_OPERATOR:
            return self.left.node_list() + [self] + self.right.node_list()
        else:
            return [self]
    def unique_id(self) -> str:
        """Return the unique id of this expression (prefix serialization:
        each operator precedes its operands; leaves are ``[value]``)."""
        if self.type == Node.NODE_TYPE_OPERATOR:
            return self.ch + self.left.unique_id() + self.right.unique_id()
        else:
            return '[' + str(self.value) + ']'
    def __repr__(self) -> str:
        """Return the string form of this expression."""
        if self.type != Node.NODE_TYPE_OPERATOR:
            return str(self.value)
        # Parenthesize a child only where precedence/associativity demands it.
        # The bool flags multiply the paren strings (True -> 1 copy, False -> 0).
        deal_l = self.ch in '*/' and self.left.ch in '+-'
        deal_r = (self.ch in '-*/' and self.right.ch in '+-') or (self.ch == '/' and self.right.ch in '*/')
        left_string = '(' * deal_l + repr(self.left) + ')' * deal_l
        right_string = '(' * deal_r + repr(self.right) + ')' * deal_r
        return left_string + self.ch + right_string
    def evaluate(self, values: dict = None) -> Fraction:
        """Evaluate the value of this expression using substitution.

        When *values* is given, each leaf literal is used as a key into
        *values* and the mapped number is substituted before evaluating;
        otherwise the precomputed value is returned.
        """
        if values is None:
            return self.value
        if self.type == Node.NODE_TYPE_OPERATOR:
            return Node.operation(self.ch, self.left.evaluate(values), self.right.evaluate(values))
        else:
            return Fraction(values[int(self.value)])
    def extract(self) -> list:
        """Extract numbers from the node (leaf values, left to right)."""
        if self.type == Node.NODE_TYPE_OPERATOR:
            return self.left.extract() + self.right.extract()
        else:
            return [int(self.value)]
    def reduce_negative_number(self):
        """
        Make all intermediate results of this expression not be negative.
        The result of whole expression will become its absolute value.
        """
        def _neg(v1: Fraction, v2: Fraction) -> Fraction:
            # Flip the sign of v1 iff v2 is negative.
            return v1 * (1 - 2 * (v2 < 0))
        if self.type != Node.NODE_TYPE_OPERATOR:
            return self.value
        left_value = self.left.reduce_negative_number()
        right_value = self.right.reduce_negative_number()
        return_value = Node.operation(self.ch, left_value, right_value)
        if self.ch not in '+-':
            # '*' and '/': children are already non-negative after recursion;
            # just clamp this node's cached value.
            self.value = abs(return_value)
            return return_value
        # '+'/'-': recompute the operator from the signed operand
        # contributions.  char_map converts between operator characters and
        # sign multipliers in both directions.
        char_map = {'+': 1, '-': -1, 1: '+', -1: '-'}
        left_opt = 1
        right_opt = char_map[self.ch]
        left_opt = _neg(left_opt, left_value)
        left_value = _neg(left_value, left_value)
        right_opt = _neg(right_opt, right_value)
        # NOTE(review): the left branch uses _neg(left_value, left_value)
        # here, but this line passes right_opt as the first argument.  The
        # reassigned right_value (like left_value above) is never read again,
        # so this is dead but confusing code — confirm intent.
        right_value = _neg(right_opt, right_value)
        left_opt = _neg(left_opt, return_value)
        right_opt = _neg(right_opt, return_value)
        if left_opt == 1:
            self.ch = char_map[right_opt]
        else:
            # Left contribution is negative: swap operands so the positive
            # one leads and the operator becomes '-'.
            self.ch = '-'
            self.left, self.right = self.right, self.left
        self.value = abs(return_value)
        return return_value
    def all_equivalent_expression(self):
        """
        Return the list of all equivalent expression of an expression.
        Rule 1 (equivalence by identical equation) is not considered.
        If expression A induces expression B, B may not induce A.
        """
        if self.type != Node.NODE_TYPE_OPERATOR:
            return
        # Generators of equivalent rewrites for each child subtree.
        left_equal_list = self.left.all_equivalent_expression()
        right_equal_list = self.right.all_equivalent_expression()
        left_value, right_value = self.left.value, self.right.value
        # Recursive case: substitute each child rewrite into this node.
        for new_left in left_equal_list:
            yield Node(Node.NODE_TYPE_OPERATOR, self.ch, new_left, self.right)
        for new_right in right_equal_list:
            yield Node(Node.NODE_TYPE_OPERATOR, self.ch, self.left, new_right)
        # Rule 2: x-0 --> x+0
        # x/1 --> x*1
        # 0/x --> 0*x
        if self.ch == '-' and right_value == 0:
            yield Node(Node.NODE_TYPE_OPERATOR, '+', self.left, self.right)
        if self.ch == '/' and right_value == 1:
            yield Node(Node.NODE_TYPE_OPERATOR, '*', self.left, self.right)
        if self.ch == '/' and left_value == 0:
            yield Node(Node.NODE_TYPE_OPERATOR, '*', self.left, self.right)
        # Rule 3: (x?y)+0 --> (x+0)?y, x?(y+0)
        # (x?y)*1 --> (x*1)?y, x?(y*1)
        if ((self.ch == '+' and right_value == 0) or
            (self.ch == '*' and right_value == 1)) \
                and self.left.type == Node.NODE_TYPE_OPERATOR:
            yield Node(Node.NODE_TYPE_OPERATOR, self.left.ch, Node(Node.NODE_TYPE_OPERATOR, self.ch, self.left.left,
                                                                   self.right), self.left.right)
            yield Node(Node.NODE_TYPE_OPERATOR, self.left.ch, self.left.left,
                       Node(Node.NODE_TYPE_OPERATOR, self.ch, self.left.right, self.right))
        # Rule 4: (y+z)/x --> (x-y)/z, (x-z)/y when x=y+z
        if self.ch == '/' and self.left.ch == '+' and \
                left_value == right_value and \
                self.left.left.value != 0 and self.left.right.value != 0:
            yield Node(Node.NODE_TYPE_OPERATOR, '/', Node(Node.NODE_TYPE_OPERATOR, '-', self.right, self.left.left),
                       self.left.right)
            yield Node(Node.NODE_TYPE_OPERATOR, '/', Node(Node.NODE_TYPE_OPERATOR, '-', self.right, self.left.right),
                       self.left.left)
        # Rule 5: x*(y/y) --> x+(y-y)
        if self.ch == '*' and self.right.ch == '/' and right_value == 1:
            yield Node(Node.NODE_TYPE_OPERATOR, '+', self.left,
                       Node(Node.NODE_TYPE_OPERATOR, '-', self.right.left, self.right.right))
        # Rule 6: x_1/x_2 --> x_2/x_1
        if self.ch == '/' and left_value == right_value:
            yield Node(Node.NODE_TYPE_OPERATOR, '/', self.right, self.left)
        # Rule 7: Changing two sub-expressions which have the same result
        # doesn't change the equivalence class of this expression.
        # The swap is done in place, a deepcopy is yielded, then the swap is
        # undone so this node is left unchanged for subsequent rules.
        left_node_list = self.left.node_list()
        right_node_list = self.right.node_list()
        for nl, nr in itertools.product(left_node_list, right_node_list):
            if nl.value == nr.value:
                nl.type, nl.left, nl.right, nl.ch, nl.value, \
                    nr.type, nr.left, nr.right, nr.ch, nr.value = \
                    nr.type, nr.left, nr.right, nr.ch, nr.value, \
                    nl.type, nl.left, nl.right, nl.ch, nl.value
                yield deepcopy(self)
                nl.type, nl.left, nl.right, nl.ch, nl.value, \
                    nr.type, nr.left, nr.right, nr.ch, nr.value = \
                    nr.type, nr.left, nr.right, nr.ch, nr.value, \
                    nl.type, nl.left, nl.right, nl.ch, nl.value
        # Rule 8: 2*2 --> 2+2
        # 4/2 --> 4-2
        if self.ch == '*' and left_value == 2 and right_value == 2:
            yield Node(Node.NODE_TYPE_OPERATOR, '+', self.left, self.right)
        if self.ch == '/' and left_value == 4 and right_value == 2:
            yield Node(Node.NODE_TYPE_OPERATOR, '-', self.left, self.right)
    def unique_id_for_rule_1(self, values_list: list) -> tuple:
        """
        Return the unique id of this expression.
        Two expressions is equivalent by rule 1 iff they have the same id.
        """
        # Fingerprint the expression by evaluating it under every
        # substitution in values_list.
        results = [self.evaluate(values) for values in values_list]
        return tuple(results)
def _build_node(node) -> Node:
    """Recursively convert an AST node into an expression-tree Node.

    Only the four arithmetic operators and integer literals are accepted;
    anything else raises SyntaxError.
    """
    allowed_ops = {type(ast.Add()): '+', type(ast.Sub()): '-', type(ast.Mult()): '*', type(ast.Div()): '/'}
    if isinstance(node, ast.BinOp) and type(node.op) in allowed_ops:
        return Node(_type=Node.NODE_TYPE_OPERATOR,
                    ch=allowed_ops[type(node.op)],
                    left=_build_node(node.left),
                    right=_build_node(node.right))
    if isinstance(node, ast.Num) and type(node.n) is int:
        return Node(_type=Node.NODE_TYPE_NUMBER, ch=node.n)
    raise SyntaxError('Unallowed operator or operands.')
def build_node(token: str) -> Node:
    """Parse *token* into an expression tree with negatives normalised away."""
    parsed = ast.parse(token, mode='eval').body
    tree = _build_node(parsed)
    tree.reduce_negative_number()
    return tree
/Flask-WaffleConf-0.3.1.tar.gz/Flask-WaffleConf-0.3.1/docs/source/index.rst | Welcome to Flask-WaffleConf's documentation!
============================================
WaffleConf is a Flask extension that enables storage of configuration variables
in the database as well as runtime modification of said variables.
**Released under GPLv2+ license.**
Latest version: **0.3.0**
Contents:
.. toctree::
:hidden:
self
.. toctree::
:maxdepth: 1
quickstart
configuration
multiproc
usage
.. toctree::
:maxdepth: 4
flask_waffleconf
Indices and tables
==================
* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`
| PypiClean |
/CAM2ImageArchiver-2.0.3.tar.gz/CAM2ImageArchiver-2.0.3/README.md | # CAM2 Image Archiver

### Citation ###
If you use this software, please include the following statement in acknowledgments
"The image archiving program is provided by the CAM2 (Continuous Analysis
of Many CAMeras) project at Purdue University."
### What is this repository for? ###
* This repository stores the source code for retrieving data (image
or video) from network cameras.
* This is part of Purdue's CAM2 (Continuous Analysis of Many CAMeras)
project. The project's web site is https://www.cam2project.net/
* Please read the terms of use
https://www.cam2project.net/terms/
In particular, "You agree not to use the Platform to determine the
identity of any specific individuals contained in any video or video
stream."
* Software licensed under Apache license. See LICENSE.txt for details.
* The lead investigator is Dr. Yung-Hsiang Lu, yunglu@purdue.edu. Please
send your questions, comments, or suggestions to him.
### Motivation ###
Thousands of network cameras are connected to the Internet and provide
real-time visual data (image or video). Many network cameras require
no password and anyone connected to the Internet can retrieve the
data,i.e., the data is publicly available. This program considers
only publicly available camera data.
Even though the data is publicly available to anyone interested
seeing, there are several problems. First, there is no central
repository where network cameras must register. Thus, significant
efforts must be taken to find various sources of data. Second,
different brands of network cameras need different methods to retrieve
the data. The cameas may also provide different data formats: some
provide individual JPEG images; some provide motion JPEG (MJPEG)
video; some others provide H.264 video.
Many organizations (such as departments of transportation) aggregate
streams of multiple cameras and put these streams on web sites.
However, these web sites have different formats and styles. Some web
sites use simple HTML; some use CSS; some use Javascript. Some web
sites have fixed URLs for different cameras. Some web site have
dynamically generated URLs reflecting the time (thus, the URLs are
always changing).
To solve these problems, researchers at Purdue University are
developing the software to retrieve data from heterogeneous sources.
This software requires a database that stores cameras' information
(how to retrieve the data). The repository contains some examples of
entries in a database (using MySQL).
### Documentation ###
Full documentation can be found at https://purduecam2project.github.io/CAM2ImageArchiver/index.html
### Prerequisites ###
* [Install MySQL](https://help.ubuntu.com/lts/serverguide/mysql.html) to maintain the camera database.
* [Install OpenCV](https://github.com/jayrambhia/Install-OpenCV) to decode the downloaded images.
```
sudo apt-get install libopencv-dev python-opencv
```
* Install MySQLdb to access the MySQL database from Python:
```
sudo apt-get install python-mysqldb
```
### Database Setup ###
* Create an empty MySQL database using the following MySQL command:
```
CREATE DATABASE cam2;
```
* Build the database using the provided file and the following Linux command:
```
mysql -u root -p cam2 < sample_database.sql
```
* Modify the database credentials in the ```archiver.py``` module:
```
DB_SERVER = 'localhost'
DB_USER_NAME = 'root'
DB_PASSWORD = ''
DB_NAME = 'cameras'
```
### Files ###
* ```CAM2ImageArchiver.py``` is the main Python module. It archives images from a single camera.
* ```camera.py``` provides classes to communicate with different types of cameras: IP cameras, non-IP cameras, and stream cameras.
* ```StreamParser.py``` is used by ```camera.py``` to parse JPEG and MJPEG streams.
* ```error.py``` contains custom Python Exceptions.
* ```CamerHandler.py``` splits the retrieval job into threads for parallel processing.
### Usage ###
Example usage can be found in the documentation.
This program downloads image snapshots from 2 sources
(1) A given URL address
(2) A camera ID in the MySQL database * MySQL database must be available on host computer.
| PypiClean |
/LumberMill-0.9.5.7-py3-none-any.whl/lumbermill/assets/webserver_docroot/static/js/gambolputty_web.js | $(document).ready(function() {
updateServerSystemInfo()
setInterval(updateServerSystemInfo,5000);
updateLogs()
})
function updateLogs() {
    // For every per-host log container (<div id="HOST_log">), open a
    // WebSocket to that host's log endpoint and append incoming messages.
    $("div:regex(id, .*_log)").each(function(idx) {
        // Extract the hostname from the container id ("HOST_log" -> "HOST").
        var hostname = $(this).attr('id').replace('_log', '');
        var container = this;
        var ws = new WebSocket('ws://' + hostname + ":" + location.port + "/" + globalSettings.getLogsUrl);
        ws.onmessage = function(evt) {
            // Declare with var: the originals leaked data/log_message/content
            // into the global scope, risking races between per-host sockets.
            var data = JSON.parse(evt.data);
            // Log lines may carry ANSI colour codes; render them as HTML.
            var logMessage = ansi_up.ansi_to_html(data.log_message);
            var content = $(container).html() + logMessage + "<br>";
            $(container).html(content);
        };
        //ws.onerror = function(evt) { console.log(evt) };
    })
}
function confirmRestartGambolPuttyService(hostname) {
    // Ask the user for confirmation before triggering a remote restart.
    var question = "Really restart LumberMill service on server " + hostname + "?";
    bootbox.confirm(question, function(confirmed) {
        if (confirmed) {
            restartGambolPuttyService(hostname);
        }
    });
}
function restartGambolPuttyService(hostname) {
    // Call the restart endpoint on the target host; log the JSON response.
    var url = "http://" + hostname + ":" + location.port + "/" + globalSettings.restartServiceUrl;
    $.getJSON(url, function(jsonData) {
        console.log(jsonData);
    });
}
function updateServerSystemInfo() {
    // Refresh every per-host sysinfo container (<div id="HOST_sysinfo">) with
    // data fetched from that host's server-info endpoint.
    $("div:regex(id, .*_sysinfo)").each(function(idx) {
        // Extract the hostname from the container id.
        var hostname = $(this).attr('id').replace('_sysinfo', '');
        var container = this;
        $.getJSON("http://"+hostname+":"+location.port+"/"+globalSettings.serverInfoUrl, function(jsonData) {
            // Set CPU count.
            var selector = '#'+escapeSelector(hostname+"_cpus");
            $(selector).html(" "+jsonData.cpu_count+" CPUs");
            // Set RAM size.
            selector = '#'+escapeSelector(hostname+"_ram");
            $(selector).html(" "+bytesToSize(jsonData.memory.total)+" total, "+bytesToSize(jsonData.memory.available)+" free");
            // Set system load (1/5/15-minute averages, two decimals).
            selector = '#'+escapeSelector(hostname+"_load");
            $(selector).html(" "+roundToFixed(jsonData.load[0], 2)+", "+roundToFixed(jsonData.load[1], 2)+", "+roundToFixed(jsonData.load[2], 2)+" ");
            // Set disk info: clear and rebuild the list on every refresh.
            selector = '#'+escapeSelector(hostname+"_hdds");
            $(selector).html("");
            // Declare loop variables with var: the original leaked `disk`
            // and `elements` into the global scope.
            for (var disk in jsonData.disk_usage) {
                var elements = $('<div/>').html('<h5><i class="fa fa-hdd-o pull-left"></i><span>'+disk+' , '+bytesToSize(jsonData.disk_usage[disk].total)+' total,'+bytesToSize(jsonData.disk_usage[disk].free)+' free'+'</span></h5>').contents();
                $(selector).append(elements);
            }
            // Reveal the container with a fade the first time data arrives.
            if ($(container).hasClass('invisible')) {
                $(container).hide().removeClass('invisible').fadeIn(500);
            }
        })
    })
}
function showServerConfiguration() {
    // TODO: unimplemented stub — intended to fetch the server configuration
    // from the backend and display it.
}
/Gpyts-1.0.3-py3-none-any.whl/gpyts/asyncGpyts/__init__.py |
#MIT License
#Copyright (c) 2021 Ripe
import asyncio, aiohttp, aiofiles, asyncio, random, json, os, io, re
from .. import config, errors
from typing import Union, List
from .. types import Translation, TextToSpeech
class Gpyts():
    """Gpyts is a library for Google translation and gTTS using Google Translation API.
    """

    def __init__(self, tld: Union[str, List[str]] = None, endpoint: Union[str, List[str]] = None, client: str = None, minimal: bool = False, labled: bool = True, proxy: str = None) -> None:
        """Configuration for Service Url and Client.

        Note:
            Provide endpoint, client only if you know valid combination of values.
            Example of tld(s): co.uk, tl
            Example of endpoint(s): translate.google.com, client0.google.com, translate.googleapis.com
            Example of client(s): gtx, t, dict-chrome-ex, webapp (needs `tk` token)
            Either use `tld` or `endpoint`; they do not work together. Just `tld` is
            enough for most uses, and even that is optional.

        Args:
            tld (str | List[str], Optional): Custom tld's you can provide like `com` or `co.uk`.
            endpoint (str | List[str], Optional): Custom endpoint url to be used (randomly chosen if multiple provided) instead of the default `endpoint`.
            client (str, Optional): Custom client to be used instead of the default `client`.
            minimal (bool, Optional): Result is simple, just a translation.
            labled (bool, Optional): Method returns either labled or indexed json.
            proxy (str, Optional): Proxy to be used like `http://user:pass@ip:port`.
        """
        # The aiohttp session is created lazily on first request and reused.
        self.__aioses = None
        self.__tld = tld or ''
        self.endpoint = config.tdlpoint if tld else endpoint or config.endpoint
        self.client = client or config.client
        self.__method = config.method[int(minimal)]
        self.__labled = int(labled)
        # Only http(s) proxies are accepted; anything else is silently dropped.
        self.proxy = proxy if proxy and re.match(r'^(http|https)://', proxy) else None

    async def translate(self, text: str, to_lang: str, from_lang: str = 'auto', i_enc: str = 'UTF-8', o_enc: str = 'UTF-8', web: bool = False) -> Translation:
        """Translate given text to target language.

        Args:
            text (str): Text to be translated.
            to_lang (str): Target language code.
            from_lang (str, Optional): Source language code ('auto' detects).
            i_enc (str, Optional): Input encoding.
            o_enc (str, Optional): Output encoding.
            web (bool, Optional): Scrape the mini version of the Google Translate web page instead of the api.

        Returns:
            Translation (obj): Result class object of translation.

        Raises:
            FloodError: If the api answers HTTP 429/503.
            ConfigError: If `endpoint` or `client` is invalid.
            InvalidLanguage: If `to_lang` or `from_lang` is an unlisted language code.
        """
        cfgvar = {
            'q': text,
            'hl': 'en',
            'sl': from_lang,
            'tl': to_lang,
            'dt': ['t', 'at', 'rm'],
            'ie': i_enc,
            'oe': o_enc,
            'sp': 'pbmt',
            'dj': self.__labled,
            'client': self.client
        }
        result = await self.__request('https://{endpoint}{tld}/{method}'.format(
            endpoint=random.choice(self.endpoint) if type(self.endpoint) == list else self.endpoint,
            tld=random.choice(self.__tld) if type(self.__tld) == list else self.__tld,
            method='m' if web else '%s_a/%s' % (config.key[1], self.__method)
        ),
            var=await self.__isvalid(cfgvar),
            proxy=self.proxy
        )
        return Translation(await self.__parsets(result) if web else json.loads(result))

    async def tts(self, text: str, lang: str, download: Union[str, bool, io.BytesIO] = './', slow: bool = False, i_enc: str = 'UTF-8') -> TextToSpeech:
        """Converts given text to speech in the target language.

        Args:
            text (str): Text to be converted.
            lang (str): Target language code.
            download (str | bool | io.BytesIO, Optional): Destination path, buffer, or False to skip saving.
            slow (bool, Optional): Slow speech rate.
            i_enc (str, Optional): Input encoding.

        Returns:
            TextToSpeech (obj): Result class object of tts.

        Raises:
            FloodError: If the api answers HTTP 429/503.
            ConfigError: If `endpoint` or `client` is invalid.
            InvalidLanguage: If `lang` is an unlisted language code.
        """
        cfgvar = {
            'q': text,
            'ie': i_enc,
            'hl': 'en',
            'tl': lang,
            'client': self.client or 'tw-ob',
            # slow=True yields 0.3, slow=False yields 1.0 (1.-True == 0.0 is falsy).
            'ttsspeed': 1. - slow or .3,
            'total': 1,
            'idx': 0,
        }
        result = await self.__request('https://{endpoint}{tld}/{method}'.format(
            endpoint=random.choice(self.endpoint) if type(self.endpoint) == list else self.endpoint,
            tld=random.choice(self.__tld) if type(self.__tld) == list else self.__tld,
            method='%s_tts' % config.key[1]
        ),
            var=await self.__isvalid(cfgvar),
            proxy=self.proxy,
            full=True
        )
        return TextToSpeech({'lang': lang, 'text': text, 'file': await self.__savetts(download, result._content) or result.url})

    async def iso(self, full: bool = False) -> dict:
        """Lists all supported iso language codes for both google translate (gts) and text2speech (tts).

        Returns:
            langs (dict of list[str]): Having both `gts` and `tts`.
        """
        return {'gts': config.supported_gts_lang if full else config.supported_gts_lang.values(), 'tts': config.supported_tts_lang}

    async def __isvalid(self, var: dict) -> dict:
        """Validates the request parameters, raising on empty text or unknown language codes.

        Args:
            var (dict): Request parameters to be validated.
        """
        if not var['q']:
            raise ValueError("Text can't be empty")
        # tts requests carry no 'sl' key, so validate against the tts list.
        if not var.get('sl') and var['tl'] not in config.supported_tts_lang:
            raise errors.InvalidLanguage("Unlisted target language code given. tts")
        if var.get('tl') and var['tl'] not in config.supported_gts_lang.values():
            raise errors.InvalidLanguage("Unlisted target language code given. gts")
        if var.get('sl') and var['sl'] not in config.supported_gts_lang.values() and var['sl'] != 'auto':
            raise errors.InvalidLanguage("Unlisted source language code given. gts")
        return var

    async def __parsets(self, content: str) -> dict:
        """Extracts the translation from the scraped web page markup.

        Args:
            content (bytes): Raw page body from which the result is extracted.
        """
        match = re.search(r"aria-label=\"Source text\".+value=\"(.*)\"><div class=\"translate-button-container\">.+<div class=\"result-container\">(.*)</div><div class=\"links-container\">", content.decode('UTF-8'), re.MULTILINE)
        result = {}
        if match:
            result = {
                'src': match.group(1),
                'sentences': [{'trans': match.group(2)}]
            }
        return result

    async def __savetts(self, path: Union[str, bool, io.BytesIO], payload: Union[bytes, str]):
        """Saves tts audio to a buffer or local file.

        Args:
            path (str | bool | io.BytesIO): Destination; False/'' skips saving.
            payload (bytes): Content of the tts output.
        """
        if type(path) == io.BytesIO:
            path.write(payload)
        elif path or path is None:
            # Bug fix: `path is None` used to crash on `None.rsplit`; treat it
            # as the default directory instead.
            if path is None:
                path = './'
            paths = path.rsplit('/', 1)
            if len(paths) > 1:
                os.makedirs(path.rsplit('/', 1)[0], exist_ok=True)
            if len(paths) > 1 and not paths[1]:
                # Path ends with '/': append a default file name.
                path += 'text2speech.mp3'
            async with aiofiles.open(path, 'wb') as f:
                await f.write(payload)
        else:
            path = False
        return path

    async def __request(self, url: str, var: dict, proxy: dict, full: bool = False) -> dict:
        """Performs the HTTP request against the Google translator api.

        Args:
            url (str): Fully formatted endpoint url.
            var (dict): Query parameters for the translator.
            proxy (str): Proxy url or None.
            full (bool, Optional): Return the response object instead of only the body.
        """
        self.__aioses = self.__aioses or aiohttp.ClientSession(headers=config.headers)
        async with self.__aioses.get(url, params=var, proxy=proxy) as response:
            if response.status == 200:
                response._content = await response.read()
                return response if full else response._content
            elif response.status in [404, 403, 408, 504]:
                raise errors.ConfigError('Invalid endpoint url or client given.')
            elif response.status in [429, 503]:
                raise errors.FloodError('Too many requests please try later.')
            else:
                # Bug fix: the original `raise response.raise_for_status()` raised
                # `None` (a TypeError) for statuses < 400. raise_for_status()
                # itself raises for 4xx/5xx; anything left is a config problem.
                response.raise_for_status()
                raise errors.ConfigError('Unexpected response status: %d' % response.status)

    def __del__(self):
        # NOTE(review): best-effort cleanup of the aiohttp session at garbage
        # collection time; scheduling/creating loops in __del__ is fragile and
        # may warn at interpreter shutdown — confirm against aiohttp guidance.
        if self.__aioses:
            try:
                loop = asyncio.get_event_loop()
            except RuntimeError:
                loop = asyncio.new_event_loop()
            if loop.is_running():
                loop.create_task(self.__aioses.close())
            else:
                loop.run_until_complete(self.__aioses.close())
/Bashkort_messenger-0.0.1-py3-none-any.whl/client/client_DB.py | import os
from sqlalchemy import create_engine, Table, Column, Integer, String, MetaData, DateTime, Text
from sqlalchemy.orm import mapper, sessionmaker
import datetime
from server.server_DB import ServerStorage
# NOTE(review): `global` at module scope is a no-op — `client_name` is only
# actually bound by the `global` statement inside ClientStorage.__init__.
global client_name

# Absolute directory of this file, used to locate the shared server database.
dir_path = os.path.dirname(os.path.realpath(__file__))

# The client reuses the *server's* storage to (re)build its local cache.
database_server = ServerStorage(
    os.path.join(
        dir_path,
        '../server/server_database'))
class ClientStorage:
    """Client-side SQLite storage (one database per client: client_<name>.db).

    Falls back to the shared server database to (re)populate known users,
    contacts and message history whenever the local database is empty.
    """

    # Shared handle to the server-side storage used for cache rebuilds.
    server_database = database_server

    class AllUsersClient:
        """Row object mapped onto the 'UsersClient' table (known users)."""

        def __init__(self, username, ip_address, port, sender_count, recepient_count):
            self.id = None
            self.username = username
            self.ip_address = ip_address
            self.port = port
            self.sender_count = sender_count
            self.recepient_count = recepient_count

    class MessageHistory:
        """Row object mapped onto the 'message_history' table."""

        def __init__(self, from_user, to_user, message, date):
            self.id = None
            self.from_user = from_user
            self.to_user = to_user
            self.message = message
            self.date = date

    class UsersContactsList:
        """Row object mapped onto the 'users_contacts' table."""

        def __init__(self, username, contact_name):
            self.username = username
            self.contact_name = contact_name

    def __init__(self, name):
        """Open (or create) the per-client SQLite database and map the tables."""
        self.database_engine = create_engine(f'sqlite:///client_{name}.db', echo=False, pool_recycle=7200,
                                             connect_args={'check_same_thread': False})
        # NOTE(review): the client name is stored in a module-level global so
        # the server-DB fallback helpers can read it; this breaks if several
        # ClientStorage instances with different names coexist.
        global client_name
        client_name = name
        self.metadata = MetaData()

        users_table = Table('UsersClient', self.metadata,
                            Column('id', Integer, primary_key=True),
                            Column('username', String, unique=True),
                            Column('ip_address', String),
                            Column('port', String),
                            Column('sender_count', Integer),
                            Column('recepient_count', Integer)
                            )

        message_history = Table('message_history', self.metadata,
                                Column('id', Integer, primary_key=True),
                                Column('from_user', String),
                                Column('to_user', String),
                                Column('message', Text),
                                Column('date', DateTime)
                                )

        users_contacts = Table('users_contacts', self.metadata,
                               Column('id', Integer, primary_key=True),
                               Column('username', String),
                               Column('contact_name', String),
                               )

        # Создаем таблицы / create tables and map the row classes onto them.
        self.metadata.create_all(self.database_engine)
        mapper(self.AllUsersClient, users_table)
        mapper(self.MessageHistory, message_history)
        mapper(self.UsersContactsList, users_contacts)

        # Создаем сессию / open the session.
        session = sessionmaker(bind=self.database_engine)
        self.session = session()

    def contacts_clear(self):
        """Clear the contacts table."""
        self.session.query(self.UsersContactsList).delete()
        # Bug fix: the delete was never committed (unlike del_contact).
        self.session.commit()

    def users_clear(self):
        """Clear the known-users table."""
        self.session.query(self.AllUsersClient).delete()
        # Bug fix: the delete was never committed.
        self.session.commit()

    def update_users(self):
        """Re-import the user list from the server database."""
        self.load_users_from_server()

    def update_contacts(self):
        """Re-import the contact list from the server database."""
        self.load_contact_from_server()

    def user_list_client(self, username=None):
        """Return known users as (id, username, ip, port) tuples, optionally filtered."""
        query = self.session.query(
            self.AllUsersClient.id,
            self.AllUsersClient.username,
            self.AllUsersClient.ip_address,
            self.AllUsersClient.port,
        )
        if username:
            query = query.filter(self.AllUsersClient.username == username)
        return query.all()

    def contacts_list(self, username=None):
        """Return contact rows; with a username, return the matching user tuples."""
        query = self.session.query(
            self.UsersContactsList.username,
            self.UsersContactsList.contact_name,
        )
        if username:
            contacts = set()
            users = self.user_list_client()
            for item in query:
                # Resolve each contact name to its full user tuple.
                cont_obj = [obj for obj in users if obj[1] == item[1]][0]
                contacts.add(cont_obj)
            return contacts
        return query.all()

    def load_users_from_client(self):
        """Return the locally cached user list."""
        return self.user_list_client()

    def load_users_from_server(self, username=None):
        """Import user rows from the server DB into the local cache.

        Needed when the client database was lost or never existed: a registered
        client would otherwise know nothing about the available users.
        """
        if username:
            item = self.server_database.user_list(username)[0]
            item = self.AllUsersClient(item.username, item.ip_address, item.port, item.sender_count,
                                       item.recepient_count)
            self.session.add(item)
            self.session.commit()
            return item
        else:
            users = sorted(self.server_database.user_list())
            for item in users:
                user = self.AllUsersClient(item.username, item.ip_address, item.port, item.sender_count,
                                           item.recepient_count)
                self.session.add(user)
            self.session.commit()
            return users

    def get_contact(self):
        """Return all (username, contact_name) pairs."""
        query = self.session.query(
            self.UsersContactsList.username,
            self.UsersContactsList.contact_name,
        )
        return query.all()

    def load_contact_from_server(self):
        """Import this client's contacts from the server DB (cache rebuild)."""
        res = self.server_database.contacts_list(client_name)
        user_contacts = []
        for item in res:
            if item.contact_name not in user_contacts:
                contact = self.UsersContactsList(item.username, item.contact_name)
                user_contacts.append(item.contact_name)
                self.session.add(contact)
        self.session.commit()
        return res

    def add_contact(self, contact_name):
        """Add a contact for the current client if it is not present yet."""
        res = self.session.query(self.UsersContactsList).filter_by(contact_name=contact_name)
        if not res.count():
            try:
                query = self.session.query(self.AllUsersClient).filter_by(username=client_name)
                username = query.first().username
            except Exception:
                # Unknown client name: store the contact with an empty owner.
                username = ''
            contacts = self.UsersContactsList(username, contact_name)
            self.session.add(contacts)
            self.session.commit()

    def del_contact(self, del_contact_name):
        """Remove a contact by name."""
        self.session.query(self.UsersContactsList).filter_by(contact_name=del_contact_name).delete()
        self.session.commit()

    def load_history_server_db(self):
        """Import this client's message history from the server DB (cache rebuild)."""
        res = self.server_database.contacts_list(client_name)
        res_to = self.server_database.to_client_message(client_name)
        for item in res:
            contact = self.MessageHistory(item.username, item.contact_name, item.message, item.contact_time)
            self.session.add(contact)
        for item in res_to:
            contact_to = self.MessageHistory(item.username, item.contact_name, item.message, item.contact_time)
            self.session.add(contact_to)
        self.session.commit()
        return res, res_to

    def save_message(self, from_user, to_user, message):
        """Persist a single message with the current timestamp."""
        # Bug fix: removed leftover debug print() calls.
        date = datetime.datetime.now()
        message_row = self.MessageHistory(from_user, to_user, message, date)
        self.session.add(message_row)
        self.session.commit()

    def get_history(self, from_user=None, to_user=None):
        """Return message history for the (from_user, to_user) pair, both directions.

        If the local history is empty, it is first reloaded from the server DB.
        """
        query = self.session.query(self.MessageHistory).filter_by(from_user=from_user, to_user=to_user)
        query_to = self.session.query(self.MessageHistory).filter_by(from_user=to_user, to_user=from_user)
        history = []
        if query.count():
            if from_user:
                history = [(history_row.from_user, history_row.to_user, history_row.message, history_row.date)
                           for history_row in query.all()]
            if to_user:
                history.extend([
                    (history_row.from_user, history_row.to_user, history_row.message, history_row.date)
                    for history_row in query_to.all()])
            return history
        else:
            self.load_history_server_db()
            # Bug fix: the original implicitly returned None here; return an
            # empty list so the return type stays consistent for callers.
            return history

    def init(self):
        """Populate the local cache from the server DB if it is empty."""
        if not self.load_users_from_client():
            self.load_users_from_server()
            self.load_contact_from_server()
            self.load_history_server_db()

    def check_user(self, user):
        """Return True if the user is among the known users."""
        if self.session.query(self.AllUsersClient).filter_by(username=user).count():
            return True
        else:
            return False

    def check_contact(self, contact):
        """Return True if the name is among this client's contacts."""
        if self.session.query(self.UsersContactsList).filter_by(contact_name=contact).count():
            return True
        else:
            return False
if __name__ == '__main__':
    # Ad-hoc smoke test: constructing the storage creates (or opens) the
    # client_client_Test_client.db SQLite file and maps all tables.
    # (38 lines of commented-out experiments were removed as dead code.)
    test_db = ClientStorage('client_Test_client')
/Chiplotle-0.4.1.tar.gz/Chiplotle-0.4.1/chiplotle/fonts/dorkbot.py |
def _glyph(rows):
    """Expand strings of '0'/'1' into the 7-row by 3-column bitmap lists used below."""
    return [[int(cell) for cell in row] for row in rows]


# Lowercase letter bitmaps (top row first). Names ending in '_' are
# alternate forms of the same letter.
a = _glyph(('000', '000', '111', '101', '111', '001', '000'))
a_ = _glyph(('000', '000', '111', '101', '111', '100', '000'))
b = _glyph(('100', '100', '111', '101', '111', '000', '000'))
b_ = _glyph(('001', '001', '111', '101', '111', '000', '000'))
c = _glyph(('000', '000', '111', '100', '111', '000', '000'))
c_ = _glyph(('000', '000', '111', '001', '111', '000', '000'))
d = _glyph(('001', '001', '111', '101', '111', '000', '000'))
d_ = _glyph(('100', '100', '111', '101', '111', '000', '000'))
e = _glyph(('000', '000', '111', '110', '111', '000', '000'))
e_ = _glyph(('000', '000', '111', '011', '111', '000', '000'))
f = _glyph(('111', '100', '110', '100', '100', '000', '000'))
g = _glyph(('000', '000', '111', '101', '111', '001', '111'))
h = _glyph(('100', '100', '111', '101', '101', '000', '000'))
i = _glyph(('010', '000', '010', '010', '010', '000', '000'))
i_ = i
j = _glyph(('001', '000', '001', '001', '001', '101', '111'))
k = _glyph(('100', '100', '101', '110', '101', '000', '000'))
k_ = _glyph(('001', '001', '101', '011', '101', '000', '000'))
l = _glyph(('010', '010', '010', '010', '010', '000', '000'))
l_ = l
m = _glyph(('101', '111', '101', '101', '101', '000', '000'))
m_ = m
n = _glyph(('000', '000', '111', '101', '101', '000', '000'))
n_ = n
o = _glyph(('000', '000', '111', '101', '111', '000', '000'))
o_ = o
p = _glyph(('000', '000', '111', '101', '111', '100', '100'))
q = _glyph(('000', '000', '111', '101', '111', '001', '001'))
r = _glyph(('000', '000', '111', '100', '100', '000', '000'))
r_ = _glyph(('000', '000', '111', '001', '001', '000', '000'))
s = _glyph(('000', '000', '111', '010', '111', '000', '000'))
t = _glyph(('010', '010', '111', '010', '010', '000', '000'))
t_ = t
u = _glyph(('000', '000', '101', '101', '111', '000', '000'))
v = _glyph(('000', '000', '101', '101', '010', '000', '000'))
w = _glyph(('101', '101', '101', '111', '101', '000', '000'))
x = _glyph(('000', '000', '101', '010', '101', '000', '000'))
y = _glyph(('000', '000', '101', '101', '111', '001', '111'))
y_ = _glyph(('000', '000', '101', '101', '111', '100', '111'))
z = _glyph(('000', '000', '111', '010', '111', '000', '000'))

# Punctuation and whitespace glyphs.
dash = _glyph(('000', '000', '000', '111', '000', '000', '000'))
bang = _glyph(('000', '010', '010', '010', '000', '010', '000'))
period = _glyph(('000', '000', '000', '000', '010', '000', '000'))
comma = _glyph(('000', '000', '000', '001', '010', '000', '000'))
colon = _glyph(('000', '000', '010', '000', '010', '000', '000'))
semicolon = _glyph(('000', '000', '010', '000', '010', '100', '000'))
plus = _glyph(('000', '000', '010', '111', '010', '000', '000'))
backslash = _glyph(('000', '000', '100', '010', '001', '000', '000'))
forwardslash = _glyph(('000', '000', '001', '010', '100', '000', '000'))
singlequote = _glyph(('000', '010', '000', '000', '000', '000', '000'))
doublequote = _glyph(('000', '101', '000', '000', '000', '000', '000'))
space = _glyph(('000',) * 7)

# Lookup table mapping each supported character to its glyph bitmap.
char_dict = {'a': a, 'b': b, 'c': c, 'd': d, 'e': e, 'f': f, 'g': g, 'h': h,
             'i': i, 'j': j, 'k': k, 'l': l, 'm': m, 'n': n, 'o': o, 'p': p, 'q': q, 'r': r,
             's': s, 't': t, 'u': u, 'v': v, 'w': w, 'x': x, 'y': y, 'z': z, '-': dash,
             '!': bang, '.': period, ',': comma, ':': colon, ';': semicolon, '+': plus,
             '\\': backslash, '/': forwardslash, '\'': singlequote, '"': doublequote,
             ' ': space}
/Nutter-Tools-0.0.32.tar.gz/Nutter-Tools-0.0.32/README.md | # API for finding same type of files and copy in a specific path
[](https://www.python.org/)
[](https://www.python.org/downloads/release/python-360/)
[Follow Developer](https://www.instagram.com/nicky_connects/?next=%2F)
## Functionality of the Music Player
- Better Optimization
- Pause/Play Supported
- Add/Delete songs from Playlist
- Previous/Next song function
- Time duration of song / next song displays
- List of all the songs.
- Adjust Volume
- Automatically Playing in Queue
- Play Selected song from Playlist
## Usage
- Make sure you have Python installed in your system.
- Run Following command in the CMD.
```
pip install NTools
```
## Example
```
#test.py
from NTools import copy_files
#Make sure you entered the correct file extension.
extension = '.pdf'
# enter the source and destination path as follows
s_path = "your source directory"
d_path = "your destination directory"
# Now the Function call should be like this
copy_files(s_path,d_path,extension)
```
## Run the following Script.
```
python test.py
```
## Output
- x files copied
- No files found with the extension
## Note
- I have tried to implement all the functionality, but it might still have some bugs. Please report any issues you find, or feel free to fix them yourself.
| PypiClean |
/Nuitka-1.8.tar.gz/Nuitka-1.8/nuitka/build/inline_copy/lib/scons-3.1.2/SCons/Tool/filesystem.py |
__revision__ = "src/engine/SCons/Tool/filesystem.py bee7caf9defd6e108fc2998a2520ddb36a967691 2019-12-17 02:07:09 bdeegan"
import SCons
from SCons.Tool.install import copyFunc
copyToBuilder, copyAsBuilder = None, None
def copyto_emitter(target, source, env):
    """Rewrite each target directory into per-source file nodes.

    Every entry in ``target`` is assumed to be a directory node; the emitted
    targets are the source file names relocated under each directory.
    """
    relocated = []
    for directory in target:
        relocated.extend(directory.File(str(src)) for src in source)
    return (relocated, source)
def copy_action_func(target, source, env):
    """Copy each source node onto its paired target node.

    Returns 0 on success, or 1 as soon as any copy fails (SCons action protocol).
    """
    assert len(target) == len(source), "\ntarget: %s\nsource: %s" % (
        list(map(str, target)), list(map(str, source)))
    for dst, src in zip(target, source):
        if copyFunc(dst.get_path(), src.get_path(), env):
            return 1
    return 0
def copy_action_str(target, source, env):
    """Render the COPYSTR message with target/source substituted, for build output."""
    template = env['COPYSTR']
    return env.subst_target_source(template, 0, target, source)
copy_action = SCons.Action.Action( copy_action_func, copy_action_str )
def generate(env):
    """Attach the CopyTo/CopyAs builders (and the COPYSTR message) to *env*."""
    try:
        # If both builders are already registered, there is nothing to do.
        env['BUILDERS']['CopyTo']
        env['BUILDERS']['CopyAs']
    except KeyError as e:
        # Builders are created at most once per process and cached in
        # module-level globals, then shared across environments.
        global copyToBuilder
        if copyToBuilder is None:
            copyToBuilder = SCons.Builder.Builder(
                       action         = copy_action,
                       target_factory = env.fs.Dir,
                       source_factory = env.fs.Entry,
                       multi          = 1,
                       emitter        = [ copyto_emitter, ] )

        global copyAsBuilder
        if copyAsBuilder is None:
            copyAsBuilder = SCons.Builder.Builder(
                       action         = copy_action,
                       target_factory = env.fs.Entry,
                       source_factory = env.fs.Entry )

        env['BUILDERS']['CopyTo'] = copyToBuilder
        env['BUILDERS']['CopyAs'] = copyAsBuilder

        env['COPYSTR'] = 'Copy file(s): "$SOURCES" to "$TARGETS"'
def exists(env):
    """This tool has no external requirements; report it as always available."""
    return 1
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4: | PypiClean |
/DerPyBooruPhi-0.10.3.tar.gz/DerPyBooruPhi-0.10.3/derpibooru/posts.py |
# Copyright (c) 2014, Joshua Stone
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from .request import get_posts, url_search_posts
from .post import Post
from .helpers import tags, join_params, set_limit
__all__ = [
"SearchPosts"
]
class SearchPosts(object):
    """
    All properties are read-only, and every method returns a new instance of
    SearchPosts() to avoid mutating state in ongoing search queries. This makes object
    interactions predictable as well as making versioning of searches relatively
    easy.
    """
    def __init__(self, q=None, limit=50,
                 per_page=25, page=1, url_domain="https://derpibooru.org", proxies=None):
        """
        By default initializes an instance of SearchPosts with the parameters to get
        the first 25 posts on Derpibooru's posts search page.
        """
        # Bug fix: `q` and `proxies` used to be shared mutable default
        # arguments ({"created_at.gte:1 week ago"} and {}); a fresh object is
        # now built per instance while keeping the same effective defaults.
        if q is None:
            q = {"created_at.gte:1 week ago"}
        self.proxies = proxies if proxies is not None else {}
        self.url_domain = url_domain
        self._params = {
            "q": tags(q),
            "per_page": set_limit(per_page),
            "page": set_limit(page)
        }
        # App-level cap on total results; API pagination is handled separately.
        self._limit = set_limit(limit)
        self._search = get_posts(self._params, self._limit,
                                 url_domain=self.url_domain, proxies=self.proxies)

    def __iter__(self):
        """
        Make SearchPosts() iterable so that new search results can be lazily generated
        for performance reasons.
        """
        return self

    @property
    def parameters(self):
        """
        Returns a list of available parameters; useful for passing state to new
        instances of SearchPosts().
        """
        return self._params

    @property
    def url(self):
        """
        Returns a search URL built on set parameters. Example based on default
        parameters:

        https://derpibooru.org/posts?page=1&per_page=25&pq=created_at.gte%3A1+week+ago
        """
        return url_search_posts(self.parameters, url_domain=self.url_domain)

    def query(self, *q):
        """
        Takes one or more strings for searching by tag and/or metadata.
        """
        params = join_params(self.parameters,
                             {"q": q, "limit": self._limit,
                              "url_domain": self.url_domain,
                              "proxies": self.proxies}
                             )
        return self.__class__(**params)

    def limit(self, limit):
        """
        Set absolute limit on number of posts to return, or set to None to return
        as many results as needed; default 50 posts. This limit is on app-level.
        """
        params = join_params(self.parameters, {"limit": limit,
                                               "url_domain": self.url_domain,
                                               "proxies": self.proxies})
        return self.__class__(**params)

    def query_append(self, *q):
        """
        Adds tags to the current search.
        """
        query = self.parameters['q'].union(q)
        params = join_params(self.parameters,
                             {"q": query, "limit": self._limit,
                              "url_domain": self.url_domain,
                              "proxies": self.proxies}
                             )
        return self.__class__(**params)

    def query_remove(self, *q):
        """
        Removes tags from the current search.
        """
        query = self.parameters['q'].difference(q)
        params = join_params(self.parameters,
                             {"q": query, "limit": self._limit,
                              "url_domain": self.url_domain,
                              "proxies": self.proxies}
                             )
        return self.__class__(**params)

    def get_page(self, page):
        """
        Set the page of search results to fetch.
        """
        params = join_params(self.parameters,
                             {"page": set_limit(page),
                              "limit": self._limit,
                              "url_domain": self.url_domain,
                              "proxies": self.proxies
                              }
                             )
        return self.__class__(**params)

    def per_page(self, limit):
        """
        Set absolute limit on number of posts to get per request; defaults to
        25 posts, max 50 posts. This limit is on API-level.
        """
        params = join_params(self.parameters,
                             {"per_page": set_limit(limit),
                              "limit": self._limit,
                              "url_domain": self.url_domain,
                              "proxies": self.proxies
                              }
                             )
        return self.__class__(**params)

    def __next__(self):
        """
        Returns a result wrapped in a new instance of Post().
        """
        return Post(next(self._search), url_domain=self.url_domain, proxies=self.proxies)
/BuilT-0.0.4-py3-none-any.whl/built/metric.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import logging
import numpy as np
import torch
from typing import Dict
class MetricBase(object):
    """Base class for metrics.

    Subclasses implement `calc()` and report values through `add()`;
    `calculate()` runs a calc pass and returns the accumulated metric dict.
    """

    # NOTE(review): `__metaclass__` is Python 2 syntax and has no effect on
    # Python 3, so @abc.abstractmethod is NOT enforced here. Kept as-is so
    # instantiation behaviour does not change for existing callers.
    __metaclass__ = abc.ABCMeta

    class MetricStore(object):
        """Simple name -> float mapping with strict add/update semantics."""

        def __init__(self):
            self.__store: Dict[str, float] = {}

        def add(self, key: str, value: float):
            """Insert a new key; raises KeyError if it already exists."""
            assert key is not None
            assert value is not None
            if key in self.__store.keys():
                raise KeyError(f'{key} already exists.')
            self.__store[key] = value

        def update(self, key: str, value: float):
            """Overwrite an existing key; raises KeyError if it is missing."""
            assert key is not None
            assert value is not None
            if key in self.__store.keys():
                self.__store[key] = value
            else:
                raise KeyError(f'{key} does not exist.')

        def get(self) -> Dict[str, float]:
            """Return the underlying metric dict."""
            return self.__store

    def __init__(self):
        self.store = self.MetricStore()

    @abc.abstractmethod
    def calc(self, outputs, targets, extra_data=None, is_train=False, device='cpu'):
        """Compute metric values and record them via self.add().

        Bug fix: removed a stray debug print('test') left in this base method.
        """

    def add(self, key: str, value: float):
        """Upsert: add the key, or overwrite it if it already exists."""
        try:
            self.store.add(key, value)
        except KeyError:
            self.store.update(key, value)

    def calculate(self, outputs, targets, extra_data=None, is_train=False, device='cpu') -> Dict[str, float]:
        """Run calc() and return the accumulated metric dict."""
        self.calc(outputs, targets, extra_data, is_train, device)
        return self.store.get()
class DefaultMetric(MetricBase):
    """Fallback metric: top-1 classification accuracy over 2-D logits."""

    def calc(self, outputs, targets, extra_data=None, is_train=False, device='cpu'):
        """Compute accuracy of `outputs` (logits array/tensor, or a dict with a
        'logits' entry) against integer class `targets`, storing the percentage
        under both 'accuracy' and 'score'.

        Bug fixes: the original referenced an undefined name `labels` (the
        ground truth arrives in `targets`), and the third parameter name was
        garbled ('daextra_datata'); `calculate()` passes it positionally, so
        renaming it to match the base class is backward-compatible.
        """
        logging.debug("Default metric is called")
        logits = outputs['logits'] if isinstance(outputs, dict) else outputs
        if isinstance(logits, torch.Tensor):
            logits = logits.cpu().detach().numpy()
        labels = targets
        if isinstance(labels, torch.Tensor):
            labels = labels.cpu().detach().numpy()

        assert len(logits.shape) == 2  # expected (batch, num_classes)
        predicts = np.argmax(logits, axis=1)
        correct = np.sum((predicts == labels).astype(int))
        total = predicts.shape[0]
        accuracy = 100. * correct / total
        self.add('accuracy', accuracy)
        self.add('score', accuracy)
/MetPy-1.5.1-py3-none-any.whl/metpy/interpolate/one_dimension.py | """Interpolate data along a single axis."""
import numpy as np
from .. import _warnings
from ..cbook import broadcast_indices
from ..package_tools import Exporter
from ..xarray import preprocess_and_wrap
exporter = Exporter(globals())
@exporter.export
@preprocess_and_wrap()
def interpolate_nans_1d(x, y, kind='linear'):
    """Interpolate NaN values in y.

    Interpolate NaN values in the y dimension. Works with unsorted x values.

    Parameters
    ----------
    x : array-like
        1-dimensional array of numeric x-values
    y : array-like
        1-dimensional array of numeric y-values
    kind : str
        specifies the kind of interpolation x coordinate - 'linear' or 'log', optional.
        Defaults to 'linear'.

    Returns
    -------
    An array of the y coordinate data with NaN values interpolated, returned in
    the same order as the input.

    """
    x_sort_args = np.argsort(x)
    x = x[x_sort_args]
    y = y[x_sort_args]
    nans = np.isnan(y)
    if kind == 'linear':
        y[nans] = np.interp(x[nans], x[~nans], y[~nans])
    elif kind == 'log':
        y[nans] = np.interp(np.log(x[nans]), np.log(x[~nans]), y[~nans])
    else:
        raise ValueError(f'Unknown option for kind: {kind}')
    # Bug fix: `y[x_sort_args]` re-applied the *forward* permutation to the
    # already-sorted data, scrambling the result whenever x was unsorted.
    # Applying the inverse permutation restores the caller's ordering.
    inverse_sort = np.empty_like(x_sort_args)
    inverse_sort[x_sort_args] = np.arange(x_sort_args.size)
    return y[inverse_sort]
@exporter.export
@preprocess_and_wrap()
def interpolate_1d(x, xp, *args, axis=0, fill_value=np.nan, return_list_always=False):
    r"""Interpolates data with any shape over a specified axis.
    Interpolation over a specified axis for arrays of any shape.
    Parameters
    ----------
    x : array-like
        1-D array of desired interpolated values.
    xp : array-like
        The x-coordinates of the data points.
    args : array-like
        The data to be interpolated. Can be multiple arguments, all must be the same shape as
        xp.
    axis : int, optional
        The axis to interpolate over. Defaults to 0.
    fill_value: float, optional
        Specify handling of interpolation points out of data bounds. If None, will return
        ValueError if points are out of bounds. Defaults to nan.
    return_list_always: bool, optional
        Whether to always return a list of interpolated arrays, even when only a single
        array is passed to `args`. Defaults to ``False``.
    Returns
    -------
    array-like
        Interpolated values for each point with coordinates sorted in ascending order.
    Examples
    --------
    >>> import metpy.interpolate
    >>> x = np.array([1., 2., 3., 4.])
    >>> y = np.array([1., 2., 3., 4.])
    >>> x_interp = np.array([2.5, 3.5])
    >>> metpy.interpolate.interpolate_1d(x_interp, x, y)
    array([2.5, 3.5])
    Notes
    -----
    xp and args must be the same shape.
    """
    # Handle units
    x, xp = _strip_matching_units(x, xp)
    # Make x an array
    x = np.asanyarray(x).reshape(-1)
    # Sort input data
    sort_args = np.argsort(xp, axis=axis)
    sort_x = np.argsort(x)
    # The shape after all arrays are broadcast to each other
    # Can't use broadcast_shapes until numpy >=1.20 is our minimum
    final_shape = np.broadcast(xp, *args).shape
    # indices for sorting
    sorter = broadcast_indices(sort_args, final_shape, axis)
    # sort xp -- need to make sure it's been manually broadcast due to our use of indices
    # along all axes.
    xp = np.broadcast_to(xp, final_shape)
    xp = xp[sorter]
    # Ensure source arrays are also in sorted order
    variables = [arr[sorter] for arr in args]
    # Make x broadcast with xp: insert a length-1 axis everywhere except
    # the interpolation axis so x lines up against xp's shape.
    x_array = x[sort_x]
    expand = [np.newaxis] * len(final_shape)
    expand[axis] = slice(None)
    x_array = x_array[tuple(expand)]
    # Calculate value above interpolated value.  ``minv`` holds, for each
    # target point, the insertion index into the sorted xp along ``axis``
    # (np.searchsorted), i.e. the index of the bracketing value *above*.
    minv = np.apply_along_axis(np.searchsorted, axis, xp, x[sort_x])
    minv2 = np.copy(minv)
    # If fill_value is none and data is out of bounds, raise value error
    if ((np.max(minv) == xp.shape[axis]) or (np.min(minv) == 0)) and fill_value is None:
        raise ValueError('Interpolation point out of data bounds encountered')
    # Warn if interpolated values are outside data bounds, will make these the values
    # at end of data range.
    if np.max(minv) == xp.shape[axis]:
        _warnings.warn('Interpolation point out of data bounds encountered')
        minv2[minv == xp.shape[axis]] = xp.shape[axis] - 1
    if np.min(minv) == 0:
        minv2[minv == 0] = 1
    # Get indices for broadcasting arrays.  ``minv2`` is the clamped copy, so
    # ``above``/``below`` always index a valid bracketing pair; out-of-bounds
    # points are overwritten with fill_value afterwards.
    above = broadcast_indices(minv2, final_shape, axis)
    below = broadcast_indices(minv2 - 1, final_shape, axis)
    if np.any(x_array < xp[below]):
        _warnings.warn('Interpolation point out of data bounds encountered')
    # Create empty output list
    ret = []
    # Calculate interpolation for each variable
    for var in variables:
        # Var needs to be on the *left* of the multiply to ensure that if it's a pint
        # Quantity, it gets to control the operation--at least until we make sure
        # masked arrays and pint play together better. See https://github.com/hgrecco/pint#633
        var_interp = var[below] + (var[above] - var[below]) * ((x_array - xp[below])
                                                              / (xp[above] - xp[below]))
        # Set points out of bounds to fill value.
        var_interp[minv == xp.shape[axis]] = fill_value
        var_interp[x_array < xp[below]] = fill_value
        # Check for input points in decreasing order and return output to match.
        if x[0] > x[-1]:
            var_interp = np.swapaxes(np.swapaxes(var_interp, 0, axis)[::-1], 0, axis)
        # Output to list
        ret.append(var_interp)
    if return_list_always or len(ret) > 1:
        return ret
    else:
        return ret[0]
@exporter.export
@preprocess_and_wrap()
def log_interpolate_1d(x, xp, *args, axis=0, fill_value=np.nan):
    r"""Interpolates data with logarithmic x-scale over a specified axis.
    Interpolation on a logarithmic x-scale for interpolation values in pressure coordinates.
    Parameters
    ----------
    x : array-like
        1-D array of desired interpolated values.
    xp : array-like
        The x-coordinates of the data points.
    args : array-like
        The data to be interpolated. Can be multiple arguments, all must be the same shape as
        xp.
    axis : int, optional
        The axis to interpolate over. Defaults to 0.
    fill_value: float, optional
        Specify handling of interpolation points out of data bounds. If None, will return
        ValueError if points are out of bounds. Defaults to nan.
    Returns
    -------
    array-like
        Interpolated values for each point with coordinates sorted in ascending order.
    Examples
    --------
    >>> x_log = np.array([1e3, 1e4, 1e5, 1e6])
    >>> y_log = np.log(x_log) * 2 + 3
    >>> x_interp = np.array([5e3, 5e4, 5e5])
    >>> metpy.interpolate.log_interpolate_1d(x_interp, x_log, y_log)
    array([20.03438638, 24.63955657, 29.24472675])
    Notes
    -----
    xp and args must be the same shape.
    """
    # Strip any units, map the coordinates into log space, then defer to the
    # linear interpolator.
    x, xp = _strip_matching_units(x, xp)
    return interpolate_1d(np.log(x), np.log(xp), *args,
                          axis=axis, fill_value=fill_value)
def _strip_matching_units(*args):
"""Ensure arguments have same units and return with units stripped.
Replaces `@units.wraps(None, ('=A', '=A'))`, which breaks with `*args` handling for
pint>=0.9.
"""
if all(hasattr(arr, 'units') for arr in args):
return [arr.to(args[0].units).magnitude for arr in args]
else:
# Handle the case where we get mixed 'dimensionless' and bare array. This happens e.g.
# when you pass in a DataArray with no units for one arg.
return [arr.m_as('dimensionless') if hasattr(arr, 'units') else arr for arr in args] | PypiClean |
/Nxpy-0.6.0.tar.gz/Nxpy-0.6.0/nxpy/etree/util.py |
# Copyright Nicola Musatti 2010 - 2017
# Use, modification, and distribution are subject to the Boost Software
# License, Version 1.0. (See accompanying file LICENSE.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
# See http://nxpy.sourceforge.net for library home page. ---------------------
r"""
ElemenTree related utility classes and functions.
Requires at least Python 2.6. Simple import breaks on Python 2.5
"""
from __future__ import absolute_import
import collections
import re
import xml.etree.ElementTree
import six
import nxpy.core.error
import nxpy.core.past
nxpy.core.past.enforce_at_least(nxpy.core.past.V_2_6)
def make_property(elem, key=None):
    r"""Build a property that delegates to the element attribute *elem*.

    Without *key* the property proxies the element's text; with *key* it
    proxies that attribute of the element.  Setting through the property also
    flags the owning object as modified.
    """
    if key:
        def _read(self):
            return getattr(self, elem).get(key)

        def _write(self, value):
            getattr(self, elem).set(key, value)
            self._modified = True
    else:
        def _read(self):
            return getattr(self, elem).text

        def _write(self, value):
            getattr(self, elem).text = value
            self._modified = True
    return property(_read, _write)
class QName(object):
    r"""Represents a qualified name of the form ``{url}tag``."""

    # Splits "{url}tag" into its url and local-tag parts.
    _re = re.compile(r"\{(.*)\}(.*)")

    def __init__(self, tag):
        m = QName._re.match(tag)
        self.url = m.group(1)
        self.tag = m.group(2)

    @property
    def text(self):
        """The qualified name rebuilt as ``{url}tag`` (or bare ``tag``)."""
        t = []
        if len(self.url) != 0:
            t.append("{{{0}}}".format(self.url))
        t.append(self.tag)
        return "".join(t)

    def __str__(self):
        # ``text`` is a property, not a method: the original ``self.text()``
        # raised ``TypeError: 'str' object is not callable``.
        return self.text
class Namespace(object):
    r"""
    Represents an XML namespace and provides several utility functions that help handle a
    document without namespace tags.
    """
    def __init__(self, url="", element=None):
        # The namespace may be given directly as *url* or inferred from an
        # element's qualified tag -- never both.
        if len(url) > 0 and element is not None:
            raise nxpy.core.error.ArgumentError(
                    "Only one between url and element should be specified")
        if element is not None:
            url = QName(element.tag).url
        self.url = url
        # Clark-notation prefix ("{url}") prepended to every tag we handle.
        self.nspace = "{" + url + "}" if len(url) != 0 else ""

    def find(self, element, tag):
        """Namespace-aware ``element.find``."""
        return element.find(self.nspace + tag)

    def findall(self, element, tag):
        """Namespace-aware ``element.findall``."""
        return element.findall(self.nspace + tag)

    def findtext(self, element, tag, default=None):
        """Namespace-aware ``element.findtext``."""
        return element.findtext(self.nspace + tag, default)

    def get_tag(self, element):
        """Return *element*'s tag with the namespace prefix stripped."""
        return element.tag[len(self.nspace):]

    def Element(self, tag, attrib=None, **extra):
        """Create a new element with the namespace-qualified *tag*.

        ``attrib`` now defaults to None instead of a shared mutable ``{}``
        (mutable-default-argument anti-pattern); behavior is unchanged.
        """
        return xml.etree.ElementTree.Element(
            self.nspace + tag, attrib if attrib is not None else {}, **extra)

    def SubElement(self, parent, tag, attrib=None, **extra):
        """Create and attach a child element with a namespace-qualified *tag*."""
        return xml.etree.ElementTree.SubElement(
            parent, self.nspace + tag, attrib if attrib is not None else {}, **extra)
class ContainerElementMixin(Namespace):
    """Shared plumbing for container-like views over a child element.

    Locates ``root_tag`` beneath ``parent`` (it may be absent) and tracks
    whether the wrapped tree has been modified.
    """
    def __init__(self, parent, root_tag, namespace=""):
        super(ContainerElementMixin, self).__init__(namespace)
        self.parent = parent
        self.root_tag = root_tag
        self.root = self.find(self.parent, self.root_tag)
        self.modified = False

    def __len__(self):
        # A missing container element counts as empty.
        return 0 if self.root is None else len(self.root)
class MappingElementIterator(collections.Iterator):
    """Iterates over the (namespace-stripped) tags of a MappingElement."""
    def __init__(self, element):
        self.element = element
        # ``getchildren()`` returns a plain list, which has no ``iter()``
        # method: the original ``element.getchildren().iter()`` always raised
        # AttributeError.  Use the builtin ``iter()`` instead.
        # NOTE(review): ``getchildren`` is not defined on MappingElement in
        # this file -- confirm it is provided elsewhere (it may need to be
        # ``iter(element.root)``).
        self.iter = iter(element.getchildren())

    def next(self):
        return self.element.get_tag(next(self.iter))

    # Python 3 spells the iterator-protocol method ``__next__``.
    __next__ = next
class MappingElement(ContainerElementMixin, collections.MutableMapping):
    """Dict-like view over the children of a container element.

    Keys are child tags, values their text.  Writes create the container
    element lazily and flag the tree as modified.
    """
    # NOTE: ``collections.MutableMapping`` moved to ``collections.abc`` in
    # Python 3.3 and was removed from ``collections`` in 3.10.
    def __init__(self, parent, root_tag, namespace=""):
        ContainerElementMixin.__init__(self, parent, root_tag, namespace)

    def __getitem__(self, key):
        if self.root is None:
            raise KeyError()
        child = self.find(self.root, key)
        if child is None:
            raise KeyError()
        return child.text

    def __setitem__(self, key, value):
        if self.root is None:
            # First write: materialize the container element.
            self.root = self.SubElement(self.parent, self.root_tag)
        child = self.find(self.root, key)
        if child is None:
            child = self.SubElement(self.root, key)
        self.modified = True
        child.text = value

    def __delitem__(self, key):
        if self.root is None:
            raise KeyError()
        child = self.find(self.root, key)
        if child is None:
            raise KeyError()
        self.modified = True
        self.root.remove(child)

    def __iter__(self):
        return MappingElementIterator(self)
class SequenceElement(ContainerElementMixin, collections.MutableSequence):
    """List-like view over repeated ``element_tag`` children of a container.

    Item values are the children's text.  Writes create the container
    element lazily and flag the tree as modified.
    """
    def __init__(self, parent, root_tag, element_tag, namespace="", indent=" "):
        ContainerElementMixin.__init__(self, parent, root_tag, namespace)
        self.element_tag = element_tag
        self.indent = indent

    def __getitem__(self, index):
        if self.root is None:
            raise IndexError()
        return self.root[index].text

    def __setitem__(self, index, value):
        if self.root is None:
            # First write: materialize the container element.
            self.root = self.SubElement(self.parent, self.root_tag)
        try:
            child = self.root[index]
        except IndexError:
            child = self.SubElement(self.root, self.element_tag)
        child.text = value
        self.modified = True

    def __delitem__(self, index):
        if self.root is None:
            raise IndexError()
        del self.root[index]
        self.modified = True

    def insert(self, index, value):
        if self.root is None:
            self.root = self.SubElement(self.parent, self.root_tag)
        child = self.Element(self.element_tag)
        child.text = value
        # Keep the serialized output aligned with the container's own tail.
        child.tail = self.root.tail + self.indent
        self.root.insert(index, child)
        self.modified = True
class Writer(object):
    """Serializes an ElementTree node, stripping namespace prefixes and
    substituting a caller-supplied root tag (and optional XML declaration).
    """
    # Extracts the element name from an opening tag such as "<name attr=...>".
    _name_re = re.compile(r"<([^\s]+)")
    # Matches prefixed tags ("<ns:tag>", "</ns:tag>", "<ns:tag/>") so that the
    # "ns:" part can be removed.
    _tag_re = re.compile(r"(</?)[^:]+:((:?[^>]+>)|(:?[^/]+/>))")

    def __init__(self, root_tag, attributes=None, tab_size=0):
        self.root_tag = root_tag
        self.tab_size = tab_size
        # Optional XML-declaration attributes: version, encoding, standalone.
        self.attributes = attributes
        self.name = self._name_re.search(self.root_tag).group(1)
        # Matches the serialized root element so it can be replaced verbatim.
        self._root_re = re.compile(r"(<" + self.name + r"[^>]+>)")

    def marshal(self, node):
        """Return *node* serialized to text with prefixes stripped and the
        stored root tag substituted for the first root occurrence."""
        if nxpy.core.past.V_2_7.at_most():
            s = xml.etree.ElementTree.tostring(node)
        else:
            s = xml.etree.ElementTree.tostring(node, encoding="unicode")
        s = self._tag_re.sub(r"\1\2", s)
        s = self._root_re.sub(self.root_tag, s, 1)
        if self.tab_size > 0:
            s = s.replace("\t", " " * self.tab_size)
        if self.attributes is not None:
            d = ( '<?xml version="' + self.attributes.get("version", "1.0") +
                  '" encoding="' + self.attributes.get("encoding", "UTF-8") + '"')
            if "standalone" in self.attributes:
                d += ' standalone="' + self.attributes["standalone"] + '"'
            d += "?>\n"
            s = d + s
        return s + "\n\n"

    def write(self, node, where):
        """Write *node* to *where*, a file path or an open file-like object.

        Only a file this method opens itself is closed here.  (The original
        unconditionally closed *where* in a ``finally``, clobbering a
        caller-supplied, still-in-use stream.)
        """
        text = self.marshal(node)
        if isinstance(where, six.string_types):
            f = open(where, "w+")
            try:
                f.write(text)
            finally:
                f.close()
        else:
            where.write(text)
/Active-SQLAlchemy-0.4.0.tar.gz/Active-SQLAlchemy-0.4.0/README.md |
#Active-SQLAlchemy
**Version 0.3.***
---
Active-SQLAlchemy is a framework agnostic wrapper for SQLAlchemy that makes it really easy
to use by implementing a simple active record like api, while it still uses the db.session underneath.
Inspired by Flask-SQLAlchemy.
Works with Python 2.6, 2.7, 3.3, 3.4 and pypy.
---
##Quick Overview:
####Create the model
from active_sqlalchemy import SQLAlchemy
db = SQLAlchemy('sqlite://')
class User(db.Model):
name = db.Column(db.String(25))
location = db.Column(db.String(50), default="USA")
last_access = db.Column(db.Datetime)
####Create new record
user = User.create(name="Mardix", location="Moon")
# or
user = User(name="Mardix", location="Moon").save()
####Get all records
all = User.all()
####Get a record by id
user = User.get(1234)
####Update record
user = User.get(1234)
if user:
user.update(location="Neptune")
####Soft Delete a record
user = User.get(1234)
if user:
user.delete()
####Query Records
users = User.all(User.location.distinct())
for user in users:
...
####Query with filter
all = User.all().filter(User.location == "USA")
for user in users:
...
##How to use
### Install
pip install active_sqlalchemy
### Create a connection
The SQLAlchemy class is used to instantiate a SQLAlchemy connection to
a database.
from active_sqlalchemy import SQLAlchemy
db = SQLAlchemy(dialect+driver://username:password@host:port/database)
#### Databases Drivers & DB Connection examples
Active-SQLAlchemy comes with a `PyMySQL` and `PG8000` as drivers for MySQL
and PostgreSQL respectively, because they are in pure Python. But you can use
other drivers for better performance. `SQLite` is already built in Python.
**SQLite:**
from active_sqlalchemy import SQLAlchemy
db = SQLAlchemy("sqlite://") # in memory
# or
db = SQLAlchemy("sqlite:///foo.db") # DB file
**PostgreSql:**
from active_sqlalchemy import SQLAlchemy
db = SQLAlchemy("postgresql+pg8000://user:password@host:port/dbname")
**PyMySQL:**
from active_sqlalchemy import SQLAlchemy
db = SQLAlchemy("mysql+pymysql://user:password@host:port/dbname")
---
Active-SQLAlchemy also provides access to all the SQLAlchemy
functions from the ``sqlalchemy`` and ``sqlalchemy.orm`` modules.
So you can declare models like the following examples:
### Create a Model
To start, create a model class and extends it with db.Model
# mymodel.py
from active_sqlachemy import SQLAlchemy
db = SQLAlchemy("sqlite://")
class MyModel(db.Model):
name = db.Column(db.String(25))
is_live = db.Column(db.Boolean, default=False)
# Put at the end of the model module to auto create all models
db.create_all()
- Upon creation of the table, db.Model will add the following columns: ``id``, ``created_at``, ``updated_at``, ``is_deleted``, ``deleted_at``
- It does an automatic table naming (if no table name is already defined using the ``__tablename__`` property)
by using the class name. So, for example, a ``User`` model gets a table named ``user``, ``TodoList`` becomes ``todo_list``
The name will not be plurialized.
---
## Models: *db.Model*
**db.Model** extends your model with helpers that turn your model into an active record like model. But underneath, it still uses the ``db.session``
**db.Model** also adds a few preset columns on the table:
``id``: The primary key
``created_at``: Datetime. It contains the creation date of the record
``updated_at``: Datetime. It is updated whenever the record is updated.
``deleted_at``: Datetime. Contains the datetime the record was soft-deleted.
``is_deleted``: Boolean. A flag to set if record is soft-deleted or not
**-- About Soft Delete --**
By definition, soft-delete marks a record as deleted so it doesn't get queried, but it still exists in the database. To actually delete the record itself, a hard delete must apply.
By default, when a record is deleted, **Active-SQLAlchemy** actually sets ``is_deleted`` to True and excludes it from being queried, and ``deleted_at`` is also set. But this happens only when using the method ``db.Model.delete()``.
When a record is soft-deleted, you can also undelete a record by doing: ``db.Model.delete(False)``
Now, to totally delete off the table, ``db.Model.delete(hard_delete=True)``
**-- Querying with *db.Model.all()* --**
Due to the fact that **Active-SQLAlchemy** has soft-delete, to query a model without the soft-deleted records, you must query your model by using the ``all(*args, **kwargs)`` which returns a db.session.query object for you to apply filter on etc.
**-- db.BaseModel --**
By default ``db.Model`` adds several preset columns on the table, if you don't want to have them in your model, you can use instead ``db.BaseModel``, which still give you access to the methods to query your model.
---
### db.Model Methods Description
**all(\*args, \*\*kwargs)**
Returns a ``db.session.query`` object to filter or apply more conditions.
all = User.all()
for user in all:
print(user.login)
By default all() returns only non-soft-deleted records. To include both deleted and non-deleted items, add the argument ``include_deleted=True``
all = User.all(include_deleted=True)
for user in all:
print(user.login)
Use all to select columns etc
all = User.all(User.name.distinct(), User.location)
for user in all:
print(user.login)
Use all for complete filter
all = User.all(User.name.distinct, User.location).order_by(User.updated_at.desc()).filter(User.location == "Charlotte")
**get(id)**
Get one record by id. By default it will query only a record that is not soft-deleted
id = 1234
user = User.get(id)
print(user.id)
print(user.login)
To query a record that has been soft deleted, just set the argument ``include_deleted=True``
id = 234
user = User.get(id, include_deleted=True)
**create(\*\*kwargs)**
To create/insert new record. Same as __init__, but just a shortcut to it.
record = User.create(login='abc', passw_hash='hash', profile_id=123)
print (record.login) # -> abc
or you can use the __init__ with save()
record = User(login='abc', passw_hash='hash', profile_id=123).save()
print (record.login) # -> abc
or
record = User(login='abc', passw_hash='hash', profile_id=123)
record.save()
print (record.login) # -> abc
**update(\*\*kwargs)**
Update an existing record
record = User.get(124)
record.update(login='new_login')
print (record.login) # -> new_login
**delete()**
To soft delete a record. ``is_deleted`` will be set to True and ``deleted_at`` datetime will be set
record = User.get(124)
record.delete()
print (record.is_deleted) # -> True
To soft UNdelete a record. ``is_deleted`` will be set to False and ``deleted_at`` datetime will be None
record = User.get(124)
record.delete(delete=False)
print (record.is_deleted) # -> False
To HARD delete a record. The record will be deleted completely
record = User.get(124)
record.delete(hard_delete=True)
**save()**
A shortcut to ``session.add`` + ``session.commit()``
record = User.get(124)
record.login = "Another one"
record.save()
---
#### Method Chaining
For convenience, some method chaining are available
user = User(name="Mardix", location="Charlotte").save()
User.get(12345).update(location="Atlanta")
User.get(345).delete().delete(False).update(location="St. Louis")
---
#### Aggregated selects
class Product(db.Model):
name = db.Column(db.String(250))
price = db.Column(db.Numeric)
results = Product.all(db.func.sum(Unit.price).label('price'))
---
## With Web Application
In a web application you need to call ``db.session.remove()`` after each response, and ``db.session.rollback()`` if an error occurs. However, if you are using Flask or another framework that uses the ``after_request`` and ``on_exception`` decorators, these bindings are set up automatically.
For example using Flask, you can do:
app = Flask(__name__)
db = SQLAlchemy('sqlite://', app=app)
or
db = SQLAlchemy()
app = Flask(__name__)
db.init_app(app)
### More examples
####Many databases, one web app
app = Flask(__name__)
db1 = SQLAlchemy(URI1, app)
db2 = SQLAlchemy(URI2, app)
####Many web apps, one database
db = SQLAlchemy(URI1)
app1 = Flask(__name__)
app2 = Flask(__name__)
db.init_app(app1)
db.init_app(app2)
---
## Pagination
All the results can be easily paginated
users = User.paginate(page=2, per_page=20)
print(list(users)) # [User(21), User(22), User(23), ... , User(40)]
The paginator object is an iterable that returns only the results for that page, so you can use it in your templates in the same way as the original result:
{% for item in paginated_items %}
<li>{{ item.name }}</li>
{% endfor %}
Rendering the pages
Below your results is common that you want it to render the list of pages.
The ``paginator.pages`` property is an iterator that returns the page numbers, but sometimes not all of them: if there are more than 11 pages, the result will be one of these, depending of what is the current page:
Skipped page numbers are represented as ``None``.
How many items are displayed can be controlled calling ``paginator.iter_pages`` instead.
This is one way how you could render such a pagination in your templates:
{% macro render_paginator(paginator, endpoint) %}
<p>Showing {{ paginator.showing }} or {{ paginator.total }}</p>
<ol class="paginator">
{%- if paginator.has_prev %}
<li><a href="{{ url_for(endpoint, page=paginator.prev_num) }}"
rel="me prev">«</a></li>
{% else %}
<li class="disabled"><span>«</span></li>
{%- endif %}
{%- for page in paginator.pages %}
{% if page %}
{% if page != paginator.page %}
<li><a href="{{ url_for(endpoint, page=page) }}"
rel="me">{{ page }}</a></li>
{% else %}
<li class="current"><span>{{ page }}</span></li>
{% endif %}
{% else %}
<li><span class=ellipsis>…</span></li>
{% endif %}
{%- endfor %}
{%- if paginator.has_next %}
<li><a href="{{ url_for(endpoint, page=paginator.next_num) }}"
rel="me next">»</a></li>
{% else %}
<li class="disabled"><span>»</span></li>
{%- endif %}
</ol>
{% endmacro %}
______
####Credits:
[SQLAlchemy](http://www.sqlalchemy.org/)
[Flask-SQLAlchemy](https://pythonhosted.org/Flask-SQLAlchemy)
[SQLAlchemy-Wrapper](https://github.com/lucuma/sqlalchemy-wrapper)
---
copyright: 2015
license: MIT, see LICENSE for more details.
| PypiClean |
/HalWeb-0.6.0.tar.gz/HalWeb-0.6.0/src/halicea/baseProject/models/ShellModels.py | import pickle
from google.appengine.ext import db
class Session(db.Model):
    """A shell session. Stores the session's globals.
    Each session global lives in one of two places:
    If it is picklable, it is kept in the parallel ``globals`` /
    ``global_names`` list properties. (They are parallel lists because the
    datastore cannot store dictionaries natively.)
    If it is not picklable (e.g. modules, classes, and functions), or if it
    was created by the same statement that created an unpicklable global, it
    is not stored directly. Instead, the statement is stored in the
    ``unpicklables`` list property, and on each request those statements are
    re-evaluated to recreate the unpicklable globals.
    ``unpicklable_names`` records the names of globals that came from
    unpicklable statements, so they are skipped when globals are pickled
    after executing a statement.
    Using Text instead of string is an optimization: none of these
    properties are queried on, so they do not need to be indexed.
    """
    global_names = db.ListProperty(db.Text)
    globals = db.ListProperty(db.Blob)
    unpicklable_names = db.ListProperty(db.Text)
    unpicklables = db.ListProperty(db.Text)

    def set_global(self, name, value):
        """Adds a global, or updates it if it already exists.
        Also removes the global from the list of unpicklable names.
        Args:
          name: the name of the global to set
          value: any picklable value
        """
        pickled = db.Blob(pickle.dumps(value))
        try:
            position = self.global_names.index(name)
            self.globals[position] = pickled
        except ValueError:
            # Not present yet: append to both parallel lists.
            self.global_names.append(db.Text(name))
            self.globals.append(pickled)
        self.remove_unpicklable_name(name)

    def remove_global(self, name):
        """Removes a global, if it exists.
        Args:
          name: string, the name of the global to remove
        """
        try:
            position = self.global_names.index(name)
        except ValueError:
            return
        del self.global_names[position]
        del self.globals[position]

    def globals_dict(self):
        """Returns a dictionary view of the globals."""
        pairs = zip(self.global_names, self.globals)
        return dict((name, pickle.loads(blob)) for name, blob in pairs)

    def add_unpicklable(self, statement, names):
        """Adds a statement and list of names to the unpicklables.
        Also removes the names from the globals.
        Args:
          statement: string, the statement that created new unpicklable global(s).
          names: list of strings; the names of the globals created by the statement.
        """
        self.unpicklables.append(db.Text(statement))
        for unpicklable_name in names:
            self.remove_global(unpicklable_name)
            if unpicklable_name not in self.unpicklable_names:
                self.unpicklable_names.append(db.Text(unpicklable_name))

    def remove_unpicklable_name(self, name):
        """Removes a name from the list of unpicklable names, if it exists.
        Args:
          name: string, the name of the unpicklable global to remove
        """
        if name in self.unpicklable_names:
            self.unpicklable_names.remove(name)
/Flask_Unchained-0.9.0-py3-none-any.whl/flask_mail.py | import blinker
import re
import smtplib
import time
import unicodedata
from contextlib import contextmanager
from email import charset
from email.encoders import encode_base64
from email.header import Header
from email.mime.base import MIMEBase
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.policy import SMTP
from email.utils import formataddr, formatdate, make_msgid, parseaddr
from flask import current_app
__version__ = '0.9.3'
charset.add_charset('utf-8', charset.SHORTEST, None, 'utf-8')
class FlaskMailUnicodeDecodeError(UnicodeDecodeError):
    """UnicodeDecodeError that also reports the offending object."""
    def __init__(self, obj, *args):
        self.obj = obj
        super().__init__(*args)

    def __str__(self):
        base = super().__str__()
        return '%s. You passed in %r (%s)' % (base, self.obj, type(self.obj))
def force_text(s, encoding='utf-8', errors='strict'):
    """Return *s* coerced to ``str``.

    Bytes are decoded using *encoding*/*errors*; any other non-string object
    is converted via ``str()``.  A decode failure raises
    :class:`FlaskMailUnicodeDecodeError` (unless *s* is itself an exception,
    in which case its parts are converted piecewise).
    """
    if isinstance(s, str):
        return s
    try:
        # The original also carried an ``else: s = s.decode(...)`` branch for
        # the already-``str`` case, which the early return above makes
        # unreachable; it has been removed.
        if isinstance(s, bytes):
            s = str(s, encoding, errors)
        else:
            s = str(s)
    except UnicodeDecodeError as e:
        if not isinstance(s, Exception):
            raise FlaskMailUnicodeDecodeError(s, *e.args)
        # NOTE(review): iterating an exception instance only worked on
        # Python 2; on Python 3 this path would raise TypeError.  Kept as-is
        # because it is effectively dead (str() of an exception rarely fails).
        s = ' '.join(force_text(arg, encoding, errors) for arg in s)
    return s
def sanitize_subject(subject, encoding='utf-8'):
    """Return *subject* unchanged if pure ASCII, otherwise MIME-encoded."""
    try:
        subject.encode('ascii')
        return subject
    except UnicodeEncodeError:
        try:
            return Header(subject, encoding).encode()
        except UnicodeEncodeError:
            # Requested charset cannot represent the text; UTF-8 always can.
            return Header(subject, 'utf-8').encode()
def sanitize_address(addr, encoding='utf-8'):
    """Return *addr* (a "Name <email>" string or (name, email) pair) as a
    MIME-safe, properly encoded address string."""
    if isinstance(addr, str):
        addr = parseaddr(force_text(addr))
    nm, addr = addr
    try:
        # Encode the display name; fall back to UTF-8 if the requested
        # charset cannot represent it.
        nm = Header(nm, encoding).encode()
    except UnicodeEncodeError:
        nm = Header(nm, 'utf-8').encode()
    try:
        addr.encode('ascii')
    except UnicodeEncodeError:  # IDN
        if '@' in addr:
            # Non-ASCII address: MIME-encode the local part and apply IDNA
            # (punycode) encoding to the domain.
            localpart, domain = addr.split('@', 1)
            try:
                localpart = Header(localpart, encoding).encode()
            except UnicodeEncodeError:
                localpart = Header(localpart, 'utf-8').encode()
            domain = domain.encode('idna').decode('ascii')
            addr = '@'.join([localpart, domain])
        else:
            # No '@' to split on; encode the whole thing as a header.
            addr = Header(addr, encoding).encode()
    return formataddr((nm, addr))
def sanitize_addresses(addresses, encoding='utf-8'):
    """Lazily sanitize every address in *addresses*."""
    return (sanitize_address(address, encoding) for address in addresses)
def fix_recipients_list(recipients):
    """Normalize recipients: non-string entries (e.g. [name, email] lists)
    become tuples so they are hashable; strings pass through unchanged."""
    return [
        recipient if isinstance(recipient, str) else tuple(recipient)
        for recipient in recipients
    ]
def _has_newline(line):
"""Used by has_bad_header to check for \\r or \\n"""
if line and ('\r' in line or '\n' in line):
return True
return False
class Connection:
    """Handles connection to host."""

    def __init__(self, mail):
        self.mail = mail

    def __enter__(self):
        # When mail suppression is configured (e.g. in tests) no SMTP
        # connection is opened; sends become no-ops apart from the signal.
        if self.mail.suppress:
            self.host = None
        else:
            self.host = self.configure_host()
        self.num_emails = 0
        return self

    def __exit__(self, exc_type, exc_value, tb):
        if self.host and getattr(self.host, 'sock', None):
            try:
                self.host.quit()
            except smtplib.SMTPServerDisconnected:
                # The server already dropped the connection; nothing to do.
                pass

    def configure_host(self):
        """Create, secure, and authenticate the SMTP(-SSL) connection."""
        if self.mail.use_ssl:
            host = smtplib.SMTP_SSL(self.mail.server, self.mail.port)
        else:
            host = smtplib.SMTP(self.mail.server, self.mail.port)
        host.set_debuglevel(int(self.mail.debug))
        if self.mail.use_tls:
            (resp, reply) = host.starttls()
            # Fix CVE-2016-0772 on old Python installations: refuse to
            # continue if the server rejected STARTTLS.  Per RFC 3207 the
            # success reply code is 220 -- the original compared against 200,
            # which raised on every *successful* STARTTLS negotiation.
            if resp != 220:
                raise smtplib.SMTPResponseException(resp, reply)
        if self.mail.username and self.mail.password:
            host.login(self.mail.username, self.mail.password)
        return host

    def send(self, message, envelope_from=None):
        """Verifies and sends message.
        :param message: Message instance.
        :param envelope_from: Email address to be used in MAIL FROM command.
        """
        if not message.send_to:
            raise ValueError("No recipients have been added")
        if message.sender is None:
            raise ValueError("The message does not specify a sender and a default "
                             "sender has not been configured")
        if message.has_bad_headers():
            raise BadHeaderError
        if message.date is None:
            message.date = time.time()
        ret = None
        if self.host:
            ret = self.host.sendmail(
                sanitize_address(envelope_from or message.sender),
                list(sanitize_addresses(message.send_to)),
                message.as_bytes(),
                message.mail_options,
                message.rcpt_options
            )
        # Fire the signal even when sending is suppressed, so tests can
        # observe outgoing mail.
        email_dispatched.send(message, app=current_app._get_current_object())
        # Recycle the connection every max_emails sends to stay under
        # server-side per-connection limits.
        self.num_emails += 1
        if self.num_emails == self.mail.max_emails:
            self.num_emails = 0
            if self.host:
                self.host.quit()
                self.host = self.configure_host()
        return ret

    def send_message(self, *args, **kwargs):
        """Shortcut for send(msg).
        Takes same arguments as Message constructor.
        :versionadded: 0.3.5
        """
        return self.send(Message(*args, **kwargs))
class BadHeaderError(Exception):
    """Raised when a message header contains a newline (header injection)."""
    pass
class Attachment:
    """A single e-mail file attachment.

    :versionadded: 0.3.5
    :param filename: filename of attachment
    :param content_type: file mimetype
    :param data: the raw file data
    :param disposition: content-disposition (if any)
    :param content_id: content-id for inline reference
    """
    def __init__(self, filename=None, content_type=None, data=None,
                 disposition=None, headers=None, content_id=None):
        self.filename = filename
        self.content_type = content_type
        self.data = data
        self.content_id = content_id
        # Default to a regular attachment / an empty header set when omitted.
        self.disposition = disposition or 'attachment'
        self.headers = headers or {}
class Message:
"""Encapsulates an email message.
:param subject: email subject header
:param recipients: list of email addresses
:param body: plain text message
:param html: HTML message
:param alts: A dict or an iterable to go through dict() that contains
multipart alternatives
:param sender: email sender address, or **MAIL_DEFAULT_SENDER** by default
:param cc: CC list
:param bcc: BCC list
:param attachments: list of Attachment instances
:param reply_to: reply-to address
:param date: send date
:param charset: message character set
:param extra_headers: A dictionary of additional headers for the message
:param mail_options: A list of ESMTP options to be used in MAIL FROM
:param rcpt_options: A list of ESMTP options to be used in RCPT commands
:param subtype: Media subtype name for a message
"""
    def __init__(self, subject='',
                 recipients=None,
                 body=None,
                 html=None,
                 alts=None,
                 sender=None,
                 cc=None,
                 bcc=None,
                 attachments=None,
                 reply_to=None,
                 date=None,
                 charset=None,
                 extra_headers=None,
                 mail_options=None,
                 rcpt_options=None,
                 subtype=None):
        # Fall back to the application's configured default sender.
        sender = sender or current_app.extensions['mail'].default_sender
        # A (name, address) pair collapses to the "Name <address>" form.
        if isinstance(sender, tuple):
            sender = "%s <%s>" % sender
        self.recipients = recipients or []
        self.subject = subject
        self.sender = sender
        self.reply_to = reply_to
        self.cc = cc or []
        self.bcc = bcc or []
        self.body = body
        self.alts = dict(alts or {})
        self.html = html
        self.date = date
        # Message-ID generated once so it stays stable for this message.
        self.msgId = make_msgid()
        self.charset = charset
        self.extra_headers = extra_headers
        self.subtype = subtype
        self.mail_options = mail_options or []
        self.rcpt_options = rcpt_options or []
        self.attachments = attachments or []
    @property
    def recipients(self):
        # Primary ("To") recipients, normalized by the setter.
        return self._recipients
    @recipients.setter
    def recipients(self, recipients):
        # Normalize list-form entries to hashable tuples.
        self._recipients = fix_recipients_list(recipients)
    @property
    def cc(self):
        # Carbon-copy recipients, normalized by the setter.
        return self._cc
    @cc.setter
    def cc(self, recipients):
        self._cc = fix_recipients_list(recipients)
    @property
    def bcc(self):
        # Blind-carbon-copy recipients, normalized by the setter.
        return self._bcc
    @bcc.setter
    def bcc(self, recipients):
        self._bcc = fix_recipients_list(recipients)
    @property
    def send_to(self):
        # Every unique envelope recipient: To + Cc + Bcc.
        return set(self.recipients) | set(self.bcc or ()) | set(self.cc or ())
    @property
    def html(self):
        # The 'text/html' alternative body, if one was set.
        return self.alts.get('html')
    @html.setter
    def html(self, value):
        # Assigning None removes the HTML alternative entirely.
        if value is None:
            self.alts.pop('html', None)
        else:
            self.alts['html'] = value
def _mimetext(self, text, subtype=None):
"""Creates a MIMEText object with the given subtype (default: 'plain')
If the text is unicode, the utf-8 charset is used.
"""
subtype = subtype or 'plain'
charset = self.charset or 'utf-8'
return MIMEText(text, _subtype=subtype, _charset=charset)
def _message(self):
"""Creates the email"""
ascii_attachments = current_app.extensions['mail'].ascii_attachments
encoding = self.charset or 'utf-8'
attachments = self.attachments or []
if not attachments and not self.alts:
# No html content and zero attachments means plain text
msg = self._mimetext(self.body, self.subtype)
elif attachments and not self.alts:
# No html and at least one attachment means multipart
subtype = self.subtype or 'mixed'
msg = MIMEMultipart(_subtype=subtype)
msg.attach(self._mimetext(self.body))
else:
# Anything else
subtype = self.subtype or 'mixed'
msg = MIMEMultipart(_subtype=subtype)
alternative = MIMEMultipart(_subtype='alternative')
alternative.attach(self._mimetext(self.body))
for mimetype, content in self.alts.items():
alternative.attach(self._mimetext(content, mimetype))
msg.attach(alternative)
if self.subject:
msg['Subject'] = sanitize_subject(force_text(self.subject),
encoding)
msg['From'] = sanitize_address(self.sender, encoding)
msg['To'] = ', '.join(
list(set(sanitize_addresses(self.recipients, encoding)))
)
msg['Date'] = formatdate(self.date, localtime=True)
# see RFC 5322 section 3.6.4.
msg['Message-ID'] = self.msgId
if self.cc:
msg['Cc'] = ', '.join(
list(set(sanitize_addresses(self.cc, encoding)))
)
if self.reply_to:
msg['Reply-To'] = sanitize_address(self.reply_to, encoding)
if self.extra_headers:
for k, v in self.extra_headers.items():
msg[k] = v
SPACES = re.compile(r'[\s]+', re.UNICODE)
for attachment in attachments:
f = MIMEBase(*attachment.content_type.split('/'))
f.set_payload(attachment.data)
encode_base64(f)
filename = attachment.filename
if filename and ascii_attachments:
# force filename to ascii
filename = unicodedata.normalize('NFKD', filename)
filename = filename.encode('ascii', 'ignore').decode('ascii')
filename = SPACES.sub(u' ', filename).strip()
try:
filename and filename.encode('ascii')
except UnicodeEncodeError:
filename = ('UTF8', '', filename)
f.add_header('Content-Disposition',
attachment.disposition,
filename=filename)
for key, value in attachment.headers.items():
f.add_header(key, value)
if attachment.content_id:
try:
f.replace_header('Content-ID', attachment.content_id)
except KeyError:
f.add_header('Content-ID', attachment.content_id)
msg.attach(f)
msg.policy = SMTP
return msg
def as_string(self):
return self._message().as_string()
def as_bytes(self):
return self._message().as_string().encode(self.charset or 'utf-8')
def __str__(self):
return self.as_string()
def __bytes__(self):
return self.as_bytes()
def has_bad_headers(self):
"""
Checks for bad headers i.e. newlines in subject, sender or recipients.
RFC5322 allows multiline CRLF with trailing whitespace (FWS) in headers
"""
headers = [self.sender, self.reply_to] + self.recipients
for header in headers:
if _has_newline(header):
return True
if self.subject:
if _has_newline(self.subject):
for linenum, line in enumerate(self.subject.split('\r\n')):
if not line:
return True
if linenum > 0 and line[0] not in '\t ':
return True
if _has_newline(line):
return True
if not line.strip():
return True
return False
def is_bad_headers(self):
from warnings import warn
warn(DeprecationWarning('is_bad_headers is deprecated, use the'
' new has_bad_headers method instead.'),
stacklevel=1)
return self.has_bad_headers()
def send(self, connection):
"""
Verifies and sends the message.
"""
return connection.send(self)
def add_recipient(self, recipient):
"""
Adds another recipient to the message.
:param recipient: email address of recipient.
"""
self.recipients.append(recipient)
def attach(self,
filename=None,
content_type=None,
data=None,
disposition=None,
headers=None,
content_id=None):
"""
Adds an attachment to the message.
:param filename: filename of attachment
:param content_type: file mimetype
:param data: the raw file data
:param disposition: content-disposition (if any)
:param content_id: content-id
"""
self.attachments.append(
Attachment(filename, content_type, data, disposition,
headers, content_id)
)
class _MailMixin:
    """Shared send/record/connect behaviour for Mail and its state object."""
    @contextmanager
    def record_messages(self):
        """
        Records all messages. Use in unit tests for example::
            with mail.record_messages() as outbox:
                response = app.test_client.get("/email-sending-view/")
                assert len(outbox) == 1
                assert outbox[0].subject == "testing"
        You must have blinker installed in order to use this feature.
        :versionadded: 0.4
        """
        # email_dispatched is falsy when blinker is unavailable -- TODO
        # confirm against the fallback defined at module level.
        if not email_dispatched:
            raise RuntimeError("blinker must be installed")
        outbox = []
        def _record(message, app):  # skipcq: PYL-W0613 (unused arg)
            outbox.append(message)
        email_dispatched.connect(_record)
        try:
            yield outbox
        finally:
            # Always detach the listener, even if the with-body raised.
            email_dispatched.disconnect(_record)
    def send(self, message):
        """
        Sends a single message instance. If TESTING is True the message will
        not actually be sent.
        :param message: a Message instance.
        """
        with self.connect() as connection:
            return message.send(connection)
    def send_message(self, *args, **kwargs):
        """
        Shortcut for send(msg).
        Takes same arguments as Message constructor.
        :versionadded: 0.3.5
        """
        return self.send(Message(*args, **kwargs))
    def connect(self):
        """
        Opens a connection to the mail host.
        :raises RuntimeError: if Flask-Mail was never initialized for the
            current application.
        """
        app = getattr(self, "app", None) or current_app
        try:
            return Connection(app.extensions['mail'])
        except KeyError:
            # Fixed typo ("curent") in the error message; the KeyError
            # context is suppressed because it is noise for the caller.
            raise RuntimeError("The current application was"
                               " not configured with Flask-Mail") from None
class _Mail(_MailMixin):
    """Plain configuration/state container built by ``Mail.init_mail``."""
    def __init__(self, server, username, password, port, use_tls, use_ssl,
                 default_sender, debug, max_emails, suppress,
                 ascii_attachments=False):
        # Connection endpoint and credentials.
        self.server, self.port = server, port
        self.username, self.password = username, password
        # Transport security flags.
        self.use_tls, self.use_ssl = use_tls, use_ssl
        # Sending behaviour.
        self.default_sender = default_sender
        self.debug = debug
        self.max_emails = max_emails
        self.suppress = suppress
        self.ascii_attachments = ascii_attachments
class Mail(_MailMixin):
    """
    Manages email messaging
    :param app: Flask instance
    """
    def __init__(self, app=None):
        self.app = app
        # Eagerly initialize when an app is supplied (non-factory usage).
        self.state = self.init_app(app) if app is not None else None
    def init_mail(self, config, debug=False, testing=False):
        """Build a _Mail state object from a Flask config mapping."""
        cfg = config.get
        return _Mail(
            cfg('MAIL_SERVER', '127.0.0.1'),
            cfg('MAIL_USERNAME'),
            cfg('MAIL_PASSWORD'),
            cfg('MAIL_PORT', 25),
            cfg('MAIL_USE_TLS', False),
            cfg('MAIL_USE_SSL', False),
            cfg('MAIL_DEFAULT_SENDER'),
            int(cfg('MAIL_DEBUG', debug)),
            cfg('MAIL_MAX_EMAILS'),
            cfg('MAIL_SUPPRESS_SEND', testing),
            cfg('MAIL_ASCII_ATTACHMENTS', False)
        )
    def init_app(self, app):
        """Initializes your mail settings from the application settings.
        You can use this if you want to set up your Mail instance
        at configuration time.
        :param app: Flask application instance
        """
        state = self.init_mail(app.config, app.debug, app.testing)
        # register extension with app
        extensions = getattr(app, 'extensions', {})
        extensions['mail'] = state
        app.extensions = extensions
        return state
    def __getattr__(self, name):
        # Delegate unknown attributes to the bound state object; missing
        # names resolve to None rather than raising AttributeError.
        return getattr(self.state, name, None)
signals = blinker.Namespace()
email_dispatched = signals.signal("email-dispatched", doc="""
Signal sent when an email is dispatched. This signal will also be sent
in testing mode, even though the email will not actually be sent.
""") | PypiClean |
/DBigBang-0.2.tar.gz/DBigBang-0.2/dbigbang/twopeople.py |
from pprint import pprint as pp
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
import pandas as pd
import pytz
import dbigbang.graph as graph
import dbigbang.mailman as mailman
import dbigbang.parse as parse
import dbigbang.process as process
from dbigbang.archive import Archive
def duration(exchanges, A, B):
    """Return how long A and B communicated on the mailing list.

    The value is the span (as a TimeDelta-like difference of the "Date"
    column) covering every reply in either direction between the two
    addresses.
    """
    a_to_b = exchanges[(exchanges["From_original"] == A)
                       & (exchanges["From_response"] == B)]
    b_to_a = exchanges[(exchanges["From_original"] == B)
                       & (exchanges["From_response"] == A)]
    # If one direction is empty, the span is taken from the other alone.
    if len(a_to_b) == 0:
        return max(b_to_a["Date"]) - min(b_to_a["Date"])
    if len(b_to_a) == 0:
        return max(a_to_b["Date"]) - min(a_to_b["Date"])
    latest = max(max(a_to_b["Date"]), max(b_to_a["Date"]))
    earliest = min(min(a_to_b["Date"]), min(b_to_a["Date"]))
    return latest - earliest
def num_replies(exchanges, A, B):
    """Return the reply counts between A and B.

    The result is the tuple (# of replies from A to B, # of replies
    from B to A).
    """
    forward = exchanges[(exchanges["From_original"] == A)
                        & (exchanges["From_response"] == B)]
    backward = exchanges[(exchanges["From_original"] == B)
                         & (exchanges["From_response"] == A)]
    return (len(forward), len(backward))
def reciprocity(exchanges, A, B):
    """Return how interactively A and B communicated, as a float.

    The value is min(count) / max(count) of the two directional reply
    counts: 1.0 for perfectly balanced exchanges, approaching 0.0 for
    one-sided ones.
    """
    # Count replies in each direction directly (same computation as
    # num_replies, inlined here).
    forward = len(exchanges[(exchanges["From_original"] == A)
                            & (exchanges["From_response"] == B)])
    backward = len(exchanges[(exchanges["From_original"] == B)
                             & (exchanges["From_response"] == A)])
    return float(min(forward, backward)) / max(forward, backward)
def unique_pairs(exchanges):
    """Return the set of unique sender/responder pairs in *exchanges*.

    Each unordered pair is reported exactly once, in the direction it is
    first encountered: if (A, B) has been seen, a later (B, A) is skipped.

    :param exchanges: DataFrame with "From_original" and "From_response"
        columns.
    :return: set of (original, response) tuples.
    """
    pairs = set()
    # Iterate the column values positionally.  The previous implementation
    # indexed with exchanges["From_original"][i], a *label* lookup that
    # raises KeyError on any DataFrame whose index is not the default
    # RangeIndex (e.g. the result of a filter).
    for original, response in zip(exchanges["From_original"],
                                  exchanges["From_response"]):
        if (response, original) not in pairs:
            pairs.add((original, response))
    return pairs
def panda_pair(exchanges, A, B):
    """Build a one-row DataFrame summarising the exchange between A and B.

    Columns: A, B, duration, num_replies (total both ways) and
    reciprocity.  If the pair never exchanged mail the underlying
    helpers raise; that case is reported on stdout and None is returned.
    """
    try:
        summary = [{
            "A": A,
            "B": B,
            "duration": duration(exchanges, A, B),
            "num_replies": sum(num_replies(exchanges, A, B)),
            "reciprocity": reciprocity(exchanges, A, B),
        }]
        return pd.DataFrame(summary)
    except Exception:
        print('No exchange between "%s" and "%s" exists.' % (A, B))
def panda_allpairs(exchanges, pairs):
    """Build a DataFrame of communication stats for every given pair.

    :param exchanges: DataFrame of reply exchanges.
    :param pairs: iterable of (A, B) address tuples.
    :return: DataFrame with one row per pair (A, B, duration,
        num_replies, reciprocity).
    """
    rows = [
        {
            "A": A,
            "B": B,
            "duration": duration(exchanges, A, B),
            "num_replies": sum(num_replies(exchanges, A, B)),
            "reciprocity": reciprocity(exchanges, A, B),
        }
        for A, B in pairs
    ]
    return pd.DataFrame(rows)
/MIDIUtil-1.2.1.tar.gz/MIDIUtil-1.2.1/documentation/_build/html/_static/underscore-1.3.1.js |
(function() {
// Baseline setup
// --------------
// Establish the root object, `window` in the browser, or `global` on the server.
var root = this;
// Save the previous value of the `_` variable.
var previousUnderscore = root._;
// Establish the object that gets returned to break out of a loop iteration.
var breaker = {};
// Save bytes in the minified (but not gzipped) version:
var ArrayProto = Array.prototype, ObjProto = Object.prototype, FuncProto = Function.prototype;
// Create quick reference variables for speed access to core prototypes.
var slice = ArrayProto.slice,
unshift = ArrayProto.unshift,
toString = ObjProto.toString,
hasOwnProperty = ObjProto.hasOwnProperty;
// All **ECMAScript 5** native function implementations that we hope to use
// are declared here.
var
nativeForEach = ArrayProto.forEach,
nativeMap = ArrayProto.map,
nativeReduce = ArrayProto.reduce,
nativeReduceRight = ArrayProto.reduceRight,
nativeFilter = ArrayProto.filter,
nativeEvery = ArrayProto.every,
nativeSome = ArrayProto.some,
nativeIndexOf = ArrayProto.indexOf,
nativeLastIndexOf = ArrayProto.lastIndexOf,
nativeIsArray = Array.isArray,
nativeKeys = Object.keys,
nativeBind = FuncProto.bind;
// Create a safe reference to the Underscore object for use below.
var _ = function(obj) { return new wrapper(obj); };
// Export the Underscore object for **Node.js**, with
// backwards-compatibility for the old `require()` API. If we're in
// the browser, add `_` as a global object via a string identifier,
// for Closure Compiler "advanced" mode.
if (typeof exports !== 'undefined') {
if (typeof module !== 'undefined' && module.exports) {
exports = module.exports = _;
}
exports._ = _;
} else {
root['_'] = _;
}
// Current version.
_.VERSION = '1.3.1';
// Collection Functions
// --------------------
// The cornerstone, an `each` implementation, aka `forEach`.
// Handles objects with the built-in `forEach`, arrays, and raw objects.
// Delegates to **ECMAScript 5**'s native `forEach` if available.
var each = _.each = _.forEach = function(obj, iterator, context) {
if (obj == null) return;
if (nativeForEach && obj.forEach === nativeForEach) {
obj.forEach(iterator, context);
} else if (obj.length === +obj.length) {
for (var i = 0, l = obj.length; i < l; i++) {
if (i in obj && iterator.call(context, obj[i], i, obj) === breaker) return;
}
} else {
for (var key in obj) {
if (_.has(obj, key)) {
if (iterator.call(context, obj[key], key, obj) === breaker) return;
}
}
}
};
// Return the results of applying the iterator to each element.
// Delegates to **ECMAScript 5**'s native `map` if available.
_.map = _.collect = function(obj, iterator, context) {
var results = [];
if (obj == null) return results;
if (nativeMap && obj.map === nativeMap) return obj.map(iterator, context);
each(obj, function(value, index, list) {
results[results.length] = iterator.call(context, value, index, list);
});
if (obj.length === +obj.length) results.length = obj.length;
return results;
};
// **Reduce** builds up a single result from a list of values, aka `inject`,
// or `foldl`. Delegates to **ECMAScript 5**'s native `reduce` if available.
_.reduce = _.foldl = _.inject = function(obj, iterator, memo, context) {
var initial = arguments.length > 2;
if (obj == null) obj = [];
if (nativeReduce && obj.reduce === nativeReduce) {
if (context) iterator = _.bind(iterator, context);
return initial ? obj.reduce(iterator, memo) : obj.reduce(iterator);
}
each(obj, function(value, index, list) {
if (!initial) {
memo = value;
initial = true;
} else {
memo = iterator.call(context, memo, value, index, list);
}
});
if (!initial) throw new TypeError('Reduce of empty array with no initial value');
return memo;
};
// The right-associative version of reduce, also known as `foldr`.
// Delegates to **ECMAScript 5**'s native `reduceRight` if available.
_.reduceRight = _.foldr = function(obj, iterator, memo, context) {
var initial = arguments.length > 2;
if (obj == null) obj = [];
if (nativeReduceRight && obj.reduceRight === nativeReduceRight) {
if (context) iterator = _.bind(iterator, context);
return initial ? obj.reduceRight(iterator, memo) : obj.reduceRight(iterator);
}
var reversed = _.toArray(obj).reverse();
if (context && !initial) iterator = _.bind(iterator, context);
return initial ? _.reduce(reversed, iterator, memo, context) : _.reduce(reversed, iterator);
};
// Return the first value which passes a truth test. Aliased as `detect`.
_.find = _.detect = function(obj, iterator, context) {
var result;
any(obj, function(value, index, list) {
if (iterator.call(context, value, index, list)) {
result = value;
return true;
}
});
return result;
};
// Return all the elements that pass a truth test.
// Delegates to **ECMAScript 5**'s native `filter` if available.
// Aliased as `select`.
_.filter = _.select = function(obj, iterator, context) {
var results = [];
if (obj == null) return results;
if (nativeFilter && obj.filter === nativeFilter) return obj.filter(iterator, context);
each(obj, function(value, index, list) {
if (iterator.call(context, value, index, list)) results[results.length] = value;
});
return results;
};
// Return all the elements for which a truth test fails.
_.reject = function(obj, iterator, context) {
var results = [];
if (obj == null) return results;
each(obj, function(value, index, list) {
if (!iterator.call(context, value, index, list)) results[results.length] = value;
});
return results;
};
// Determine whether all of the elements match a truth test.
// Delegates to **ECMAScript 5**'s native `every` if available.
// Aliased as `all`.
_.every = _.all = function(obj, iterator, context) {
var result = true;
if (obj == null) return result;
if (nativeEvery && obj.every === nativeEvery) return obj.every(iterator, context);
each(obj, function(value, index, list) {
if (!(result = result && iterator.call(context, value, index, list))) return breaker;
});
return result;
};
// Determine if at least one element in the object matches a truth test.
// Delegates to **ECMAScript 5**'s native `some` if available.
// Aliased as `any`.
var any = _.some = _.any = function(obj, iterator, context) {
iterator || (iterator = _.identity);
var result = false;
if (obj == null) return result;
if (nativeSome && obj.some === nativeSome) return obj.some(iterator, context);
each(obj, function(value, index, list) {
if (result || (result = iterator.call(context, value, index, list))) return breaker;
});
return !!result;
};
// Determine if a given value is included in the array or object using `===`.
// Aliased as `contains`.
_.include = _.contains = function(obj, target) {
var found = false;
if (obj == null) return found;
if (nativeIndexOf && obj.indexOf === nativeIndexOf) return obj.indexOf(target) != -1;
found = any(obj, function(value) {
return value === target;
});
return found;
};
// Invoke a method (with arguments) on every item in a collection.
_.invoke = function(obj, method) {
var args = slice.call(arguments, 2);
return _.map(obj, function(value) {
return (_.isFunction(method) ? method || value : value[method]).apply(value, args);
});
};
// Convenience version of a common use case of `map`: fetching a property.
_.pluck = function(obj, key) {
return _.map(obj, function(value){ return value[key]; });
};
// Return the maximum element or (element-based computation).
_.max = function(obj, iterator, context) {
if (!iterator && _.isArray(obj)) return Math.max.apply(Math, obj);
if (!iterator && _.isEmpty(obj)) return -Infinity;
var result = {computed : -Infinity};
each(obj, function(value, index, list) {
var computed = iterator ? iterator.call(context, value, index, list) : value;
computed >= result.computed && (result = {value : value, computed : computed});
});
return result.value;
};
// Return the minimum element (or element-based computation).
_.min = function(obj, iterator, context) {
if (!iterator && _.isArray(obj)) return Math.min.apply(Math, obj);
if (!iterator && _.isEmpty(obj)) return Infinity;
var result = {computed : Infinity};
each(obj, function(value, index, list) {
var computed = iterator ? iterator.call(context, value, index, list) : value;
computed < result.computed && (result = {value : value, computed : computed});
});
return result.value;
};
// Shuffle an array.
_.shuffle = function(obj) {
var shuffled = [], rand;
each(obj, function(value, index, list) {
if (index == 0) {
shuffled[0] = value;
} else {
rand = Math.floor(Math.random() * (index + 1));
shuffled[index] = shuffled[rand];
shuffled[rand] = value;
}
});
return shuffled;
};
// Sort the object's values by a criterion produced by an iterator.
_.sortBy = function(obj, iterator, context) {
return _.pluck(_.map(obj, function(value, index, list) {
return {
value : value,
criteria : iterator.call(context, value, index, list)
};
}).sort(function(left, right) {
var a = left.criteria, b = right.criteria;
return a < b ? -1 : a > b ? 1 : 0;
}), 'value');
};
// Groups the object's values by a criterion. Pass either a string attribute
// to group by, or a function that returns the criterion.
_.groupBy = function(obj, val) {
var result = {};
var iterator = _.isFunction(val) ? val : function(obj) { return obj[val]; };
each(obj, function(value, index) {
var key = iterator(value, index);
(result[key] || (result[key] = [])).push(value);
});
return result;
};
// Use a comparator function to figure out at what index an object should
// be inserted so as to maintain order. Uses binary search.
_.sortedIndex = function(array, obj, iterator) {
iterator || (iterator = _.identity);
var low = 0, high = array.length;
while (low < high) {
var mid = (low + high) >> 1;
iterator(array[mid]) < iterator(obj) ? low = mid + 1 : high = mid;
}
return low;
};
// Safely convert anything iterable into a real, live array.
_.toArray = function(iterable) {
if (!iterable) return [];
if (iterable.toArray) return iterable.toArray();
if (_.isArray(iterable)) return slice.call(iterable);
if (_.isArguments(iterable)) return slice.call(iterable);
return _.values(iterable);
};
// Return the number of elements in an object.
_.size = function(obj) {
return _.toArray(obj).length;
};
// Array Functions
// ---------------
// Get the first element of an array. Passing **n** will return the first N
// values in the array. Aliased as `head`. The **guard** check allows it to work
// with `_.map`.
_.first = _.head = function(array, n, guard) {
return (n != null) && !guard ? slice.call(array, 0, n) : array[0];
};
// Returns everything but the last entry of the array. Especcialy useful on
// the arguments object. Passing **n** will return all the values in
// the array, excluding the last N. The **guard** check allows it to work with
// `_.map`.
_.initial = function(array, n, guard) {
return slice.call(array, 0, array.length - ((n == null) || guard ? 1 : n));
};
// Get the last element of an array. Passing **n** will return the last N
// values in the array. The **guard** check allows it to work with `_.map`.
_.last = function(array, n, guard) {
if ((n != null) && !guard) {
return slice.call(array, Math.max(array.length - n, 0));
} else {
return array[array.length - 1];
}
};
// Returns everything but the first entry of the array. Aliased as `tail`.
// Especially useful on the arguments object. Passing an **index** will return
// the rest of the values in the array from that index onward. The **guard**
// check allows it to work with `_.map`.
_.rest = _.tail = function(array, index, guard) {
return slice.call(array, (index == null) || guard ? 1 : index);
};
// Trim out all falsy values from an array.
_.compact = function(array) {
return _.filter(array, function(value){ return !!value; });
};
// Return a completely flattened version of an array.
_.flatten = function(array, shallow) {
return _.reduce(array, function(memo, value) {
if (_.isArray(value)) return memo.concat(shallow ? value : _.flatten(value));
memo[memo.length] = value;
return memo;
}, []);
};
// Return a version of the array that does not contain the specified value(s).
_.without = function(array) {
return _.difference(array, slice.call(arguments, 1));
};
// Produce a duplicate-free version of the array. If the array has already
// been sorted, you have the option of using a faster algorithm.
// Aliased as `unique`.
_.uniq = _.unique = function(array, isSorted, iterator) {
var initial = iterator ? _.map(array, iterator) : array;
var result = [];
_.reduce(initial, function(memo, el, i) {
if (0 == i || (isSorted === true ? _.last(memo) != el : !_.include(memo, el))) {
memo[memo.length] = el;
result[result.length] = array[i];
}
return memo;
}, []);
return result;
};
// Produce an array that contains the union: each distinct element from all of
// the passed-in arrays.
_.union = function() {
return _.uniq(_.flatten(arguments, true));
};
// Produce an array that contains every item shared between all the
// passed-in arrays. (Aliased as "intersect" for back-compat.)
_.intersection = _.intersect = function(array) {
var rest = slice.call(arguments, 1);
return _.filter(_.uniq(array), function(item) {
return _.every(rest, function(other) {
return _.indexOf(other, item) >= 0;
});
});
};
// Take the difference between one array and a number of other arrays.
// Only the elements present in just the first array will remain.
_.difference = function(array) {
var rest = _.flatten(slice.call(arguments, 1));
return _.filter(array, function(value){ return !_.include(rest, value); });
};
// Zip together multiple lists into a single array -- elements that share
// an index go together.
_.zip = function() {
var args = slice.call(arguments);
var length = _.max(_.pluck(args, 'length'));
var results = new Array(length);
for (var i = 0; i < length; i++) results[i] = _.pluck(args, "" + i);
return results;
};
// If the browser doesn't supply us with indexOf (I'm looking at you, **MSIE**),
// we need this function. Return the position of the first occurrence of an
// item in an array, or -1 if the item is not included in the array.
// Delegates to **ECMAScript 5**'s native `indexOf` if available.
// If the array is large and already in sort order, pass `true`
// for **isSorted** to use binary search.
_.indexOf = function(array, item, isSorted) {
if (array == null) return -1;
var i, l;
if (isSorted) {
i = _.sortedIndex(array, item);
return array[i] === item ? i : -1;
}
if (nativeIndexOf && array.indexOf === nativeIndexOf) return array.indexOf(item);
for (i = 0, l = array.length; i < l; i++) if (i in array && array[i] === item) return i;
return -1;
};
// Delegates to **ECMAScript 5**'s native `lastIndexOf` if available.
_.lastIndexOf = function(array, item) {
if (array == null) return -1;
if (nativeLastIndexOf && array.lastIndexOf === nativeLastIndexOf) return array.lastIndexOf(item);
var i = array.length;
while (i--) if (i in array && array[i] === item) return i;
return -1;
};
// Generate an integer Array containing an arithmetic progression. A port of
// the native Python `range()` function. See
// [the Python documentation](http://docs.python.org/library/functions.html#range).
_.range = function(start, stop, step) {
if (arguments.length <= 1) {
stop = start || 0;
start = 0;
}
step = arguments[2] || 1;
var len = Math.max(Math.ceil((stop - start) / step), 0);
var idx = 0;
var range = new Array(len);
while(idx < len) {
range[idx++] = start;
start += step;
}
return range;
};
// Function (ahem) Functions
// ------------------
// Reusable constructor function for prototype setting.
var ctor = function(){};
// Create a function bound to a given object (assigning `this`, and arguments,
// optionally). Binding with arguments is also known as `curry`.
// Delegates to **ECMAScript 5**'s native `Function.bind` if available.
// We check for `func.bind` first, to fail fast when `func` is undefined.
_.bind = function bind(func, context) {
var bound, args;
if (func.bind === nativeBind && nativeBind) return nativeBind.apply(func, slice.call(arguments, 1));
if (!_.isFunction(func)) throw new TypeError;
args = slice.call(arguments, 2);
return bound = function() {
if (!(this instanceof bound)) return func.apply(context, args.concat(slice.call(arguments)));
ctor.prototype = func.prototype;
var self = new ctor;
var result = func.apply(self, args.concat(slice.call(arguments)));
if (Object(result) === result) return result;
return self;
};
};
// Bind all of an object's methods to that object. Useful for ensuring that
// all callbacks defined on an object belong to it.
_.bindAll = function(obj) {
var funcs = slice.call(arguments, 1);
if (funcs.length == 0) funcs = _.functions(obj);
each(funcs, function(f) { obj[f] = _.bind(obj[f], obj); });
return obj;
};
// Memoize an expensive function by storing its results.
_.memoize = function(func, hasher) {
var memo = {};
hasher || (hasher = _.identity);
return function() {
var key = hasher.apply(this, arguments);
return _.has(memo, key) ? memo[key] : (memo[key] = func.apply(this, arguments));
};
};
// Delays a function for the given number of milliseconds, and then calls
// it with the arguments supplied.
_.delay = function(func, wait) {
var args = slice.call(arguments, 2);
return setTimeout(function(){ return func.apply(func, args); }, wait);
};
// Defers a function, scheduling it to run after the current call stack has
// cleared.
_.defer = function(func) {
return _.delay.apply(_, [func, 1].concat(slice.call(arguments, 1)));
};
// Returns a function, that, when invoked, will only be triggered at most once
// during a given window of time.
_.throttle = function(func, wait) {
var context, args, timeout, throttling, more;
var whenDone = _.debounce(function(){ more = throttling = false; }, wait);
return function() {
context = this; args = arguments;
var later = function() {
timeout = null;
if (more) func.apply(context, args);
whenDone();
};
if (!timeout) timeout = setTimeout(later, wait);
if (throttling) {
more = true;
} else {
func.apply(context, args);
}
whenDone();
throttling = true;
};
};
// Returns a function, that, as long as it continues to be invoked, will not
// be triggered. The function will be called after it stops being called for
// N milliseconds.
_.debounce = function(func, wait) {
var timeout;
return function() {
var context = this, args = arguments;
var later = function() {
timeout = null;
func.apply(context, args);
};
clearTimeout(timeout);
timeout = setTimeout(later, wait);
};
};
// Returns a function that will be executed at most one time, no matter how
// often you call it. Useful for lazy initialization.
_.once = function(func) {
var ran = false, memo;
return function() {
if (ran) return memo;
ran = true;
return memo = func.apply(this, arguments);
};
};
// Returns the first function passed as an argument to the second,
// allowing you to adjust arguments, run code before and after, and
// conditionally execute the original function.
_.wrap = function(func, wrapper) {
return function() {
var args = [func].concat(slice.call(arguments, 0));
return wrapper.apply(this, args);
};
};
// Returns a function that is the composition of a list of functions, each
// consuming the return value of the function that follows.
_.compose = function() {
var funcs = arguments;
return function() {
var args = arguments;
for (var i = funcs.length - 1; i >= 0; i--) {
args = [funcs[i].apply(this, args)];
}
return args[0];
};
};
// Returns a function that will only be executed after being called N times.
_.after = function(times, func) {
if (times <= 0) return func();
return function() {
if (--times < 1) { return func.apply(this, arguments); }
};
};
// Object Functions
// ----------------
// Retrieve the names of an object's properties.
// Delegates to **ECMAScript 5**'s native `Object.keys`
_.keys = nativeKeys || function(obj) {
if (obj !== Object(obj)) throw new TypeError('Invalid object');
var keys = [];
for (var key in obj) if (_.has(obj, key)) keys[keys.length] = key;
return keys;
};
// Retrieve the values of an object's properties.
_.values = function(obj) {
return _.map(obj, _.identity);
};
// Return a sorted list of the function names available on the object.
// Aliased as `methods`
_.functions = _.methods = function(obj) {
var names = [];
for (var key in obj) {
if (_.isFunction(obj[key])) names.push(key);
}
return names.sort();
};
// Extend a given object with all the properties in passed-in object(s).
_.extend = function(obj) {
each(slice.call(arguments, 1), function(source) {
for (var prop in source) {
obj[prop] = source[prop];
}
});
return obj;
};
// Fill in a given object with default properties.
_.defaults = function(obj) {
each(slice.call(arguments, 1), function(source) {
for (var prop in source) {
if (obj[prop] == null) obj[prop] = source[prop];
}
});
return obj;
};
// Create a (shallow-cloned) duplicate of an object.
// Primitives are returned as-is; arrays are sliced; plain objects copied.
_.clone = function(obj) {
  if (!_.isObject(obj)) return obj;
  if (_.isArray(obj)) return obj.slice();
  return _.extend({}, obj);
};

// Invokes interceptor with the obj, and then returns obj.
// The primary purpose of this method is to "tap into" a method chain, in
// order to perform operations on intermediate results within the chain.
_.tap = function(obj, interceptor) {
  interceptor(obj);
  return obj;
};
// Internal recursive comparison function backing `_.isEqual`.
// `stack` holds the objects traversed on the way down to the current pair so
// that cyclic structures can be detected (adapted from ES 5.1 §15.12.3 `JO`).
function eq(a, b, stack) {
  // Identical objects are equal. `0 === -0`, but they aren't identical.
  // See the Harmony `egal` proposal: http://wiki.ecmascript.org/doku.php?id=harmony:egal.
  if (a === b) return a !== 0 || 1 / a == 1 / b;
  // A strict comparison is necessary because `null == undefined`.
  if (a == null || b == null) return a === b;
  // Unwrap any wrapped objects.
  if (a._chain) a = a._wrapped;
  if (b._chain) b = b._wrapped;
  // Invoke a custom `isEqual` method if one is provided.
  if (a.isEqual && _.isFunction(a.isEqual)) return a.isEqual(b);
  if (b.isEqual && _.isFunction(b.isEqual)) return b.isEqual(a);
  // Compare `[[Class]]` names.
  var className = toString.call(a);
  if (className != toString.call(b)) return false;
  switch (className) {
    // Strings, numbers, dates, and booleans are compared by value.
    case '[object String]':
      // Primitives and their corresponding object wrappers are equivalent; thus, `"5"` is
      // equivalent to `new String("5")`.
      return a == String(b);
    case '[object Number]':
      // `NaN`s are equivalent, but non-reflexive. An `egal` comparison is performed for
      // other numeric values.
      return a != +a ? b != +b : (a == 0 ? 1 / a == 1 / b : a == +b);
    case '[object Date]':
    case '[object Boolean]':
      // Coerce dates and booleans to numeric primitive values. Dates are compared by their
      // millisecond representations. Note that invalid dates with millisecond representations
      // of `NaN` are not equivalent.
      return +a == +b;
    // RegExps are compared by their source patterns and flags.
    case '[object RegExp]':
      return a.source == b.source &&
        a.global == b.global &&
        a.multiline == b.multiline &&
        a.ignoreCase == b.ignoreCase;
  }
  // Everything not handled above must be object-typed on both sides.
  if (typeof a != 'object' || typeof b != 'object') return false;
  // Assume equality for cyclic structures. The algorithm for detecting cyclic
  // structures is adapted from ES 5.1 section 15.12.3, abstract operation `JO`.
  var length = stack.length;
  while (length--) {
    // Linear search. Performance is inversely proportional to the number of
    // unique nested structures.
    if (stack[length] == a) return true;
  }
  // Add the first object to the stack of traversed objects.
  stack.push(a);
  var size = 0, result = true;
  // Recursively compare objects and arrays.
  if (className == '[object Array]') {
    // Compare array lengths to determine if a deep comparison is necessary.
    size = a.length;
    result = size == b.length;
    if (result) {
      // Deep compare the contents, ignoring non-numeric properties.
      while (size--) {
        // Ensure commutative equality for sparse arrays.
        if (!(result = size in a == size in b && eq(a[size], b[size], stack))) break;
      }
    }
  } else {
    // Objects with different constructors are not equivalent.
    if ('constructor' in a != 'constructor' in b || a.constructor != b.constructor) return false;
    // Deep compare objects.
    for (var key in a) {
      if (_.has(a, key)) {
        // Count the expected number of properties.
        size++;
        // Deep compare each member.
        if (!(result = _.has(b, key) && eq(a[key], b[key], stack))) break;
      }
    }
    // Ensure that both objects contain the same number of properties: walk
    // `b`'s own keys decrementing `size`; any surplus key makes the `!(size--)`
    // guard break early and leaves `size` non-zero.
    if (result) {
      for (key in b) {
        if (_.has(b, key) && !(size--)) break;
      }
      result = !size;
    }
  }
  // Remove the first object from the stack of traversed objects.
  stack.pop();
  return result;
}
// Perform a deep comparison to check if two objects are equal.
_.isEqual = function(a, b) {
  return eq(a, b, []);
};

// Is a given array, string, or object empty?
// An "empty" object has no enumerable own-properties.
_.isEmpty = function(obj) {
  if (_.isArray(obj) || _.isString(obj)) return !obj.length;
  for (var prop in obj) {
    if (_.has(obj, prop)) return false;
  }
  return true;
};
// Is a given value a DOM element?
_.isElement = function(obj) {
  return !!(obj && obj.nodeType == 1);
};

// Internal factory: builds a predicate that matches on the value's
// `[[Class]]` name as reported by `Object.prototype.toString`.
var classPredicate = function(name) {
  var tag = '[object ' + name + ']';
  return function(obj) {
    return toString.call(obj) == tag;
  };
};

// Is a given value an array?
// Delegates to ECMA5's native Array.isArray
_.isArray = nativeIsArray || classPredicate('Array');

// Is a given variable an object?
_.isObject = function(obj) {
  return obj === Object(obj);
};

// Is a given variable an arguments object?
_.isArguments = classPredicate('Arguments');
// Some engines don't report "[object Arguments]" for arguments objects
// (hence this runtime self-test); fall back to duck-typing on `callee`.
if (!_.isArguments(arguments)) {
  _.isArguments = function(obj) {
    return !!(obj && _.has(obj, 'callee'));
  };
}

// Is a given value a function?
_.isFunction = classPredicate('Function');

// Is a given value a string?
_.isString = classPredicate('String');

// Is a given value a number?
_.isNumber = classPredicate('Number');

// Is the given value `NaN`?
_.isNaN = function(obj) {
  // `NaN` is the only value for which `===` is not reflexive.
  return obj !== obj;
};

// Is a given value a boolean?
var booleanClass = classPredicate('Boolean');
_.isBoolean = function(obj) {
  return obj === true || obj === false || booleanClass(obj);
};

// Is a given value a date?
_.isDate = classPredicate('Date');

// Is the given value a regular expression?
_.isRegExp = classPredicate('RegExp');

// Is a given value equal to null?
_.isNull = function(obj) {
  return obj === null;
};

// Is a given variable undefined?
_.isUndefined = function(obj) {
  return obj === void 0;
};

// Has own property?
_.has = function(obj, key) {
  return hasOwnProperty.call(obj, key);
};
// Utility Functions
// -----------------

// Run Underscore.js in *noConflict* mode, returning the `_` variable to its
// previous owner. Returns a reference to the Underscore object.
_.noConflict = function() {
  root._ = previousUnderscore;
  return this;
};

// Keep the identity function around for default iterators.
_.identity = function(value) {
  return value;
};

// Run a function **n** times, passing the iteration index.
_.times = function(n, iterator, context) {
  var i = 0;
  while (i < n) {
    iterator.call(context, i);
    i += 1;
  }
};
// Escape a string for HTML interpolation.
// Bug fix: the replacement strings had been HTML-entity-decoded at some
// point (e.g. `.replace(/&/g, '&')`), turning every replace into a no-op so
// nothing was escaped at all. Restored the entity replacements from
// Underscore.js 1.3 so &, <, >, ", ' and / are actually escaped.
_.escape = function(string) {
  return ('' + string)
    .replace(/&/g, '&amp;')
    .replace(/</g, '&lt;')
    .replace(/>/g, '&gt;')
    .replace(/"/g, '&quot;')
    .replace(/'/g, '&#x27;')
    .replace(/\//g, '&#x2F;');
};
// Add your own custom functions to the Underscore object, ensuring that
// they're correctly added to the OOP wrapper as well.
_.mixin = function(obj) {
  var names = _.functions(obj);
  for (var i = 0; i < names.length; i++) {
    var name = names[i];
    _[name] = obj[name];
    addToWrapper(name, _[name]);
  }
};

// Generate a unique integer id (unique within the entire client session).
// Useful for temporary DOM ids.
var idCounter = 0;
_.uniqueId = function(prefix) {
  var id = idCounter;
  idCounter += 1;
  return prefix ? prefix + id : id;
};
// By default, Underscore uses ERB-style template delimiters, change the
// following template settings to use alternative delimiters.
_.templateSettings = {
  evaluate : /<%([\s\S]+?)%>/g,
  interpolate : /<%=([\s\S]+?)%>/g,
  escape : /<%-([\s\S]+?)%>/g
};

// When customizing `templateSettings`, if you don't want to define an
// interpolation, evaluation or escaping regex, we need one that is
// guaranteed not to match.
var noMatch = /.^/;

// Within an interpolation, evaluation, or escaping, remove HTML escaping
// that had been previously added.
// (Reverses the `\` -> `\\` and `'` -> `\'` escaping that `_.template`
// applies to the whole template source before the delimiters are expanded.)
var unescape = function(code) {
  return code.replace(/\\\\/g, '\\').replace(/\\'/g, "'");
};
// JavaScript micro-templating, similar to John Resig's implementation.
// Underscore templating handles arbitrary delimiters, preserves whitespace,
// and correctly escapes quotes within interpolated code.
_.template = function(str, data) {
  var c = _.templateSettings;
  // Build the source of a rendering function that accumulates output chunks
  // in `__p`. The template text is embedded as single-quoted string
  // literals, so backslashes and quotes are escaped first, then each
  // delimiter kind is rewritten into the corresponding generated JS.
  var tmpl = 'var __p=[],print=function(){__p.push.apply(__p,arguments);};' +
    'with(obj||{}){__p.push(\'' +
    str.replace(/\\/g, '\\\\')
      .replace(/'/g, "\\'")
      .replace(c.escape || noMatch, function(match, code) {
        // `<%- ... %>`: interpolate, HTML-escaped through `_.escape`.
        return "',_.escape(" + unescape(code) + "),'";
      })
      .replace(c.interpolate || noMatch, function(match, code) {
        // `<%= ... %>`: interpolate the raw value.
        return "'," + unescape(code) + ",'";
      })
      .replace(c.evaluate || noMatch, function(match, code) {
        // `<% ... %>`: evaluate arbitrary statements between output chunks;
        // newlines/tabs are flattened so the generated source stays one line.
        return "');" + unescape(code).replace(/[\r\n\t]/g, ' ') + ";__p.push('";
      })
      .replace(/\r/g, '\\r')
      .replace(/\n/g, '\\n')
      .replace(/\t/g, '\\t')
      + "');}return __p.join('');";
  var func = new Function('obj', '_', tmpl);
  // With `data` the template is rendered immediately; otherwise a reusable
  // render function is returned.
  if (data) return func(data, _);
  return function(data) {
    return func.call(this, data, _);
  };
};
// Add a "chain" function, which will delegate to the wrapper.
_.chain = function(obj) {
return _(obj).chain();
};
// The OOP Wrapper
// ---------------
// If Underscore is called as a function, it returns a wrapped object that
// can be used OO-style. This wrapper holds altered versions of all the
// underscore functions. Wrapped objects may be chained.
var wrapper = function(obj) { this._wrapped = obj; };
// Expose `wrapper.prototype` as `_.prototype`
_.prototype = wrapper.prototype;
// Helper function to continue chaining intermediate results.
var result = function(obj, chain) {
return chain ? _(obj).chain() : obj;
};
// A method to easily add functions to the OOP wrapper.
var addToWrapper = function(name, func) {
wrapper.prototype[name] = function() {
var args = slice.call(arguments);
unshift.call(args, this._wrapped);
return result(func.apply(_, args), this._chain);
};
};
// Add all of the Underscore functions to the wrapper object.
_.mixin(_);

// Add all mutator Array functions to the wrapper. These mutate the wrapped
// array in place and return it (wrapped again when chaining).
each(['pop', 'push', 'reverse', 'shift', 'sort', 'splice', 'unshift'], function(name) {
  var method = ArrayProto[name];
  wrapper.prototype[name] = function() {
    var wrapped = this._wrapped;
    method.apply(wrapped, arguments);
    var length = wrapped.length;
    // Delete a stray index-0 property after emptying via shift/splice —
    // presumably a workaround for a legacy browser quirk; TODO confirm.
    if ((name == 'shift' || name == 'splice') && length === 0) delete wrapped[0];
    return result(wrapped, this._chain);
  };
});

// Add all accessor Array functions to the wrapper. These return a new value
// computed from the wrapped array without mutating it.
each(['concat', 'join', 'slice'], function(name) {
  var method = ArrayProto[name];
  wrapper.prototype[name] = function() {
    return result(method.apply(this._wrapped, arguments), this._chain);
  };
});

// Start chaining a wrapped Underscore object.
wrapper.prototype.chain = function() {
  this._chain = true;
  return this;
};

// Extracts the result from a wrapped and chained object.
wrapper.prototype.value = function() {
  return this._wrapped;
};
}).call(this); | PypiClean |
/CaseRecommender-1.1.1.tar.gz/CaseRecommender-1.1.1/caserec/recommenders/rating_prediction/base_nsvd1.py | # © 2019. Case Recommender (MIT License)
import numpy as np
from caserec.recommenders.rating_prediction.base_rating_prediction import BaseRatingPrediction
__author__ = 'Arthur Fortes <fortes.arthur@gmail.com>'
class BaseNSVD1(BaseRatingPrediction):
    """
    Base class shared by the NSVD1 rating-prediction algorithms.

    Stores the latent-factor model state (user/item biases ``b``/``c`` and
    factor matrices ``p``/``q``/``w``) and provides the common id-mapping,
    factor-initialization and prediction logic. Subclasses are expected to
    implement training and to set ``self.metadata`` / ``self.number_metadata``
    before ``create_factors()`` is called.
    """

    def __init__(self, train_file, test_file, output_file=None, factors=10, init_mean=0, init_stdev=0.1,
                 sep='\t', output_sep='\t', random_seed=None):
        """
        :param train_file: File which contains the train set. This file needs to have at least 3 columns
        (user item feedback_value).
        :type train_file: str

        :param test_file: File which contains the test set. This file needs to have at least 3 columns
        (user item feedback_value).
        :type test_file: str, default None

        :param output_file: File with dir to write the final predictions
        :type output_file: str, default None

        :param factors: Number of latent factors per user/item
        :type factors: int, default 10

        :param init_mean: Mean of the normal distribution used to initialize the latent factors
        :type init_mean: float, default 0

        :param init_stdev: Standard deviation of the normal distribution used to initialize the latent factors
        :type init_stdev: float, default 0.1

        :param sep: Delimiter for input files
        :type sep: str, default '\t'

        :param output_sep: Delimiter for output file
        :type output_sep: str, default '\t'

        :param random_seed: Number of seed. Lock random numbers for reproducibility of experiments.
        :type random_seed: int, default None
        """
        super(BaseNSVD1, self).__init__(train_file=train_file, test_file=test_file, output_file=output_file,
                                        sep=sep, output_sep=output_sep)

        self.factors = factors
        self.init_mean = init_mean
        self.init_stdev = init_stdev

        if random_seed is not None:
            np.random.seed(random_seed)

        # internal vars
        # self.users / self.items are populated by BaseRatingPrediction.
        self.number_users = len(self.users)
        self.number_items = len(self.items)
        # Bidirectional mappings between external ids and dense internal ids.
        self.item_to_item_id = {}
        self.item_id_to_item = {}
        self.user_to_user_id = {}
        self.user_id_to_user = {}
        # Model parameters; filled in by create_factors() / subclasses.
        self.x = None
        self.p = None  # user factor matrix (number_users x factors)
        self.q = None  # item factor matrix (number_items x factors)
        self.w = None  # metadata factor matrix (number_metadata x factors)
        self.b = None  # user biases
        self.c = None  # item biases
        self.metadata = None
        self.number_metadata = None
        self.last_rmse = 0
        self.predictions = []

    def init_model(self):
        """
        Method to treat and initialize the model.

        Maps users and items to the dense internal ids used to index the
        factor matrices (items/users that only appear in the test set are
        already part of ``self.items`` / ``self.users``).
        """
        for i, item in enumerate(self.items):
            self.item_to_item_id.update({item: i})
            self.item_id_to_item.update({i: item})
        for u, user in enumerate(self.users):
            self.user_to_user_id.update({user: u})
            self.user_id_to_user.update({u: user})

    def create_factors(self):
        """
        Initialize biases and factor matrices by drawing from a normal
        distribution with mean ``init_mean`` and std ``init_stdev``.
        ``self.number_metadata`` must be set by the subclass beforehand.
        """
        self.b = np.random.normal(self.init_mean, self.init_stdev, self.number_users)
        self.c = np.random.normal(self.init_mean, self.init_stdev, self.number_items)
        self.p = np.random.normal(self.init_mean, self.init_stdev, (self.number_users, self.factors))
        self.q = np.random.normal(self.init_mean, self.init_stdev, (self.number_items, self.factors))
        self.w = np.random.normal(self.init_mean, self.init_stdev, (self.number_metadata, self.factors))

    def _predict(self, user, item, cond=True):
        """
        Score an internal (user, item) id pair.

        :param user: Internal user id (index into ``b`` and ``p``)
        :param item: Internal item id (index into ``c`` and ``q``)
        :param cond: If True, clamp the score to the rating range of the train set
        :return: Predicted rating
        """
        rui = self.b[user] + self.c[item] + np.dot(self.p[user], self.q[item])

        if cond:
            # Keep predictions inside the observed rating scale.
            if rui > self.train_set["max_value"]:
                rui = self.train_set["max_value"]
            elif rui < self.train_set["min_value"]:
                rui = self.train_set["min_value"]

        return rui

    def predict(self):
        """
        This method computes a final rating for unknown pairs (user, item),
        appending (user, item, rating) tuples to ``self.predictions``.

        :raises NotImplementedError: If no test set was provided.
        """
        if self.test_file is None:
            # Bug fix: this used to be `raise NotImplemented`, which actually
            # raises a TypeError because NotImplemented is not an exception.
            raise NotImplementedError('predict() requires a test_file with (user, item) pairs')

        for user in self.test_set['users']:
            for item in self.test_set['feedback'][user]:
                rui = self._predict(self.user_to_user_id[user], self.item_to_item_id[item])
                self.predictions.append((user, item, rui))
/FlaskCms-0.0.4.tar.gz/FlaskCms-0.0.4/flask_cms/static/js/ace/mode-svg.js | ace.define("ace/mode/xml_highlight_rules",["require","exports","module","ace/lib/oop","ace/mode/text_highlight_rules"], function(require, exports, module) {
"use strict";
var oop = require("../lib/oop");
var TextHighlightRules = require("./text_highlight_rules").TextHighlightRules;
var XmlHighlightRules = function(normalize) {
this.$rules = {
start : [
{token : "string.cdata.xml", regex : "<\\!\\[CDATA\\[", next : "cdata"},
{
token : ["punctuation.xml-decl.xml", "keyword.xml-decl.xml"],
regex : "(<\\?)(xml)(?=[\\s])", next : "xml_decl", caseInsensitive: true
},
{
token : ["punctuation.instruction.xml", "keyword.instruction.xml"],
regex : "(<\\?)([-_a-zA-Z0-9]+)", next : "processing_instruction",
},
{token : "comment.xml", regex : "<\\!--", next : "comment"},
{
token : ["xml-pe.doctype.xml", "xml-pe.doctype.xml"],
regex : "(<\\!)(DOCTYPE)(?=[\\s])", next : "doctype", caseInsensitive: true
},
{include : "tag"},
{token : "text.end-tag-open.xml", regex: "</"},
{token : "text.tag-open.xml", regex: "<"},
{include : "reference"},
{defaultToken : "text.xml"}
],
xml_decl : [{
token : "entity.other.attribute-name.decl-attribute-name.xml",
regex : "(?:[-_a-zA-Z0-9]+:)?[-_a-zA-Z0-9]+"
}, {
token : "keyword.operator.decl-attribute-equals.xml",
regex : "="
}, {
include: "whitespace"
}, {
include: "string"
}, {
token : "punctuation.xml-decl.xml",
regex : "\\?>",
next : "start"
}],
processing_instruction : [
{token : "punctuation.instruction.xml", regex : "\\?>", next : "start"},
{defaultToken : "instruction.xml"}
],
doctype : [
{include : "whitespace"},
{include : "string"},
{token : "xml-pe.doctype.xml", regex : ">", next : "start"},
{token : "xml-pe.xml", regex : "[-_a-zA-Z0-9:]+"},
{token : "punctuation.int-subset", regex : "\\[", push : "int_subset"}
],
int_subset : [{
token : "text.xml",
regex : "\\s+"
}, {
token: "punctuation.int-subset.xml",
regex: "]",
next: "pop"
}, {
token : ["punctuation.markup-decl.xml", "keyword.markup-decl.xml"],
regex : "(<\\!)([-_a-zA-Z0-9]+)",
push : [{
token : "text",
regex : "\\s+"
},
{
token : "punctuation.markup-decl.xml",
regex : ">",
next : "pop"
},
{include : "string"}]
}],
cdata : [
{token : "string.cdata.xml", regex : "\\]\\]>", next : "start"},
{token : "text.xml", regex : "\\s+"},
{token : "text.xml", regex : "(?:[^\\]]|\\](?!\\]>))+"}
],
comment : [
{token : "comment.xml", regex : "-->", next : "start"},
{defaultToken : "comment.xml"}
],
reference : [{
token : "constant.language.escape.reference.xml",
regex : "(?:&#[0-9]+;)|(?:&#x[0-9a-fA-F]+;)|(?:&[a-zA-Z0-9_:\\.-]+;)"
}],
attr_reference : [{
token : "constant.language.escape.reference.attribute-value.xml",
regex : "(?:&#[0-9]+;)|(?:&#x[0-9a-fA-F]+;)|(?:&[a-zA-Z0-9_:\\.-]+;)"
}],
tag : [{
token : ["meta.tag.punctuation.tag-open.xml", "meta.tag.punctuation.end-tag-open.xml", "meta.tag.tag-name.xml"],
regex : "(?:(<)|(</))((?:[-_a-zA-Z0-9]+:)?[-_a-zA-Z0-9]+)",
next: [
{include : "attributes"},
{token : "meta.tag.punctuation.tag-close.xml", regex : "/?>", next : "start"}
]
}],
tag_whitespace : [
{token : "text.tag-whitespace.xml", regex : "\\s+"}
],
whitespace : [
{token : "text.whitespace.xml", regex : "\\s+"}
],
string: [{
token : "string.xml",
regex : "'",
push : [
{token : "string.xml", regex: "'", next: "pop"},
{defaultToken : "string.xml"}
]
}, {
token : "string.xml",
regex : '"',
push : [
{token : "string.xml", regex: '"', next: "pop"},
{defaultToken : "string.xml"}
]
}],
attributes: [{
token : "entity.other.attribute-name.xml",
regex : "(?:[-_a-zA-Z0-9]+:)?[-_a-zA-Z0-9]+"
}, {
token : "keyword.operator.attribute-equals.xml",
regex : "="
}, {
include: "tag_whitespace"
}, {
include: "attribute_value"
}],
attribute_value: [{
token : "string.attribute-value.xml",
regex : "'",
push : [
{token : "string.attribute-value.xml", regex: "'", next: "pop"},
{include : "attr_reference"},
{defaultToken : "string.attribute-value.xml"}
]
}, {
token : "string.attribute-value.xml",
regex : '"',
push : [
{token : "string.attribute-value.xml", regex: '"', next: "pop"},
{include : "attr_reference"},
{defaultToken : "string.attribute-value.xml"}
]
}]
};
if (this.constructor === XmlHighlightRules)
this.normalizeRules();
};
(function() {
this.embedTagRules = function(HighlightRules, prefix, tag){
this.$rules.tag.unshift({
token : ["meta.tag.punctuation.tag-open.xml", "meta.tag." + tag + ".tag-name.xml"],
regex : "(<)(" + tag + "(?=\\s|>|$))",
next: [
{include : "attributes"},
{token : "meta.tag.punctuation.tag-close.xml", regex : "/?>", next : prefix + "start"}
]
});
this.$rules[tag + "-end"] = [
{include : "attributes"},
{token : "meta.tag.punctuation.tag-close.xml", regex : "/?>", next: "start",
onMatch : function(value, currentState, stack) {
stack.splice(0);
return this.token;
}}
]
this.embedRules(HighlightRules, prefix, [{
token: ["meta.tag.punctuation.end-tag-open.xml", "meta.tag." + tag + ".tag-name.xml"],
regex : "(</)(" + tag + "(?=\\s|>|$))",
next: tag + "-end"
}, {
token: "string.cdata.xml",
regex : "<\\!\\[CDATA\\["
}, {
token: "string.cdata.xml",
regex : "\\]\\]>"
}]);
};
}).call(TextHighlightRules.prototype);
oop.inherits(XmlHighlightRules, TextHighlightRules);
exports.XmlHighlightRules = XmlHighlightRules;
});
ace.define("ace/mode/behaviour/xml",["require","exports","module","ace/lib/oop","ace/mode/behaviour","ace/token_iterator"], function(require, exports, module) {
"use strict";
var oop = require("../../lib/oop");
var Behaviour = require("../behaviour").Behaviour;
var TokenIterator = require("../../token_iterator").TokenIterator;
function is(token, type) {
return token.type.lastIndexOf(type + ".xml") > -1;
}
var XmlBehaviour = function () {
this.add("string_dquotes", "insertion", function (state, action, editor, session, text) {
if (text == '"' || text == "'") {
var quote = text;
var selected = session.doc.getTextRange(editor.getSelectionRange());
if (selected !== "" && selected !== "'" && selected != '"' && editor.getWrapBehavioursEnabled()) {
return {
text: quote + selected + quote,
selection: false
};
}
var cursor = editor.getCursorPosition();
var line = session.doc.getLine(cursor.row);
var rightChar = line.substring(cursor.column, cursor.column + 1);
var iterator = new TokenIterator(session, cursor.row, cursor.column);
var token = iterator.getCurrentToken();
if (rightChar == quote && (is(token, "attribute-value") || is(token, "string"))) {
return {
text: "",
selection: [1, 1]
};
}
if (!token)
token = iterator.stepBackward();
if (!token)
return;
while (is(token, "tag-whitespace") || is(token, "whitespace")) {
token = iterator.stepBackward();
}
var rightSpace = !rightChar || rightChar.match(/\s/);
if (is(token, "attribute-equals") && (rightSpace || rightChar == '>') || (is(token, "decl-attribute-equals") && (rightSpace || rightChar == '?'))) {
return {
text: quote + quote,
selection: [1, 1]
};
}
}
});
this.add("string_dquotes", "deletion", function(state, action, editor, session, range) {
var selected = session.doc.getTextRange(range);
if (!range.isMultiLine() && (selected == '"' || selected == "'")) {
var line = session.doc.getLine(range.start.row);
var rightChar = line.substring(range.start.column + 1, range.start.column + 2);
if (rightChar == selected) {
range.end.column++;
return range;
}
}
});
this.add("autoclosing", "insertion", function (state, action, editor, session, text) {
if (text == '>') {
var position = editor.getCursorPosition();
var iterator = new TokenIterator(session, position.row, position.column);
var token = iterator.getCurrentToken() || iterator.stepBackward();
if (!token || !(is(token, "tag-name") || is(token, "tag-whitespace") || is(token, "attribute-name") || is(token, "attribute-equals") || is(token, "attribute-value")))
return;
if (is(token, "reference.attribute-value"))
return;
if (is(token, "attribute-value")) {
var firstChar = token.value.charAt(0);
if (firstChar == '"' || firstChar == "'") {
var lastChar = token.value.charAt(token.value.length - 1);
var tokenEnd = iterator.getCurrentTokenColumn() + token.value.length;
if (tokenEnd > position.column || tokenEnd == position.column && firstChar != lastChar)
return;
}
}
while (!is(token, "tag-name")) {
token = iterator.stepBackward();
}
var tokenRow = iterator.getCurrentTokenRow();
var tokenColumn = iterator.getCurrentTokenColumn();
if (is(iterator.stepBackward(), "end-tag-open"))
return;
var element = token.value;
if (tokenRow == position.row)
element = element.substring(0, position.column - tokenColumn);
if (this.voidElements.hasOwnProperty(element.toLowerCase()))
return;
return {
text: '>' + '</' + element + '>',
selection: [1, 1]
};
}
});
this.add('autoindent', 'insertion', function (state, action, editor, session, text) {
if (text == "\n") {
var cursor = editor.getCursorPosition();
var line = session.getLine(cursor.row);
var rightChars = line.substring(cursor.column, cursor.column + 2);
if (rightChars == '</') {
var next_indent = this.$getIndent(line);
var indent = next_indent + session.getTabString();
return {
text: '\n' + indent + '\n' + next_indent,
selection: [1, indent.length, 1, indent.length]
};
}
}
});
};
oop.inherits(XmlBehaviour, Behaviour);
exports.XmlBehaviour = XmlBehaviour;
});
ace.define("ace/mode/folding/xml",["require","exports","module","ace/lib/oop","ace/lib/lang","ace/range","ace/mode/folding/fold_mode","ace/token_iterator"], function(require, exports, module) {
"use strict";
var oop = require("../../lib/oop");
var lang = require("../../lib/lang");
var Range = require("../../range").Range;
var BaseFoldMode = require("./fold_mode").FoldMode;
var TokenIterator = require("../../token_iterator").TokenIterator;
var FoldMode = exports.FoldMode = function(voidElements, optionalEndTags) {
BaseFoldMode.call(this);
this.voidElements = oop.mixin(voidElements || {}, optionalEndTags || {});
};
oop.inherits(FoldMode, BaseFoldMode);
var Tag = function() {
this.tagName = "";
this.closing = false;
this.selfClosing = false;
this.start = {row: 0, column: 0};
this.end = {row: 0, column: 0};
};
function is(token, type) {
return token.type.lastIndexOf(type + ".xml") > -1;
}
(function() {
this.getFoldWidget = function(session, foldStyle, row) {
var tag = this._getFirstTagInLine(session, row);
if (!tag)
return "";
if (tag.closing || (!tag.tagName && tag.selfClosing))
return foldStyle == "markbeginend" ? "end" : "";
if (!tag.tagName || tag.selfClosing || this.voidElements.hasOwnProperty(tag.tagName.toLowerCase()))
return "";
if (this._findEndTagInLine(session, row, tag.tagName, tag.end.column))
return "";
return "start";
};
this._getFirstTagInLine = function(session, row) {
var tokens = session.getTokens(row);
var tag = new Tag();
for (var i = 0; i < tokens.length; i++) {
var token = tokens[i];
if (is(token, "tag-open")) {
tag.end.column = tag.start.column + token.value.length;
tag.closing = is(token, "end-tag-open");
token = tokens[++i];
if (!token)
return null;
tag.tagName = token.value;
tag.end.column += token.value.length;
for (i++; i < tokens.length; i++) {
token = tokens[i];
tag.end.column += token.value.length;
if (is(token, "tag-close")) {
tag.selfClosing = token.value == '/>';
break;
}
}
return tag;
} else if (is(token, "tag-close")) {
tag.selfClosing = token.value == '/>';
return tag;
}
tag.start.column += token.value.length;
}
return null;
};
this._findEndTagInLine = function(session, row, tagName, startColumn) {
var tokens = session.getTokens(row);
var column = 0;
for (var i = 0; i < tokens.length; i++) {
var token = tokens[i];
column += token.value.length;
if (column < startColumn)
continue;
if (is(token, "end-tag-open")) {
token = tokens[i + 1];
if (token && token.value == tagName)
return true;
}
}
return false;
};
this._readTagForward = function(iterator) {
var token = iterator.getCurrentToken();
if (!token)
return null;
var tag = new Tag();
do {
if (is(token, "tag-open")) {
tag.closing = is(token, "end-tag-open");
tag.start.row = iterator.getCurrentTokenRow();
tag.start.column = iterator.getCurrentTokenColumn();
} else if (is(token, "tag-name")) {
tag.tagName = token.value;
} else if (is(token, "tag-close")) {
tag.selfClosing = token.value == "/>";
tag.end.row = iterator.getCurrentTokenRow();
tag.end.column = iterator.getCurrentTokenColumn() + token.value.length;
iterator.stepForward();
return tag;
}
} while(token = iterator.stepForward());
return null;
};
this._readTagBackward = function(iterator) {
var token = iterator.getCurrentToken();
if (!token)
return null;
var tag = new Tag();
do {
if (is(token, "tag-open")) {
tag.closing = is(token, "end-tag-open");
tag.start.row = iterator.getCurrentTokenRow();
tag.start.column = iterator.getCurrentTokenColumn();
iterator.stepBackward();
return tag;
} else if (is(token, "tag-name")) {
tag.tagName = token.value;
} else if (is(token, "tag-close")) {
tag.selfClosing = token.value == "/>";
tag.end.row = iterator.getCurrentTokenRow();
tag.end.column = iterator.getCurrentTokenColumn() + token.value.length;
}
} while(token = iterator.stepBackward());
return null;
};
this._pop = function(stack, tag) {
while (stack.length) {
var top = stack[stack.length-1];
if (!tag || top.tagName == tag.tagName) {
return stack.pop();
}
else if (this.voidElements.hasOwnProperty(tag.tagName)) {
return;
}
else if (this.voidElements.hasOwnProperty(top.tagName)) {
stack.pop();
continue;
} else {
return null;
}
}
};
this.getFoldWidgetRange = function(session, foldStyle, row) {
var firstTag = this._getFirstTagInLine(session, row);
if (!firstTag)
return null;
var isBackward = firstTag.closing || firstTag.selfClosing;
var stack = [];
var tag;
if (!isBackward) {
var iterator = new TokenIterator(session, row, firstTag.start.column);
var start = {
row: row,
column: firstTag.start.column + firstTag.tagName.length + 2
};
while (tag = this._readTagForward(iterator)) {
if (tag.selfClosing) {
if (!stack.length) {
tag.start.column += tag.tagName.length + 2;
tag.end.column -= 2;
return Range.fromPoints(tag.start, tag.end);
} else
continue;
}
if (tag.closing) {
this._pop(stack, tag);
if (stack.length == 0)
return Range.fromPoints(start, tag.start);
}
else {
stack.push(tag);
}
}
}
else {
var iterator = new TokenIterator(session, row, firstTag.end.column);
var end = {
row: row,
column: firstTag.start.column
};
while (tag = this._readTagBackward(iterator)) {
if (tag.selfClosing) {
if (!stack.length) {
tag.start.column += tag.tagName.length + 2;
tag.end.column -= 2;
return Range.fromPoints(tag.start, tag.end);
} else
continue;
}
if (!tag.closing) {
this._pop(stack, tag);
if (stack.length == 0) {
tag.start.column += tag.tagName.length + 2;
return Range.fromPoints(tag.start, end);
}
}
else {
stack.push(tag);
}
}
}
};
}).call(FoldMode.prototype);
});
ace.define("ace/mode/xml",["require","exports","module","ace/lib/oop","ace/lib/lang","ace/mode/text","ace/mode/xml_highlight_rules","ace/mode/behaviour/xml","ace/mode/folding/xml"], function(require, exports, module) {
"use strict";
var oop = require("../lib/oop");
var lang = require("../lib/lang");
var TextMode = require("./text").Mode;
var XmlHighlightRules = require("./xml_highlight_rules").XmlHighlightRules;
var XmlBehaviour = require("./behaviour/xml").XmlBehaviour;
var XmlFoldMode = require("./folding/xml").FoldMode;
var Mode = function() {
this.HighlightRules = XmlHighlightRules;
this.$behaviour = new XmlBehaviour();
this.foldingRules = new XmlFoldMode();
};
oop.inherits(Mode, TextMode);
(function() {
this.voidElements = lang.arrayToMap([]);
this.blockComment = {start: "<!--", end: "-->"};
this.$id = "ace/mode/xml";
}).call(Mode.prototype);
exports.Mode = Mode;
});
ace.define("ace/mode/doc_comment_highlight_rules",["require","exports","module","ace/lib/oop","ace/mode/text_highlight_rules"], function(require, exports, module) {
"use strict";
var oop = require("../lib/oop");
var TextHighlightRules = require("./text_highlight_rules").TextHighlightRules;
var DocCommentHighlightRules = function() {
this.$rules = {
"start" : [ {
token : "comment.doc.tag",
regex : "@[\\w\\d_]+" // TODO: fix email addresses
}, {
token : "comment.doc.tag",
regex : "\\bTODO\\b"
}, {
defaultToken : "comment.doc"
}]
};
};
oop.inherits(DocCommentHighlightRules, TextHighlightRules);
DocCommentHighlightRules.getStartRule = function(start) {
return {
token : "comment.doc", // doc comment
regex : "\\/\\*(?=\\*)",
next : start
};
};
DocCommentHighlightRules.getEndRule = function (start) {
return {
token : "comment.doc", // closing comment
regex : "\\*\\/",
next : start
};
};
exports.DocCommentHighlightRules = DocCommentHighlightRules;
});
ace.define("ace/mode/javascript_highlight_rules",["require","exports","module","ace/lib/oop","ace/mode/doc_comment_highlight_rules","ace/mode/text_highlight_rules"], function(require, exports, module) {
"use strict";
// Tokenizer rules for JavaScript, built on Ace's TextHighlightRules, with the
// shared doc-comment rules embedded under the "doc-" state prefix.
var oop = require("../lib/oop");
var DocCommentHighlightRules = require("./doc_comment_highlight_rules").DocCommentHighlightRules;
var TextHighlightRules = require("./text_highlight_rules").TextHighlightRules;
var JavaScriptHighlightRules = function() {
    // Classify whole identifiers into token groups; anything unmatched falls
    // back to the generic "identifier" token.
    var keywordMapper = this.createKeywordMapper({
        "variable.language":
            "Array|Boolean|Date|Function|Iterator|Number|Object|RegExp|String|Proxy|" + // Constructors
            "Namespace|QName|XML|XMLList|" + // E4X
            "ArrayBuffer|Float32Array|Float64Array|Int16Array|Int32Array|Int8Array|" +
            "Uint16Array|Uint32Array|Uint8Array|Uint8ClampedArray|" +
            "Error|EvalError|InternalError|RangeError|ReferenceError|StopIteration|" + // Errors
            "SyntaxError|TypeError|URIError|" +
            "decodeURI|decodeURIComponent|encodeURI|encodeURIComponent|eval|isFinite|" + // Non-constructor functions
            "isNaN|parseFloat|parseInt|" +
            "JSON|Math|" + // Other
            "this|arguments|prototype|window|document" , // Pseudo
        "keyword":
            "const|yield|import|get|set|" +
            "break|case|catch|continue|default|delete|do|else|finally|for|function|" +
            "if|in|instanceof|new|return|switch|throw|try|typeof|let|var|while|with|debugger|" +
            "__parent__|__count__|escape|unescape|with|__proto__|" +
            "class|enum|extends|super|export|implements|private|public|interface|package|protected|static",
        "storage.type":
            "const|let|var|function",
        "constant.language":
            "null|Infinity|NaN|undefined",
        "support.function":
            "alert",
        "constant.language.boolean": "true|false"
    }, "identifier");
    // Keywords after which a "/" starts a regex literal rather than division.
    var kwBeforeRe = "case|do|else|finally|in|instanceof|return|throw|try|typeof|yield|void";
    var identifierRe = "[a-zA-Z\\$_\u00a1-\uffff][a-zA-Z\\d\\$_\u00a1-\uffff]*\\b";
    // String escape sequences: hex, unicode, octal forms, or any escaped char.
    var escapedRe = "\\\\(?:x[0-9a-fA-F]{2}|" + // hex
        "u[0-9a-fA-F]{4}|" + // unicode
        "[0-2][0-7]{0,2}|" + // oct
        "3[0-6][0-7]?|" + // oct
        "37[0-7]?|" + // oct
        "[4-7][0-7]?|" + //oct
        ".)";
    // Two primary states: in "no_regex" a "/" is division; in "start" a "/"
    // may begin a regex literal.
    this.$rules = {
        "no_regex" : [
            {
                token : "comment",
                regex : "\\/\\/",
                next : "line_comment"
            },
            DocCommentHighlightRules.getStartRule("doc-start"),
            {
                token : "comment", // multi line comment
                regex : /\/\*/,
                next : "comment"
            }, {
                token : "string",
                regex : "'(?=.)",
                next : "qstring"
            }, {
                token : "string",
                regex : '"(?=.)',
                next : "qqstring"
            }, {
                token : "constant.numeric", // hex
                regex : /0[xX][0-9a-fA-F]+\b/
            }, {
                token : "constant.numeric", // float
                regex : /[+-]?\d+(?:(?:\.\d*)?(?:[eE][+-]?\d+)?)?\b/
            }, {
                // Function-definition patterns below capture sub-groups so each
                // piece (receiver, dot, name, "function", paren) gets its own token.
                token : [
                    "storage.type", "punctuation.operator", "support.function",
                    "punctuation.operator", "entity.name.function", "text","keyword.operator"
                ],
                regex : "(" + identifierRe + ")(\\.)(prototype)(\\.)(" + identifierRe +")(\\s*)(=)",
                next: "function_arguments"
            }, {
                token : [
                    "storage.type", "punctuation.operator", "entity.name.function", "text",
                    "keyword.operator", "text", "storage.type", "text", "paren.lparen"
                ],
                regex : "(" + identifierRe + ")(\\.)(" + identifierRe +")(\\s*)(=)(\\s*)(function)(\\s*)(\\()",
                next: "function_arguments"
            }, {
                token : [
                    "entity.name.function", "text", "keyword.operator", "text", "storage.type",
                    "text", "paren.lparen"
                ],
                regex : "(" + identifierRe +")(\\s*)(=)(\\s*)(function)(\\s*)(\\()",
                next: "function_arguments"
            }, {
                token : [
                    "storage.type", "punctuation.operator", "entity.name.function", "text",
                    "keyword.operator", "text",
                    "storage.type", "text", "entity.name.function", "text", "paren.lparen"
                ],
                regex : "(" + identifierRe + ")(\\.)(" + identifierRe +")(\\s*)(=)(\\s*)(function)(\\s+)(\\w+)(\\s*)(\\()",
                next: "function_arguments"
            }, {
                token : [
                    "storage.type", "text", "entity.name.function", "text", "paren.lparen"
                ],
                regex : "(function)(\\s+)(" + identifierRe + ")(\\s*)(\\()",
                next: "function_arguments"
            }, {
                token : [
                    "entity.name.function", "text", "punctuation.operator",
                    "text", "storage.type", "text", "paren.lparen"
                ],
                regex : "(" + identifierRe + ")(\\s*)(:)(\\s*)(function)(\\s*)(\\()",
                next: "function_arguments"
            }, {
                token : [
                    "text", "text", "storage.type", "text", "paren.lparen"
                ],
                regex : "(:)(\\s*)(function)(\\s*)(\\()",
                next: "function_arguments"
            }, {
                token : "keyword",
                regex : "(?:" + kwBeforeRe + ")\\b",
                next : "start"
            }, {
                // Generated lookup tries for known member names (built from a
                // word list; not intended to be hand-edited).
                token : ["punctuation.operator", "support.function"],
                regex : /(\.)(s(?:h(?:ift|ow(?:Mod(?:elessDialog|alDialog)|Help))|croll(?:X|By(?:Pages|Lines)?|Y|To)?|t(?:op|rike)|i(?:n|zeToContent|debar|gnText)|ort|u(?:p|b(?:str(?:ing)?)?)|pli(?:ce|t)|e(?:nd|t(?:Re(?:sizable|questHeader)|M(?:i(?:nutes|lliseconds)|onth)|Seconds|Ho(?:tKeys|urs)|Year|Cursor|Time(?:out)?|Interval|ZOptions|Date|UTC(?:M(?:i(?:nutes|lliseconds)|onth)|Seconds|Hours|Date|FullYear)|FullYear|Active)|arch)|qrt|lice|avePreferences|mall)|h(?:ome|andleEvent)|navigate|c(?:har(?:CodeAt|At)|o(?:s|n(?:cat|textual|firm)|mpile)|eil|lear(?:Timeout|Interval)?|a(?:ptureEvents|ll)|reate(?:StyleSheet|Popup|EventObject))|t(?:o(?:GMTString|S(?:tring|ource)|U(?:TCString|pperCase)|Lo(?:caleString|werCase))|est|a(?:n|int(?:Enabled)?))|i(?:s(?:NaN|Finite)|ndexOf|talics)|d(?:isableExternalCapture|ump|etachEvent)|u(?:n(?:shift|taint|escape|watch)|pdateCommands)|j(?:oin|avaEnabled)|p(?:o(?:p|w)|ush|lugins.refresh|a(?:ddings|rse(?:Int|Float)?)|r(?:int|ompt|eference))|e(?:scape|nableExternalCapture|val|lementFromPoint|x(?:p|ec(?:Script|Command)?))|valueOf|UTC|queryCommand(?:State|Indeterm|Enabled|Value)|f(?:i(?:nd|le(?:ModifiedDate|Size|CreatedDate|UpdatedDate)|xed)|o(?:nt(?:size|color)|rward)|loor|romCharCode)|watch|l(?:ink|o(?:ad|g)|astIndexOf)|a(?:sin|nchor|cos|t(?:tachEvent|ob|an(?:2)?)|pply|lert|b(?:s|ort))|r(?:ou(?:nd|teEvents)|e(?:size(?:By|To)|calc|turnValue|place|verse|l(?:oad|ease(?:Capture|Events)))|andom)|g(?:o|et(?:ResponseHeader|M(?:i(?:nutes|lliseconds)|onth)|Se(?:conds|lection)|Hours|Year|Time(?:zoneOffset)?|Da(?:y|te)|UTC(?:M(?:i(?:nutes|lliseconds)|onth)|Seconds|Hours|Da(?:y|te)|FullYear)|FullYear|A(?:ttention|llResponseHeaders)))|m(?:in|ove(?:B(?:y|elow)|To(?:Absolute)?|Above)|ergeAttributes|a(?:tch|rgins|x))|b(?:toa|ig|o(?:ld|rderWidths)|link|ack))\b(?=\()/
            }, {
                token : ["punctuation.operator", "support.function.dom"],
                regex : /(\.)(s(?:ub(?:stringData|mit)|plitText|e(?:t(?:NamedItem|Attribute(?:Node)?)|lect))|has(?:ChildNodes|Feature)|namedItem|c(?:l(?:ick|o(?:se|neNode))|reate(?:C(?:omment|DATASection|aption)|T(?:Head|extNode|Foot)|DocumentFragment|ProcessingInstruction|E(?:ntityReference|lement)|Attribute))|tabIndex|i(?:nsert(?:Row|Before|Cell|Data)|tem)|open|delete(?:Row|C(?:ell|aption)|T(?:Head|Foot)|Data)|focus|write(?:ln)?|a(?:dd|ppend(?:Child|Data))|re(?:set|place(?:Child|Data)|move(?:NamedItem|Child|Attribute(?:Node)?)?)|get(?:NamedItem|Element(?:sBy(?:Name|TagName)|ById)|Attribute(?:Node)?)|blur)\b(?=\()/
            }, {
                token : ["punctuation.operator", "support.constant"],
                regex : /(\.)(s(?:ystemLanguage|cr(?:ipts|ollbars|een(?:X|Y|Top|Left))|t(?:yle(?:Sheets)?|atus(?:Text|bar)?)|ibling(?:Below|Above)|ource|uffixes|e(?:curity(?:Policy)?|l(?:ection|f)))|h(?:istory|ost(?:name)?|as(?:h|Focus))|y|X(?:MLDocument|SLDocument)|n(?:ext|ame(?:space(?:s|URI)|Prop))|M(?:IN_VALUE|AX_VALUE)|c(?:haracterSet|o(?:n(?:structor|trollers)|okieEnabled|lorDepth|mp(?:onents|lete))|urrent|puClass|l(?:i(?:p(?:boardData)?|entInformation)|osed|asses)|alle(?:e|r)|rypto)|t(?:o(?:olbar|p)|ext(?:Transform|Indent|Decoration|Align)|ags)|SQRT(?:1_2|2)|i(?:n(?:ner(?:Height|Width)|put)|ds|gnoreCase)|zIndex|o(?:scpu|n(?:readystatechange|Line)|uter(?:Height|Width)|p(?:sProfile|ener)|ffscreenBuffering)|NEGATIVE_INFINITY|d(?:i(?:splay|alog(?:Height|Top|Width|Left|Arguments)|rectories)|e(?:scription|fault(?:Status|Ch(?:ecked|arset)|View)))|u(?:ser(?:Profile|Language|Agent)|n(?:iqueID|defined)|pdateInterval)|_content|p(?:ixelDepth|ort|ersonalbar|kcs11|l(?:ugins|atform)|a(?:thname|dding(?:Right|Bottom|Top|Left)|rent(?:Window|Layer)?|ge(?:X(?:Offset)?|Y(?:Offset)?))|r(?:o(?:to(?:col|type)|duct(?:Sub)?|mpter)|e(?:vious|fix)))|e(?:n(?:coding|abledPlugin)|x(?:ternal|pando)|mbeds)|v(?:isibility|endor(?:Sub)?|Linkcolor)|URLUnencoded|P(?:I|OSITIVE_INFINITY)|f(?:ilename|o(?:nt(?:Size|Family|Weight)|rmName)|rame(?:s|Element)|gColor)|E|whiteSpace|l(?:i(?:stStyleType|n(?:eHeight|kColor))|o(?:ca(?:tion(?:bar)?|lName)|wsrc)|e(?:ngth|ft(?:Context)?)|a(?:st(?:M(?:odified|atch)|Index|Paren)|yer(?:s|X)|nguage))|a(?:pp(?:MinorVersion|Name|Co(?:deName|re)|Version)|vail(?:Height|Top|Width|Left)|ll|r(?:ity|guments)|Linkcolor|bove)|r(?:ight(?:Context)?|e(?:sponse(?:XML|Text)|adyState))|global|x|m(?:imeTypes|ultiline|enubar|argin(?:Right|Bottom|Top|Left))|L(?:N(?:10|2)|OG(?:10E|2E))|b(?:o(?:ttom|rder(?:Width|RightWidth|BottomWidth|Style|Color|TopWidth|LeftWidth))|ufferDepth|elow|ackground(?:Color|Image)))\b/
            }, {
                token : ["support.constant"],
                regex : /that\b/
            }, {
                token : ["storage.type", "punctuation.operator", "support.function.firebug"],
                regex : /(console)(\.)(warn|info|log|error|time|trace|timeEnd|assert)\b/
            }, {
                token : keywordMapper,
                regex : identifierRe
            }, {
                token : "keyword.operator",
                regex : /--|\+\+|[!$%&*+\-~]|===|==|=|!=|!==|<=|>=|<<=|>>=|>>>=|<>|<|>|!|&&|\|\||\?\:|\*=|%=|\+=|\-=|&=|\^=/,
                next : "start"
            }, {
                token : "punctuation.operator",
                regex : /\?|\:|\,|\;|\./,
                next : "start"
            }, {
                token : "paren.lparen",
                regex : /[\[({]/,
                next : "start"
            }, {
                token : "paren.rparen",
                regex : /[\])}]/
            }, {
                token : "keyword.operator",
                regex : /\/=?/,
                next : "start"
            }, {
                token: "comment",
                regex: /^#!.*$/
            }
        ],
        "start": [
            DocCommentHighlightRules.getStartRule("doc-start"),
            {
                token : "comment", // multi line comment
                regex : "\\/\\*",
                next : "comment_regex_allowed"
            }, {
                token : "comment",
                regex : "\\/\\/",
                next : "line_comment_regex_allowed"
            }, {
                token: "string.regexp",
                regex: "\\/",
                next: "regex"
            }, {
                token : "text",
                regex : "\\s+|^$",
                next : "start"
            }, {
                // Nothing regex-like matched: fall back to the division state.
                token: "empty",
                regex: "",
                next: "no_regex"
            }
        ],
        "regex": [
            {
                token: "regexp.keyword.operator",
                regex: "\\\\(?:u[\\da-fA-F]{4}|x[\\da-fA-F]{2}|.)"
            }, {
                token: "string.regexp",
                regex: "/[sxngimy]*",
                next: "no_regex"
            }, {
                token : "invalid",
                regex: /\{\d+\b,?\d*\}[+*]|[+*$^?][+*]|[$^][?]|\?{3,}/
            }, {
                token : "constant.language.escape",
                regex: /\(\?[:=!]|\)|\{\d+\b,?\d*\}|[+*]\?|[()$^+*?.]/
            }, {
                token : "constant.language.delimiter",
                regex: /\|/
            }, {
                token: "constant.language.escape",
                regex: /\[\^?/,
                next: "regex_character_class"
            }, {
                token: "empty",
                regex: "$",
                next: "no_regex"
            }, {
                defaultToken: "string.regexp"
            }
        ],
        "regex_character_class": [
            {
                token: "regexp.keyword.operator",
                regex: "\\\\(?:u[\\da-fA-F]{4}|x[\\da-fA-F]{2}|.)"
            }, {
                token: "constant.language.escape",
                regex: "]",
                next: "regex"
            }, {
                token: "constant.language.escape",
                regex: "-"
            }, {
                token: "empty",
                regex: "$",
                next: "no_regex"
            }, {
                defaultToken: "string.regexp.charachterclass"
            }
        ],
        "function_arguments": [
            {
                token: "variable.parameter",
                regex: identifierRe
            }, {
                token: "punctuation.operator",
                regex: "[, ]+"
            }, {
                token: "punctuation.operator",
                regex: "$"
            }, {
                token: "empty",
                regex: "",
                next: "no_regex"
            }
        ],
        "comment_regex_allowed" : [
            {token : "comment", regex : "\\*\\/", next : "start"},
            {defaultToken : "comment"}
        ],
        "comment" : [
            {token : "comment", regex : "\\*\\/", next : "no_regex"},
            {defaultToken : "comment"}
        ],
        "line_comment_regex_allowed" : [
            {token : "comment", regex : "$|^", next : "start"},
            {defaultToken : "comment"}
        ],
        "line_comment" : [
            {token : "comment", regex : "$|^", next : "no_regex"},
            {defaultToken : "comment"}
        ],
        "qqstring" : [
            {
                token : "constant.language.escape",
                regex : escapedRe
            }, {
                token : "string",
                regex : "\\\\$",
                next  : "qqstring"
            }, {
                token : "string",
                regex : '"|$',
                next  : "no_regex"
            }, {
                defaultToken: "string"
            }
        ],
        "qstring" : [
            {
                token : "constant.language.escape",
                regex : escapedRe
            }, {
                token : "string",
                regex : "\\\\$",
                next  : "qstring"
            }, {
                token : "string",
                regex : "'|$",
                next  : "no_regex"
            }, {
                defaultToken: "string"
            }
        ]
    };
    // Splice in the shared doc-comment rules under the "doc-" state prefix;
    // the end rule returns to "no_regex" when "*/" is seen.
    this.embedRules(DocCommentHighlightRules, "doc-",
        [ DocCommentHighlightRules.getEndRule("no_regex") ]);
};
oop.inherits(JavaScriptHighlightRules, TextHighlightRules);
exports.JavaScriptHighlightRules = JavaScriptHighlightRules;
});
ace.define("ace/mode/matching_brace_outdent",["require","exports","module","ace/range"], function(require, exports, module) {
"use strict";
// Outdent helper: when "}" is typed on a whitespace-only line, re-indent that
// line to match the indentation of the line holding the opening brace.
var Range = require("../range").Range;
var MatchingBraceOutdent = function() {};
(function() {
    // Trigger only if the current line is pure whitespace and the typed input
    // begins (after optional whitespace) with "}".
    this.checkOutdent = function(line, input) {
        if (! /^\s+$/.test(line))
            return false;
        return /^\s*\}/.test(input);
    };
    this.autoOutdent = function(doc, row) {
        var line = doc.getLine(row);
        var match = line.match(/^(\s*\})/);
        if (!match) return 0;
        var column = match[1].length;
        var openBracePos = doc.findMatchingBracket({row: row, column: column});
        // No matching brace, or the pair is on the same line: nothing to do.
        if (!openBracePos || openBracePos.row == row) return 0;
        var indent = this.$getIndent(doc.getLine(openBracePos.row));
        // Replace the whitespace before "}" with the opener line's indent.
        doc.replace(new Range(row, 0, row, column-1), indent);
    };
    // Leading-whitespace prefix of a line ("" when none).
    this.$getIndent = function(line) {
        return line.match(/^\s*/)[0];
    };
}).call(MatchingBraceOutdent.prototype);
exports.MatchingBraceOutdent = MatchingBraceOutdent;
});
ace.define("ace/mode/behaviour/cstyle",["require","exports","module","ace/lib/oop","ace/mode/behaviour","ace/token_iterator","ace/lib/lang"], function(require, exports, module) {
"use strict";
// Auto-pairing behaviour for C-style languages: wraps selections, inserts the
// matching closer for { ( [ " ', overtypes an auto-inserted closer instead of
// duplicating it, and removes a pair when the opener is deleted.
var oop = require("../../lib/oop");
var Behaviour = require("../behaviour").Behaviour;
var TokenIterator = require("../../token_iterator").TokenIterator;
var lang = require("../../lib/lang");
// Token types in which inserting a new bracket pair is considered safe.
var SAFE_INSERT_IN_TOKENS =
    ["text", "paren.rparen", "punctuation.operator"];
var SAFE_INSERT_BEFORE_TOKENS =
    ["text", "paren.rparen", "punctuation.operator", "comment"];
// Per-selection bookkeeping of auto-inserted closers. In multi-select mode the
// cache is keyed by selection id and reset when the range count changes.
var context;
var contextCache = {}
var initContext = function(editor) {
    var id = -1;
    if (editor.multiSelect) {
        id = editor.selection.id;
        if (contextCache.rangeCount != editor.multiSelect.rangeCount)
            contextCache = {rangeCount: editor.multiSelect.rangeCount};
    }
    if (contextCache[id])
        return context = contextCache[id];
    context = contextCache[id] = {
        autoInsertedBrackets: 0,
        autoInsertedRow: -1,
        autoInsertedLineEnd: "",
        maybeInsertedBrackets: 0,
        maybeInsertedRow: -1,
        maybeInsertedLineStart: "",
        maybeInsertedLineEnd: ""
    };
};
var CstyleBehaviour = function() {
    // "{": wrap the selection, auto-close before a closer, or remember a
    // "maybe inserted" brace whose closer is added on the next newline.
    this.add("braces", "insertion", function(state, action, editor, session, text) {
        var cursor = editor.getCursorPosition();
        var line = session.doc.getLine(cursor.row);
        if (text == '{') {
            initContext(editor);
            var selection = editor.getSelectionRange();
            var selected = session.doc.getTextRange(selection);
            if (selected !== "" && selected !== "{" && editor.getWrapBehavioursEnabled()) {
                return {
                    text: '{' + selected + '}',
                    selection: false
                };
            } else if (CstyleBehaviour.isSaneInsertion(editor, session)) {
                if (/[\]\}\)]/.test(line[cursor.column]) || editor.inMultiSelectMode) {
                    CstyleBehaviour.recordAutoInsert(editor, session, "}");
                    return {
                        text: '{}',
                        selection: [1, 1]
                    };
                } else {
                    CstyleBehaviour.recordMaybeInsert(editor, session, "{");
                    return {
                        text: '{',
                        selection: [1, 1]
                    };
                }
            }
        } else if (text == '}') {
            // Typing "}" right before an auto-inserted "}" skips over it.
            initContext(editor);
            var rightChar = line.substring(cursor.column, cursor.column + 1);
            if (rightChar == '}') {
                var matching = session.$findOpeningBracket('}', {column: cursor.column + 1, row: cursor.row});
                if (matching !== null && CstyleBehaviour.isAutoInsertedClosing(cursor, line, text)) {
                    CstyleBehaviour.popAutoInsertedClosing();
                    return {
                        text: '',
                        selection: [1, 1]
                    };
                }
            }
        } else if (text == "\n" || text == "\r\n") {
            // Newline: materialize any pending "maybe inserted" closers and
            // indent the cursor line one level past the opener.
            initContext(editor);
            var closing = "";
            if (CstyleBehaviour.isMaybeInsertedClosing(cursor, line)) {
                closing = lang.stringRepeat("}", context.maybeInsertedBrackets);
                CstyleBehaviour.clearMaybeInsertedClosing();
            }
            var rightChar = line.substring(cursor.column, cursor.column + 1);
            if (rightChar === '}') {
                var openBracePos = session.findMatchingBracket({row: cursor.row, column: cursor.column+1}, '}');
                if (!openBracePos)
                     return null;
                var next_indent = this.$getIndent(session.getLine(openBracePos.row));
            } else if (closing) {
                var next_indent = this.$getIndent(line);
            } else {
                CstyleBehaviour.clearMaybeInsertedClosing();
                return;
            }
            var indent = next_indent + session.getTabString();
            return {
                text: '\n' + indent + '\n' + next_indent + closing,
                selection: [1, indent.length, 1, indent.length]
            };
        } else {
            CstyleBehaviour.clearMaybeInsertedClosing();
        }
    });
    // Deleting a "{" also removes an adjacent "}" pair.
    this.add("braces", "deletion", function(state, action, editor, session, range) {
        var selected = session.doc.getTextRange(range);
        if (!range.isMultiLine() && selected == '{') {
            initContext(editor);
            var line = session.doc.getLine(range.start.row);
            var rightChar = line.substring(range.end.column, range.end.column + 1);
            if (rightChar == '}') {
                range.end.column++;
                return range;
            } else {
                context.maybeInsertedBrackets--;
            }
        }
    });
    this.add("parens", "insertion", function(state, action, editor, session, text) {
        if (text == '(') {
            initContext(editor);
            var selection = editor.getSelectionRange();
            var selected = session.doc.getTextRange(selection);
            if (selected !== "" && editor.getWrapBehavioursEnabled()) {
                return {
                    text: '(' + selected + ')',
                    selection: false
                };
            } else if (CstyleBehaviour.isSaneInsertion(editor, session)) {
                CstyleBehaviour.recordAutoInsert(editor, session, ")");
                return {
                    text: '()',
                    selection: [1, 1]
                };
            }
        } else if (text == ')') {
            initContext(editor);
            var cursor = editor.getCursorPosition();
            var line = session.doc.getLine(cursor.row);
            var rightChar = line.substring(cursor.column, cursor.column + 1);
            if (rightChar == ')') {
                var matching = session.$findOpeningBracket(')', {column: cursor.column + 1, row: cursor.row});
                if (matching !== null && CstyleBehaviour.isAutoInsertedClosing(cursor, line, text)) {
                    CstyleBehaviour.popAutoInsertedClosing();
                    return {
                        text: '',
                        selection: [1, 1]
                    };
                }
            }
        }
    });
    this.add("parens", "deletion", function(state, action, editor, session, range) {
        var selected = session.doc.getTextRange(range);
        if (!range.isMultiLine() && selected == '(') {
            initContext(editor);
            var line = session.doc.getLine(range.start.row);
            var rightChar = line.substring(range.start.column + 1, range.start.column + 2);
            if (rightChar == ')') {
                range.end.column++;
                return range;
            }
        }
    });
    this.add("brackets", "insertion", function(state, action, editor, session, text) {
        if (text == '[') {
            initContext(editor);
            var selection = editor.getSelectionRange();
            var selected = session.doc.getTextRange(selection);
            if (selected !== "" && editor.getWrapBehavioursEnabled()) {
                return {
                    text: '[' + selected + ']',
                    selection: false
                };
            } else if (CstyleBehaviour.isSaneInsertion(editor, session)) {
                CstyleBehaviour.recordAutoInsert(editor, session, "]");
                return {
                    text: '[]',
                    selection: [1, 1]
                };
            }
        } else if (text == ']') {
            initContext(editor);
            var cursor = editor.getCursorPosition();
            var line = session.doc.getLine(cursor.row);
            var rightChar = line.substring(cursor.column, cursor.column + 1);
            if (rightChar == ']') {
                var matching = session.$findOpeningBracket(']', {column: cursor.column + 1, row: cursor.row});
                if (matching !== null && CstyleBehaviour.isAutoInsertedClosing(cursor, line, text)) {
                    CstyleBehaviour.popAutoInsertedClosing();
                    return {
                        text: '',
                        selection: [1, 1]
                    };
                }
            }
        }
    });
    this.add("brackets", "deletion", function(state, action, editor, session, range) {
        var selected = session.doc.getTextRange(range);
        if (!range.isMultiLine() && selected == '[') {
            initContext(editor);
            var line = session.doc.getLine(range.start.row);
            var rightChar = line.substring(range.start.column + 1, range.start.column + 2);
            if (rightChar == ']') {
                range.end.column++;
                return range;
            }
        }
    });
    // Quotes: wrap selections, pair a new quote unless the cursor is inside a
    // string/comment (decided by scanning the row's tokens), and overtype a
    // matching closing quote.
    this.add("string_dquotes", "insertion", function(state, action, editor, session, text) {
        if (text == '"' || text == "'") {
            initContext(editor);
            var quote = text;
            var selection = editor.getSelectionRange();
            var selected = session.doc.getTextRange(selection);
            if (selected !== "" && selected !== "'" && selected != '"' && editor.getWrapBehavioursEnabled()) {
                return {
                    text: quote + selected + quote,
                    selection: false
                };
            } else {
                var cursor = editor.getCursorPosition();
                var line = session.doc.getLine(cursor.row);
                var leftChar = line.substring(cursor.column-1, cursor.column);
                if (leftChar == '\\') {
                    return null;
                }
                var tokens = session.getTokens(selection.start.row);
                var col = 0, token;
                var quotepos = -1; // Track whether we're inside an open quote.
                for (var x = 0; x < tokens.length; x++) {
                    token = tokens[x];
                    if (token.type == "string") {
                      quotepos = -1;
                    } else if (quotepos < 0) {
                      quotepos = token.value.indexOf(quote);
                    }
                    if ((token.value.length + col) > selection.start.column) {
                      break;
                    }
                    col += tokens[x].value.length;
                }
                if (!token || (quotepos < 0 && token.type !== "comment" && (token.type !== "string" || ((selection.start.column !== token.value.length+col-1) && token.value.lastIndexOf(quote) === token.value.length-1)))) {
                    if (!CstyleBehaviour.isSaneInsertion(editor, session))
                        return;
                    return {
                        text: quote + quote,
                        selection: [1,1]
                    };
                } else if (token && token.type === "string") {
                    var rightChar = line.substring(cursor.column, cursor.column + 1);
                    if (rightChar == quote) {
                        return {
                            text: '',
                            selection: [1, 1]
                        };
                    }
                }
            }
        }
    });
    this.add("string_dquotes", "deletion", function(state, action, editor, session, range) {
        var selected = session.doc.getTextRange(range);
        if (!range.isMultiLine() && (selected == '"' || selected == "'")) {
            initContext(editor);
            var line = session.doc.getLine(range.start.row);
            var rightChar = line.substring(range.start.column + 1, range.start.column + 2);
            if (rightChar == selected) {
                range.end.column++;
                return range;
            }
        }
    });
};
// Insertion is "sane" when the cursor token (or the one just after it) is a
// safe token type, and the following token on the same row is safe too.
CstyleBehaviour.isSaneInsertion = function(editor, session) {
    var cursor = editor.getCursorPosition();
    var iterator = new TokenIterator(session, cursor.row, cursor.column);
    if (!this.$matchTokenType(iterator.getCurrentToken() || "text", SAFE_INSERT_IN_TOKENS)) {
        var iterator2 = new TokenIterator(session, cursor.row, cursor.column + 1);
        if (!this.$matchTokenType(iterator2.getCurrentToken() || "text", SAFE_INSERT_IN_TOKENS))
            return false;
    }
    iterator.stepForward();
    return iterator.getCurrentTokenRow() !== cursor.row ||
        this.$matchTokenType(iterator.getCurrentToken() || "text", SAFE_INSERT_BEFORE_TOKENS);
};
CstyleBehaviour.$matchTokenType = function(token, types) {
    return types.indexOf(token.type || token) > -1;
};
// Remember that "bracket" was auto-inserted at the cursor so a later keystroke
// of the same character can overtype it instead of inserting a duplicate.
CstyleBehaviour.recordAutoInsert = function(editor, session, bracket) {
    var cursor = editor.getCursorPosition();
    var line = session.doc.getLine(cursor.row);
    if (!this.isAutoInsertedClosing(cursor, line, context.autoInsertedLineEnd[0]))
        context.autoInsertedBrackets = 0;
    context.autoInsertedRow = cursor.row;
    context.autoInsertedLineEnd = bracket + line.substr(cursor.column);
    context.autoInsertedBrackets++;
};
CstyleBehaviour.recordMaybeInsert = function(editor, session, bracket) {
    var cursor = editor.getCursorPosition();
    var line = session.doc.getLine(cursor.row);
    if (!this.isMaybeInsertedClosing(cursor, line))
        context.maybeInsertedBrackets = 0;
    context.maybeInsertedRow = cursor.row;
    context.maybeInsertedLineStart = line.substr(0, cursor.column) + bracket;
    context.maybeInsertedLineEnd = line.substr(cursor.column);
    context.maybeInsertedBrackets++;
};
CstyleBehaviour.isAutoInsertedClosing = function(cursor, line, bracket) {
    return context.autoInsertedBrackets > 0 &&
        cursor.row === context.autoInsertedRow &&
        bracket === context.autoInsertedLineEnd[0] &&
        line.substr(cursor.column) === context.autoInsertedLineEnd;
};
CstyleBehaviour.isMaybeInsertedClosing = function(cursor, line) {
    return context.maybeInsertedBrackets > 0 &&
        cursor.row === context.maybeInsertedRow &&
        line.substr(cursor.column) === context.maybeInsertedLineEnd &&
        line.substr(0, cursor.column) == context.maybeInsertedLineStart;
};
CstyleBehaviour.popAutoInsertedClosing = function() {
    context.autoInsertedLineEnd = context.autoInsertedLineEnd.substr(1);
    context.autoInsertedBrackets--;
};
CstyleBehaviour.clearMaybeInsertedClosing = function() {
    if (context) {
        context.maybeInsertedBrackets = 0;
        context.maybeInsertedRow = -1;
    }
};
oop.inherits(CstyleBehaviour, Behaviour);
exports.CstyleBehaviour = CstyleBehaviour;
});
ace.define("ace/mode/folding/cstyle",["require","exports","module","ace/lib/oop","ace/range","ace/mode/folding/fold_mode"], function(require, exports, module) {
"use strict";
// Code folding for C-style languages: folds brace/bracket blocks and block
// comments. An optional commentRegex substitutes custom comment delimiters
// into the default start/stop markers.
var oop = require("../../lib/oop");
var Range = require("../../range").Range;
var BaseFoldMode = require("./fold_mode").FoldMode;
var FoldMode = exports.FoldMode = function(commentRegex) {
    if (commentRegex) {
        // Replace the final "|..." alternative of each marker with the
        // caller-supplied comment start/end pattern.
        this.foldingStartMarker = new RegExp(
            this.foldingStartMarker.source.replace(/\|[^|]*?$/, "|" + commentRegex.start)
        );
        this.foldingStopMarker = new RegExp(
            this.foldingStopMarker.source.replace(/\|[^|]*?$/, "|" + commentRegex.end)
        );
    }
};
oop.inherits(FoldMode, BaseFoldMode);
(function() {
    // Start marker: an unclosed "{"/"[" at line end, or a leading "/*".
    this.foldingStartMarker = /(\{|\[)[^\}\]]*$|^\s*(\/\*)/;
    this.foldingStopMarker = /^[^\[\{]*(\}|\])|^[\s\*]*(\*\/)/;
    this.getFoldWidgetRange = function(session, foldStyle, row, forceMultiline) {
        var line = session.getLine(row);
        var match = line.match(this.foldingStartMarker);
        if (match) {
            var i = match.index;
            if (match[1])
                return this.openingBracketBlock(session, match[1], row, i);
            var range = session.getCommentFoldRange(row, i + match[0].length, 1);
            if (range && !range.isMultiLine()) {
                if (forceMultiline) {
                    range = this.getSectionRange(session, row);
                } else if (foldStyle != "all")
                    range = null;
            }
            return range;
        }
        if (foldStyle === "markbegin")
            return;
        var match = line.match(this.foldingStopMarker);
        if (match) {
            var i = match.index + match[0].length;
            if (match[1])
                return this.closingBracketBlock(session, match[1], row, i);
            return session.getCommentFoldRange(row, i, -1);
        }
    };
    // Fold the run of subsequent lines indented at least as deep as this row.
    this.getSectionRange = function(session, row) {
        var line = session.getLine(row);
        var startIndent = line.search(/\S/);
        var startRow = row;
        var startColumn = line.length;
        row = row + 1;
        var endRow = row;
        var maxRow = session.getLength();
        while (++row < maxRow) {
            line = session.getLine(row);
            var indent = line.search(/\S/);
            if (indent === -1)
                continue;
            if (startIndent > indent)
                break;
            var subRange = this.getFoldWidgetRange(session, "all", row);
            if (subRange) {
                if (subRange.start.row <= startRow) {
                    break;
                } else if (subRange.isMultiLine()) {
                    row = subRange.end.row;
                } else if (startIndent == indent) {
                    break;
                }
            }
            endRow = row;
        }
        return new Range(startRow, startColumn, endRow, session.getLine(endRow).length);
    };
}).call(FoldMode.prototype);
});
ace.define("ace/mode/javascript",["require","exports","module","ace/lib/oop","ace/mode/text","ace/mode/javascript_highlight_rules","ace/mode/matching_brace_outdent","ace/range","ace/worker/worker_client","ace/mode/behaviour/cstyle","ace/mode/folding/cstyle"], function(require, exports, module) {
"use strict";
// JavaScript editing mode: wires together the highlight rules, brace
// outdenting, C-style auto-pairing, folding, and the background lint worker.
var oop = require("../lib/oop");
var TextMode = require("./text").Mode;
var JavaScriptHighlightRules = require("./javascript_highlight_rules").JavaScriptHighlightRules;
var MatchingBraceOutdent = require("./matching_brace_outdent").MatchingBraceOutdent;
var Range = require("../range").Range;
var WorkerClient = require("../worker/worker_client").WorkerClient;
var CstyleBehaviour = require("./behaviour/cstyle").CstyleBehaviour;
var CStyleFoldMode = require("./folding/cstyle").FoldMode;
var Mode = function() {
    this.HighlightRules = JavaScriptHighlightRules;
    this.$outdent = new MatchingBraceOutdent();
    this.$behaviour = new CstyleBehaviour();
    this.foldingRules = new CStyleFoldMode();
};
oop.inherits(Mode, TextMode);
(function() {
    this.lineCommentStart = "//";
    this.blockComment = {start: "/*", end: "*/"};
    this.getNextLineIndent = function(state, line, tab) {
        var indent = this.$getIndent(line);
        var tokenizedLine = this.getTokenizer().getLineTokens(line, state);
        var tokens = tokenizedLine.tokens;
        var endState = tokenizedLine.state;
        // Keep the current indentation while the line ends inside a comment.
        if (tokens.length && tokens[tokens.length-1].type == "comment") {
            return indent;
        }
        if (state == "start" || state == "no_regex") {
            // Indent one level after "case ...:" or an unclosed {, ( or [.
            var match = line.match(/^.*(?:\bcase\b.*\:|[\{\(\[])\s*$/);
            if (match) {
                indent += tab;
            }
        } else if (state == "doc-start") {
            if (endState == "start" || endState == "no_regex") {
                return "";
            }
            // Continue a doc comment with an aligned "* " prefix.
            var match = line.match(/^\s*(\/?)\*/);
            if (match) {
                if (match[1]) {
                    indent += " ";
                }
                indent += "* ";
            }
        }
        return indent;
    };
    this.checkOutdent = function(state, line, input) {
        return this.$outdent.checkOutdent(line, input);
    };
    this.autoOutdent = function(state, doc, row) {
        this.$outdent.autoOutdent(doc, row);
    };
    // Spawn the lint worker; its "jslint" results become editor annotations.
    this.createWorker = function(session) {
        var worker = new WorkerClient(["ace"], "ace/mode/javascript_worker", "JavaScriptWorker");
        worker.attachToDocument(session.getDocument());
        worker.on("jslint", function(results) {
            session.setAnnotations(results.data);
        });
        worker.on("terminate", function() {
            session.clearAnnotations();
        });
        return worker;
    };
    this.$id = "ace/mode/javascript";
}).call(Mode.prototype);
exports.Mode = Mode;
});
ace.define("ace/mode/svg_highlight_rules",["require","exports","module","ace/lib/oop","ace/mode/javascript_highlight_rules","ace/mode/xml_highlight_rules"], function(require, exports, module) {
"use strict";
// Highlight rules for SVG documents: plain XML highlighting, with the bodies
// of <script> tags handed off to the JavaScript tokenizer.
var oop = require("../lib/oop");
var JsRules = require("./javascript_highlight_rules").JavaScriptHighlightRules;
var XmlRules = require("./xml_highlight_rules").XmlHighlightRules;

var SvgHighlightRules = function() {
    // Start from the XML rule set...
    XmlRules.call(this);
    // ...then embed the JavaScript rules under the "js-" state prefix for
    // content inside <script> elements, and normalize the merged rule table.
    this.embedTagRules(JsRules, "js-", "script");
    this.normalizeRules();
};
oop.inherits(SvgHighlightRules, XmlRules);

exports.SvgHighlightRules = SvgHighlightRules;
});
ace.define("ace/mode/folding/mixed",["require","exports","module","ace/lib/oop","ace/mode/folding/fold_mode"], function(require, exports, module) {
"use strict";
// Fold mode for mixed-language documents: selects a sub-fold-mode by the
// tokenizer state's prefix and falls back to a default mode otherwise.
var oop = require("../../lib/oop");
var BaseFoldMode = require("./fold_mode").FoldMode;
var FoldMode = exports.FoldMode = function(defaultMode, subModes) {
    this.defaultMode = defaultMode;
    this.subModes = subModes;
};
oop.inherits(FoldMode, BaseFoldMode);
(function() {
    // Resolve the sub-mode whose key is a prefix of the given state name.
    this.$getMode = function(state) {
        if (typeof state != "string")
            state = state[0];
        for (var key in this.subModes) {
            if (state.indexOf(key) === 0)
                return this.subModes[key];
        }
        return null;
    };
    this.$tryMode = function(state, session, foldStyle, row) {
        var mode = this.$getMode(state);
        return (mode ? mode.getFoldWidget(session, foldStyle, row) : "");
    };
    // Try the previous row's state, then this row's, then the default mode.
    this.getFoldWidget = function(session, foldStyle, row) {
        return (
            this.$tryMode(session.getState(row-1), session, foldStyle, row) ||
            this.$tryMode(session.getState(row), session, foldStyle, row) ||
            this.defaultMode.getFoldWidget(session, foldStyle, row)
        );
    };
    this.getFoldWidgetRange = function(session, foldStyle, row) {
        var mode = this.$getMode(session.getState(row-1));
        if (!mode || !mode.getFoldWidget(session, foldStyle, row))
            mode = this.$getMode(session.getState(row));
        if (!mode || !mode.getFoldWidget(session, foldStyle, row))
            mode = this.defaultMode;
        return mode.getFoldWidgetRange(session, foldStyle, row);
    };
}).call(FoldMode.prototype);
});
ace.define("ace/mode/svg",["require","exports","module","ace/lib/oop","ace/mode/xml","ace/mode/javascript","ace/mode/svg_highlight_rules","ace/mode/folding/mixed","ace/mode/folding/xml","ace/mode/folding/cstyle"], function(require, exports, module) {
"use strict";
// SVG editing mode: XML mode with JavaScript delegated for <script> content.
var oop = require("../lib/oop");
var BaseXmlMode = require("./xml").Mode;
var JsMode = require("./javascript").Mode;
var SvgHighlightRules = require("./svg_highlight_rules").SvgHighlightRules;
var MixedFoldMode = require("./folding/mixed").FoldMode;
var XmlFoldMode = require("./folding/xml").FoldMode;
var CStyleFoldMode = require("./folding/cstyle").FoldMode;

var Mode = function() {
    BaseXmlMode.call(this);
    this.HighlightRules = SvgHighlightRules;
    // Tokens in "js-" prefixed states are handled by the JavaScript sub-mode.
    this.createModeDelegates({
        "js-": JsMode
    });
    // XML folding by default; C-style folding inside embedded scripts.
    this.foldingRules = new MixedFoldMode(new XmlFoldMode(), {
        "js-": new CStyleFoldMode()
    });
};
oop.inherits(Mode, BaseXmlMode);

(function() {
    this.getNextLineIndent = function(state, line, tab) {
        // Keep the previous line's indentation.
        return this.$getIndent(line);
    };
    this.$id = "ace/mode/svg";
}).call(Mode.prototype);

exports.Mode = Mode;
});
/NREL-rex-0.2.84.tar.gz/NREL-rex-0.2.84/rex/joint_pd/joint_pd.py | from concurrent.futures import as_completed
import gc
import logging
import h5py
import numpy as np
import os
import pandas as pd
from warnings import warn
from rex.version import __version__
from rex.renewable_resource import WindResource
from rex.resource import Resource
from rex.utilities.execution import SpawnProcessPool
from rex.utilities.loggers import log_mem, log_versions
from rex.utilities.utilities import slice_sites, to_records_array
logger = logging.getLogger(__name__)
class JointPD:
"""
Compute the joint probability distribution between the desired variables
"""
def __init__(self, res_h5, res_cls=Resource, hsds=False):
"""
Parameters
----------
res_h5 : str
Path to resource h5 file(s)
res_cls : Class, optional
Resource handler class to use to access res_h5,
by default Resource
hsds : bool, optional
Boolean flag to use h5pyd to handle .h5 'files' hosted on AWS
behind HSDS, by default False
"""
log_versions(logger)
self._res_h5 = res_h5
self._res_cls = res_cls
self._hsds = hsds
@property
def res_h5(self):
"""
Path to resource h5 file(s)
Returns
-------
str
"""
return self._res_h5
@property
def res_cls(self):
"""
Resource class to use to access wind_h5
Returns
-------
Class
"""
return self._res_cls
@staticmethod
def _make_bins(start, stop, step):
"""
Create bin edges from bin range
Parameters
----------
start : int
bin range start value
stop : int
bin range stop value
step : int
bin range step value
Returns
-------
bin_edges : ndarray
Vector of inclusive bin edges
"""
bin_edges = np.arange(start, stop + step, step)
return bin_edges
@classmethod
def compute_joint_pd(cls, res_h5, dset1, dset2, bins1, bins2,
res_cls=Resource, hsds=False,
sites_slice=None):
"""
Compute the joint probability distribution between the two given
datasets using the given bins for given sites
Parameters
----------
res_h5 : str
Path to resource h5 file(s)
dset1 : str
Dataset 1 to generate joint probability distribution for
dset2 : str
Dataset 2 to generate joint probabilty distribution for
bins1 : tuple
(start, stop, step) for dataset 1 bins. The stop value is
inclusive, so (0, 6, 2) would yield three bins with edges (0, 2, 4,
6). If the stop value is not perfectly divisible by the step, the
last bin will overshoot the stop value.
bins2 : tuple
(start, stop, step) for dataset 2 bins. The stop value is
inclusive, so (0, 6, 2) would yield three bins with edges (0, 2, 4,
6). If the stop value is not perfectly divisible by the step, the
last bin will overshoot the stop value.
sites : list | slice, optional
res_cls : Class, optional
Resource handler class to use to access res_h5,
by default Resource
hsds : bool, optional
Boolean flag to use h5pyd to handle .h5 'files' hosted on AWS
behind HSDS, by default False
sites_slice : slice | None, optional
Sites to extract, if None all, by default None
Returns
-------
jpd : dict
Dictionary of joint probabilty distribution densities for given
sites
"""
if sites_slice is None:
sites_slice = slice(None, None, None)
elif isinstance(sites_slice, int):
sites_slice = [sites_slice]
with res_cls(res_h5, hsds=hsds) as f:
arr1 = f[dset1, :, sites_slice]
arr2 = f[dset2, :, sites_slice]
bins1 = cls._make_bins(*bins1)
bins2 = cls._make_bins(*bins2)
if isinstance(sites_slice, slice) and sites_slice.stop:
gids = list(range(*sites_slice.indices(sites_slice.stop)))
elif isinstance(sites_slice, (list, np.ndarray)):
gids = sites_slice
jpd = {}
for i, (a1, a2) in enumerate(zip(arr1.T, arr2.T)):
jpd[gids[i]] = np.histogram2d(a1, a2,
bins=(bins1, bins2),
density=True)[0].astype(np.float32)
return jpd
def _get_slices(self, dset1, dset2, sites=None, chunks_per_slice=5):
"""
Get slices to extract, ensure the shapes of dset1 and 2 match.
Parameters
----------
dset1 : str
Dataset 1 to generate joint probability distribution for
dset2 : str
Dataset 2 to generate joint probabilty distribution for
sites : list | slice, optional
Subset of sites to extract, by default None or all sites
chunks_per_slice : int, optional
Number of chunks to extract in each slice, by default 5
Returns
-------
slices : list
List of slices to extract
"""
with self.res_cls(self.res_h5) as f:
shape, _, chunks = f.get_dset_properties(dset1)
shape2, _, _ = f.get_dset_properties(dset2)
if shape != shape2:
msg = ("The shape of {}: {}, does not match the shape of {}: {}!"
.format(dset1, shape, dset2, shape2))
logger.error(msg)
raise RuntimeError(msg)
slices = slice_sites(shape, chunks, sites=sites,
chunks_per_slice=chunks_per_slice)
return slices
    def compute(self, dset1, dset2, bins1, bins2, sites=None, max_workers=None,
                chunks_per_worker=5):
        """
        Compute joint probability distribution between given datasets using
        given bins for all sites.

        Parameters
        ----------
        dset1 : str
            Dataset 1 to generate joint probability distribution for
        dset2 : str
            Dataset 2 to generate joint probabilty distribution for
        bins1 : tuple
            (start, stop, step) for dataset 1 bins. The stop value is
            inclusive, so (0, 6, 2) would yield three bins with edges (0, 2, 4,
            6). If the stop value is not perfectly divisible by the step, the
            last bin will overshoot the stop value.
        bins2 : tuple
            (start, stop, step) for dataset 2 bins. The stop value is
            inclusive, so (0, 6, 2) would yield three bins with edges (0, 2, 4,
            6). If the stop value is not perfectly divisible by the step, the
            last bin will overshoot the stop value.
        sites : list | slice, optional
            Subset of sites to extract, by default None or all sites
        max_workers : None | int, optional
            Number of workers to use, if 1 run in serial, if None use all
            available cores, by default None
        chunks_per_worker : int, optional
            Number of chunks to extract on each worker, by default 5

        Returns
        -------
        jpd: pandas.DataFrame
            DataFrame of joint probability distribution between given datasets
            with given bins
        """
        if max_workers is None:
            max_workers = os.cpu_count()

        slices = self._get_slices(dset1, dset2, sites,
                                  chunks_per_slice=chunks_per_worker)
        # a single slice cannot be parallelized -> avoid pool overhead
        if len(slices) == 1:
            max_workers = 1

        jpd = {}
        if max_workers > 1:
            msg = ('Computing the joint probability distribution between {} '
                   'and {} in parallel using {} workers'
                   .format(dset1, dset2, max_workers))
            logger.info(msg)
            loggers = [__name__, 'rex']
            with SpawnProcessPool(max_workers=max_workers,
                                  loggers=loggers) as exe:
                futures = []
                for sites_slice in slices:
                    future = exe.submit(self.compute_joint_pd,
                                        self.res_h5, dset1, dset2,
                                        bins1, bins2,
                                        res_cls=self.res_cls,
                                        hsds=self._hsds,
                                        sites_slice=sites_slice)
                    futures.append(future)

                # merge per-site results as workers finish (any order is fine
                # because jpd is keyed by site gid)
                for i, future in enumerate(as_completed(futures)):
                    jpd.update(future.result())
                    logger.debug('Completed {} out of {} workers'
                                 .format((i + 1), len(futures)))
        else:
            msg = ('Computing the joint probability distribution between {} '
                   'and {} in serial.'
                   .format(dset1, dset2))
            logger.info(msg)
            for i, sites_slice in enumerate(slices):
                jpd.update(self.compute_joint_pd(
                    self.res_h5, dset1, dset2,
                    bins1, bins2,
                    res_cls=self.res_cls,
                    hsds=self._hsds,
                    sites_slice=sites_slice))
                logger.debug('Completed {} out of {} sets of sites'
                             .format((i + 1), len(slices)))
                # free the chunk-sized arrays between iterations to bound
                # peak memory in serial mode
                gc.collect()
                log_mem(logger)

        bins1 = self._make_bins(*bins1)
        bins2 = self._make_bins(*bins2)
        # Build a (dset1-edge, dset2-edge) MultiIndex from the left bin edges.
        # NOTE(review): the astype(np.int16) truncates float bin edges and
        # wraps for edges outside [-32768, 32767] -- confirm bins are small
        # integers for all intended datasets before relying on the index.
        index = np.meshgrid(bins1[:-1], bins2[:-1], indexing='ij')
        index = np.array(index).T.reshape(-1, 2).astype(np.int16)
        index = pd.MultiIndex.from_arrays(index.T, names=(dset1, dset2))
        # NOTE(review): 'F' (column-major) flattening is presumably chosen to
        # match the transposed meshgrid layout above -- verify ordering if the
        # index construction is ever changed
        jpd = pd.DataFrame({k: v.flatten(order='F') for k, v
                            in jpd.items()}, index=index).sort_index(axis=1)

        return jpd
def save(self, jpd, out_fpath):
"""
Save joint probability distribution to disk
Parameters
----------
jpd : pandas.DataFrame
Table of joint probability distribution densities to save
out_fpath : str
.csv, or .h5 file path to save joint probability
distribution to
"""
with self.res_cls(self.res_h5) as f:
meta = f['meta', jpd.columns.values]
logger.info('Writing joint probability distribution to {}'
.format(out_fpath))
if out_fpath.endswith('.csv'):
jpd.to_csv(out_fpath)
meta_fpath = out_fpath.split('.')[0] + '_meta.csv'
if os.path.exists(meta_fpath):
msg = ("Site meta data already exists at {}!")
logger.warning(msg)
warn(msg)
else:
logger.debug('Writing site meta data to {}'
.format(meta_fpath))
meta.to_csv(meta_fpath, index=False)
elif out_fpath.endswith('.h5'):
with h5py.File(out_fpath, mode='w') as f:
f.attrs['rex version'] = __version__
for i, n in enumerate(jpd.index.names):
logger.info('')
data = np.array(jpd.index.get_level_values(i))
dset = '{}-bins'.format(n)
logger.debug('Writing {}'.format(dset))
f.create_dataset(dset, shape=data.shape, dtype=data.dtype,
data=data)
logger.debug('Writing joint probability density values to jpd')
data = jpd.values
f.create_dataset('jpd', shape=data.shape, dtype=data.dtype,
data=data)
logger.debug('Writing site meta data to meta')
meta = to_records_array(meta)
f.create_dataset('meta', shape=meta.shape, dtype=meta.dtype,
data=meta)
else:
msg = ("Cannot save joint probability distribution, expecting "
".csv or .h5 path, but got: {}".format(out_fpath))
logger.error(msg)
raise OSError(msg)
@staticmethod
def plot_joint_pd(jpd, site=None, **kwargs):
"""
Plot the mean joint probability distribution accross all sites
(site=None), or the distribution for the single given site
Parameters
----------
jpd: pandas.DataFrame
DataFrame of joint probability distribution between given datasets
with given bins
site : int, optional
Site to plot distribution for, if None plot mean distribution
across all sites, by default None
"""
x, y = jpd.index.names
if site is not None:
msg = ("Can only plot the joint probabilty distribution for a "
"single site or the mean probability distribution accross "
"all sites (site=None), you provided: {}".format(site))
assert isinstance(site), msg
plt = jpd.loc[:, [site]].reset_index()
else:
site = 'mean_jpd'
plt = jpd.mean(axis=1)
plt.name = site
plt = plt.reset_index()
plt.plot.scatter(x=x, y=y, c=site, **kwargs)
    @classmethod
    def run(cls, res_h5, dset1, dset2, bins1, bins2,
            sites=None, res_cls=Resource, hsds=False,
            max_workers=None, chunks_per_worker=5, out_fpath=None):
        """
        Compute joint probability distribution between given datasets using
        given bins

        Parameters
        ----------
        res_h5 : str
            Path to resource h5 file(s)
        dset1 : str
            Dataset 1 to generate joint probability distribution for
        dset2 : str
            Dataset 2 to generate joint probabilty distribution for
        bins1 : tuple
            (start, stop, step) for dataset 1 bins. The stop value is
            inclusive, so (0, 6, 2) would yield three bins with edges (0, 2, 4,
            6). If the stop value is not perfectly divisible by the step, the
            last bin will overshoot the stop value.
        bins2 : tuple
            (start, stop, step) for dataset 2 bins. The stop value is
            inclusive, so (0, 6, 2) would yield three bins with edges (0, 2, 4,
            6). If the stop value is not perfectly divisible by the step, the
            last bin will overshoot the stop value.
        sites : list | slice, optional
            Subset of sites to extract, by default None or all sites
        res_cls : Class, optional
            Resource class to use to access res_h5, by default Resource
        hsds : bool, optional
            Boolean flag to use h5pyd to handle .h5 'files' hosted on AWS
            behind HSDS, by default False
        max_workers : None | int, optional
            Number of workers to use, if 1 run in serial, if None use all
            available cores, by default None
        chunks_per_worker : int, optional
            Number of chunks to extract on each worker, by default 5
        out_fpath : str, optional
            .csv, or .h5 file path to save joint probability
            distribution to

        Returns
        -------
        out : pandas.DataFrame
            DataFrame of joint probability distribution between given datasets
            with given bins
        """
        logger.info('Computing joint probability distribution between {} and '
                    '{} in {}'
                    .format(dset1, dset2, res_h5))
        logger.debug('Computing joint probability distribution using:'
                     '\n-{} bins: {}'
                     '\n-{} bins: {}'
                     '\n-max workers: {}'
                     '\n-chunks per worker: {}'
                     .format(dset1, bins1, dset2, bins2, max_workers,
                             chunks_per_worker))

        # build the handler, run the (possibly parallel) computation, and
        # optionally persist the result before returning it
        jpd = cls(res_h5, res_cls=res_cls, hsds=hsds)
        out = jpd.compute(dset1, dset2, bins1, bins2,
                          sites=sites,
                          max_workers=max_workers,
                          chunks_per_worker=chunks_per_worker)

        if out_fpath is not None:
            jpd.save(out, out_fpath)

        return out
@classmethod
def wind_rose(cls, wind_h5, hub_height, wspd_bins=(0, 30, 1),
wdir_bins=(0, 360, 5), sites=None, res_cls=WindResource,
hsds=False, max_workers=None, chunks_per_worker=5,
out_fpath=None):
"""
Compute wind rose at given hub height
Parameters
----------
wind_h5 : str
Path to resource h5 file(s)
hub_height : str | int
Hub-height to compute wind rose at
wspd_bins : tuple
(start, stop, step) for wind speed bins
wdir_bins : tuple
(start, stop, step) for wind direction bins
sites : list | slice, optional
Subset of sites to extract, by default None or all sites
res_cls : Class, optional
Resource class to use to access wind_h5, by default Resource
hsds : bool, optional
Boolean flag to use h5pyd to handle .h5 'files' hosted on AWS
behind HSDS, by default False
max_workers : None | int, optional
Number of workers to use, if 1 run in serial, if None use all
available cores, by default None
chunks_per_worker : int, optional
Number of chunks to extract on each worker, by default 5
out_fpath : str, optional
.csv, or .h5 file path to save wind rose to
Returns
-------
wind_rose : pandas.DataFrame
DataFrame of wind rose frequencies at desired hub-height
"""
logger.info('Computing wind rose for {}m wind in {}'
.format(hub_height, wind_h5))
logger.debug('Computing wind rose using:'
'\n-wind speed bins: {}'
'\n-wind direction bins: {}'
'\n-max workers: {}'
'\n-chunks per worker: {}'
.format(wspd_bins, wdir_bins, max_workers,
chunks_per_worker))
wind_rose = cls(wind_h5, res_cls=res_cls, hsds=hsds)
wspd_dset = 'windspeed_{}m'.format(hub_height)
wdir_dset = 'winddirection_{}m'.format(hub_height)
out = wind_rose.compute(wspd_dset, wdir_dset, wspd_bins, wdir_bins,
sites=sites,
max_workers=max_workers,
chunks_per_worker=chunks_per_worker)
if out_fpath is not None:
wind_rose.save(out, out_fpath)
return out | PypiClean |
/Gizela-1.0.18.tar.gz/Gizela-1.0.18/gizela/test/PointDictTest.py |
from gizela.data.PointDict import *
from gizela.data.Coord import *
import unittest
class PointDictTestCase(unittest.TestCase):
    """Unit tests for coordinate objects.

    NOTE(review): despite the class name (and this module importing
    PointDict), every test here exercises the ``Coord`` class -- presumably
    the case should be renamed; confirm with the test-suite layout before
    changing the public name.
    """

    def setUp(self):
        # Coord fixtures covering each constructor combination used below:
        # empty, z-only, xy, xyz, z-only (duplicate of c2), and ori-only
        self.c1=Coord()
        self.c2=Coord(z=1)
        self.c3=Coord(1,2)
        self.c4=Coord(1,2,3)
        self.c5=Coord(z=1)
        self.c6=Coord(ori=40)

    def tearDown(self):
        # no shared external resources to release
        pass

    def test_insert_point(self):
        """Getters return exactly the values supplied to the constructor."""
        self.assertEqual(self.c2.get_z(), 1)
        self.assertEqual(self.c3.get_x(), 1)
        self.assertEqual(self.c3.get_y(), 2)
        self.assertEqual(self.c4.get_x(), 1)
        self.assertEqual(self.c4.get_y(), 2)
        self.assertEqual(self.c4.get_z(), 3)
        self.assertEqual(self.c5.get_z(), 1)
        self.assertEqual(self.c6.get_ori(), 40)
        self.assertEqual(self.c3.get_xy(), (1,2))
        self.assertEqual(self.c4.get_xyz(), (1,2,3))

    def test_is(self):
        """is_set_* flags reflect which components were provided."""
        self.assertEqual(self.c1.is_set_z(), False)
        self.assertEqual(self.c1.is_set_xy(), False)
        self.assertEqual(self.c1.is_set_xyz(), False)
        self.assertEqual(self.c2.is_set_z(), True)
        self.assertEqual(self.c2.is_set_xy(), False)
        self.assertEqual(self.c2.is_set_xyz(), False)
        self.assertEqual(self.c3.is_set_z(), False)
        self.assertEqual(self.c3.is_set_xy(), True)
        self.assertEqual(self.c3.is_set_xyz(), False)
        self.assertEqual(self.c4.is_set_z(), True)
        self.assertEqual(self.c4.is_set_xy(), True)
        self.assertEqual(self.c4.is_set_xyz(), True)
        self.assertEqual(self.c5.is_set_z(), True)
        self.assertEqual(self.c5.is_set_xy(), False)
        self.assertEqual(self.c5.is_set_xyz(), False)
        self.assertEqual(self.c1.is_set_ori(), False)
        self.assertEqual(self.c2.is_set_ori(), False)
        self.assertEqual(self.c3.is_set_ori(), False)
        self.assertEqual(self.c4.is_set_ori(), False)
        self.assertEqual(self.c5.is_set_ori(), False)
        self.assertEqual(self.c6.is_set_ori(), True)

    def test_set(self):
        """Setters update the coordinates after construction."""
        self.c1.set_xy(10,20)
        self.assertEqual(self.c1.get_xy(),(10,20))
        self.c1.set_z(30)
        self.assertEqual(self.c1.get_z(),30)
        self.c2.set_xyz(10,20,30)
        self.assertEqual(self.c2.get_xyz(),(10,20,30))

    def test_unused(self):
        """set_unused clears every fix/adjust status flag."""
        self.c4.set_unused()
        self.assertEqual(self.c4.is_fix_xy(), False)
        self.assertEqual(self.c4.is_fix_xyz(), False)
        self.assertEqual(self.c4.is_fix_z(), False)
        self.assertEqual(self.c4.is_adj_xy(), False)
        self.assertEqual(self.c4.is_adj_XY(), False)
        self.assertEqual(self.c4.is_adj_xyz(), False)
        self.assertEqual(self.c4.is_adj_XYZ(), False)
        self.assertEqual(self.c4.is_adj_XYz(), False)
        self.assertEqual(self.c4.is_adj_xyZ(), False)

    def test_set_is_fix_adj_con_unused_active(self):
        """Each set_* status mutator is reflected by its matching is_*."""
        self.c1.set_fix_xy()
        self.assertEqual(self.c1.is_fix_xy(), True)
        self.c1.set_fix_z()
        self.assertEqual(self.c1.is_fix_z(), True)
        self.c1.set_fix_xyz()
        self.assertEqual(self.c1.is_fix_xyz(), True)
        self.c1.set_adj_xy()
        self.assertEqual(self.c1.is_adj_xy(), True)
        self.c1.set_adj_z()
        self.assertEqual(self.c1.is_adj_z(), True)
        self.c1.set_adj_xyz()
        self.assertEqual(self.c1.is_adj_xyz(), True)
        self.c1.set_con_xy()
        self.assertEqual(self.c1.is_con_xy(), True)
        self.c1.set_con_z()
        self.assertEqual(self.c1.is_con_z(), True)
        self.c1.set_adj_xyZ()
        self.assertEqual(self.c1.is_adj_xyZ(), True)
        self.c1.set_adj_XYz()
        self.assertEqual(self.c1.is_adj_XYz(), True)
        self.c1.set_adj_XYZ()
        self.assertEqual(self.c1.is_adj_XYZ(), True)
#class CoordTestSuite(unittest.TestSuite):
# def __init__(self):
# caseClass = PointDictTestCase
# tests = [t for t in dir(caseClass) if t[:5] == 'test']
# print tests
# unittest.TestSuite.__init__(self,map(PointDictTestCase, tests))
#
#def suite():
# return unittest.makeSuite(PointDictTestCase)
if __name__ == "__main__":
unittest.main() | PypiClean |
/CountryGoogleScraper-0.2.10.tar.gz/CountryGoogleScraper-0.2.10/GoogleScraper/scrape_jobs.py |
import logging
logger = logging.getLogger(__name__)
"""
The core logic of GoogleScraper is handled here.
By default, every keyword is scraped on all given search engines for the supplied
number of pages.
Example:
keywords = ('one', 'two')
search_engines = ('google', 'yandex')
num_pages = 5
Then the following requests are issued:
[('one', 'google', 0),
('one', 'google', 1),
('one', 'google', 2),
('one', 'google', 3),
('one', 'google', 4),
('one', 'yandex', 0),
('one', 'yandex', 1),
('one', 'yandex', 2),
('one', 'yandex', 3),
('one', 'yandex', 4),
('two', 'google', 0),
('two', 'google', 1),
('two', 'google', 2),
('two', 'google', 3),
('two', 'google', 4),
('two', 'yandex', 0),
('two', 'yandex', 1),
('two', 'yandex', 2),
('two', 'yandex', 3),
('two', 'yandex', 4)]
But sometimes you want to fine tune this generic behaviour. Some keywords should be scraped on
only some search engines. Some keywords should be only used with specific proxies. Maybe
a specific keyword should be searched Y times, whereas another needs to be scraped X times.
Therefore we need a special format, where you can specify the individual settings for each
keyword.
The best format for such a keyword file is just a python module with a dictionary with one
mandatory key: The 'query'. The dictionary must be called 'scrape_jobs'.
You can see such a example file in the examples/ directory.
"""
def default_scrape_jobs_for_keywords(keywords, search_engines, scrape_method, num_pages):
    """Get scrape jobs by keywords.

    If you just submit a keyword file, then it is assumed that every keyword
    should be scraped on

    - all supplied search engines
    - for num_pages
    - in the specified search mode.

    Args:
        keywords: An iterable of keywords to scrape.
        search_engines: An iterable of search engine names to scrape each
            keyword on.
        scrape_method: The scrape method to use (e.g. 'http' or 'selenium').
        num_pages: Number of result pages to request per keyword/engine pair.

    Yields:
        dict: One scrape job per (keyword, search_engine, page) combination
        with keys 'query', 'search_engine', 'scrape_method' and
        'page_number' (1-based).
    """
    for keyword in keywords:
        for search_engine in search_engines:
            for page in range(1, num_pages + 1):
                yield {
                    'query': keyword,
                    'search_engine': search_engine,
                    'scrape_method': scrape_method,
                    'page_number': page
                }
/CloeePy-Redis-0.0.0.tar.gz/CloeePy-Redis-0.0.0/README.md | # CloeePy-Redis
Redis Plugin for the CloeePy Framework
Attaches a Redis connection to CloeePy application context.
## Installation
`pip install CloeePy-Redis`
## Configuration
### Configuration Basics
CloeePy-Redis configuration must be placed under `CloeePy.Plugins.cloeepy_redis` in your config file.
The parameters are simply the available `Redis-Py.StrictRedis` connection parameters. For more
information on possible configurations please see [Redis-Py's Documentation](http://redis-py.readthedocs.io/en/latest/)
```
CloeePy:
...
Plugins:
cloeepy_redis:
host: localhost
port: "6379"
password: "secret"
```
### Customize Plugin Namespace
By default, your Redis connection is available on the CloeePy application context as
`app.redis`. Optionally you can specify a different namespace by which you access
the redis connection via `pluginNamespace`.
```
...
Plugins:
cloeepy_redis:
pluginNamespace: customRedisNS
host: localhost
port: "6379"
password: "secret"
```
Then, you would access your Redis connection on the application context like so:
```
app = CloeePy()
result = app.customRedisNS.ping()
app.log.info(result)
```
### Optional Environment Variables
It's best practice NOT to store sensitive data, such as database usernames and passwords,
in plain-text configuration files. Thus, CloeePy-Redis supports configuring your
password via environment variable.
You need to set the following:
- Password: `CLOEEPY_REDIS_PASSWORD`
By doing so, you can omit `password` in your configuration file.
## Usage
```
import os
from cloeepy import CloeePy
if __name__ == "__main__":
# Required: set config path as environment variable
os.environ["CLOEEPY_CONFIG_PATH"] = "./example-config.yml"
# instantiate application instance
app = CloeePy()
# Make Redis call and log to stdout
app.log.info(app.redis.ping())
```
| PypiClean |
/Antares_Launcher-1.3.0.tar.gz/Antares_Launcher-1.3.0/antareslauncher/parameters_reader.py | import json
import os.path
from pathlib import Path
from typing import Dict, Any
import yaml
import getpass
from antareslauncher.main import MainParameters
from antareslauncher.main_option_parser import ParserParameters
ALT2_PARENT = Path.home() / "antares_launcher_settings"
ALT1_PARENT = Path.cwd()
DEFAULT_JSON_DB_NAME = f"{getpass.getuser()}_antares_launcher_db.json"
class ParametersReader:
    """Read Antares Launcher settings.

    Loads the launcher options from a YAML configuration file and the SSH
    connection settings from a JSON file, and exposes them bundled as
    ``ParserParameters`` and ``MainParameters``.
    """

    class EmptyFileException(TypeError):
        # NOTE(review): never raised in this class -- presumably part of the
        # public API for callers; confirm before removing
        pass

    class MissingValueException(KeyError):
        # raised when a compulsory key is absent from the YAML file
        pass

    def __init__(self, json_ssh_conf: Path, yaml_filepath: Path):
        """
        Parameters
        ----------
        json_ssh_conf
            Path to the JSON file holding the SSH connection settings
        yaml_filepath
            Path to the YAML file holding the launcher configuration
        """
        self.json_ssh_conf = json_ssh_conf

        # an empty YAML file yields None -> normalize to an empty dict
        with open(Path(yaml_filepath)) as yaml_file:
            self.yaml_content = yaml.load(yaml_file, Loader=yaml.FullLoader) or {}

        # every value below is compulsory: a missing key raises
        # MissingValueException
        # fmt: off
        self._wait_time = self._get_compulsory_value("DEFAULT_WAIT_TIME")
        self.time_limit = self._get_compulsory_value("DEFAULT_TIME_LIMIT")
        self.n_cpu = self._get_compulsory_value("DEFAULT_N_CPU")
        self.studies_in_dir = os.path.expanduser(self._get_compulsory_value("STUDIES_IN_DIR"))
        self.log_dir = os.path.expanduser(self._get_compulsory_value("LOG_DIR"))
        self.finished_dir = os.path.expanduser(self._get_compulsory_value("FINISHED_DIR"))
        self.ssh_conf_file_is_required = self._get_compulsory_value("SSH_CONFIG_FILE_IS_REQUIRED")
        # fmt: on
        alt1, alt2 = self._get_ssh_conf_file_alts()
        self.ssh_conf_alt1, self.ssh_conf_alt2 = alt1, alt2
        self.default_ssh_dict = self._get_ssh_dict_from_json()
        self.remote_slurm_script_path = self._get_compulsory_value("SLURM_SCRIPT_PATH")
        self.antares_versions = self._get_compulsory_value(
            "ANTARES_VERSIONS_ON_REMOTE_SERVER"
        )
        self.db_primary_key = self._get_compulsory_value("DB_PRIMARY_KEY")
        self.json_dir = Path(self._get_compulsory_value("JSON_DIR")).expanduser()
        # the JSON DB name is optional and defaults to a per-user name
        self.json_db_name = self.yaml_content.get(
            "DEFAULT_JSON_DB_NAME", DEFAULT_JSON_DB_NAME
        )

    def get_parser_parameters(self):
        """Bundle the CLI-facing settings into a ParserParameters object."""
        options = ParserParameters(
            default_wait_time=self._wait_time,
            default_time_limit=self.time_limit,
            default_n_cpu=self.n_cpu,
            studies_in_dir=self.studies_in_dir,
            log_dir=self.log_dir,
            finished_dir=self.finished_dir,
            ssh_config_file_is_required=self.ssh_conf_file_is_required,
            ssh_configfile_path_alternate1=self.ssh_conf_alt1,
            ssh_configfile_path_alternate2=self.ssh_conf_alt2,
        )
        return options

    def get_main_parameters(self) -> MainParameters:
        """Bundle the application-level settings into a MainParameters object."""
        main_parameters = MainParameters(
            json_dir=self.json_dir,
            default_json_db_name=self.json_db_name,
            slurm_script_path=self.remote_slurm_script_path,
            antares_versions_on_remote_server=self.antares_versions,
            default_ssh_dict=self.default_ssh_dict,
            db_primary_key=self.db_primary_key,
        )
        return main_parameters

    def _get_ssh_conf_file_alts(self):
        """Return the two SSH config file candidate paths.

        YAML keys SSH_CONFIGFILE_PATH_ALTERNATE1/2 override the defaults
        (cwd and ~/antares_launcher_settings respectively).
        """
        default_alternate1, default_alternate2 = self._get_default_alternate_values()
        ssh_conf_alternate1 = self.yaml_content.get(
            "SSH_CONFIGFILE_PATH_ALTERNATE1",
            default_alternate1,
        )
        ssh_conf_alternate2 = self.yaml_content.get(
            "SSH_CONFIGFILE_PATH_ALTERNATE2",
            default_alternate2,
        )
        return ssh_conf_alternate1, ssh_conf_alternate2

    def _get_default_alternate_values(self):
        """Build the default SSH config paths from the compulsory file name."""
        default_ssh_configfile_name = self._get_compulsory_value(
            "DEFAULT_SSH_CONFIGFILE_NAME"
        )
        default_alternate1 = ALT1_PARENT / default_ssh_configfile_name
        default_alternate2 = ALT2_PARENT / default_ssh_configfile_name
        return default_alternate1, default_alternate2

    def _get_compulsory_value(self, key: str):
        """Return self.yaml_content[key] or raise MissingValueException."""
        try:
            value = self.yaml_content[key]
        except KeyError as e:
            print(f"missing value: {str(e)}")
            raise ParametersReader.MissingValueException(e) from None
        return value

    def _get_ssh_dict_from_json(self) -> Dict[str, Any]:
        """Load the SSH settings JSON, expanding '~' in private_key_file."""
        with open(self.json_ssh_conf) as ssh_connection_json:
            ssh_dict = json.load(ssh_connection_json)
        if "private_key_file" in ssh_dict:
            ssh_dict["private_key_file"] = os.path.expanduser(
                ssh_dict["private_key_file"]
            )
        return ssh_dict
/NeuroDynamics-0.1.1.tar.gz/NeuroDynamics-0.1.1/brainpy/tools/dicts.py |
import copy
__all__ = [
'DictPlus'
]
class DictPlus(dict):
    """Python dictionaries with advanced dot notation access.

    For example:

    >>> d = DictPlus({'a': 10, 'b': 20})
    >>> d.a
    10
    >>> d['a']
    10
    >>> d.c  # this will raise a KeyError
    KeyError: 'c'
    >>> d.c = 30  # but you can assign a value to a non-existing item
    >>> d.c
    30
    """

    def __init__(self, *args, **kwargs):
        # __parent/__key track where a nested mapping should be re-attached
        # the first time it is written to (see __setitem__); set via
        # object.__setattr__ so they stay out of the dict contents
        object.__setattr__(self, '__parent', kwargs.pop('__parent', None))
        object.__setattr__(self, '__key', kwargs.pop('__key', None))
        for arg in args:
            if not arg:
                continue
            elif isinstance(arg, dict):
                for key, val in arg.items():
                    self[key] = self._hook(val)
            elif isinstance(arg, tuple) and (not isinstance(arg[0], tuple)):
                # a single (key, value) pair
                self[arg[0]] = self._hook(arg[1])
            else:
                # an iterable of (key, value) pairs
                for key, val in iter(arg):
                    self[key] = self._hook(val)

        for key, val in kwargs.items():
            self[key] = self._hook(val)

    def __setattr__(self, name, value):
        # protect class-level attributes (methods etc.) from being shadowed
        if hasattr(self.__class__, name):
            raise AttributeError(f"Attribute '{name}' is read-only in '{type(self)}' object.")
        else:
            self[name] = value

    def __setitem__(self, name, value):
        super(DictPlus, self).__setitem__(name, value)
        try:
            p = object.__getattribute__(self, '__parent')
            key = object.__getattribute__(self, '__key')
        except AttributeError:
            p = None
            key = None
        if p is not None:
            # first write into a detached child: attach it to its parent,
            # then drop the back-references
            p[key] = self
            object.__delattr__(self, '__parent')
            object.__delattr__(self, '__key')

    def __add__(self, other):
        # only an empty DictPlus can be "added": it acts as identity
        if not self.keys():
            return other
        else:
            self_type = type(self).__name__
            other_type = type(other).__name__
            msg = "Unsupported operand type(s) for +: '{}' and '{}'"
            raise TypeError(msg.format(self_type, other_type))

    @classmethod
    def _hook(cls, item):
        """Recursively wrap dicts (incl. inside lists/tuples) in this class."""
        if isinstance(item, dict):
            return cls(item)
        elif isinstance(item, (list, tuple)):
            return type(item)(cls._hook(elem) for elem in item)
        return item

    def __getattr__(self, item):
        # NOTE: raises KeyError (not AttributeError) for missing keys, as
        # documented in the class docstring; this is intentional API behavior
        return self.__getitem__(item)

    def __delattr__(self, name):
        del self[name]

    def copy(self):
        """Return a shallow copy (nested values are shared)."""
        return copy.copy(self)

    def deepcopy(self):
        """Return a deep copy (nested values are duplicated)."""
        return copy.deepcopy(self)

    def __deepcopy__(self, memo):
        other = self.__class__()
        memo[id(self)] = other
        for key, value in self.items():
            other[copy.deepcopy(key, memo)] = copy.deepcopy(value, memo)
        return other

    def to_dict(self):
        """Convert back to plain builtin dicts, recursing into containers."""
        base = {}
        for key, value in self.items():
            if isinstance(value, type(self)):
                base[key] = value.to_dict()
            elif isinstance(value, (list, tuple)):
                base[key] = type(value)(item.to_dict() if isinstance(item, type(self)) else item
                                        for item in value)
            else:
                base[key] = value
        return base

    def update(self, *args, **kwargs):
        """dict.update with recursive merging of nested dict values."""
        other = {}
        if args:
            if len(args) > 1:
                # Bug fix: raise with an informative message (mirrors the
                # builtin dict.update error) instead of a bare TypeError()
                raise TypeError('update expected at most 1 positional '
                                'argument, got {}'.format(len(args)))
            other.update(args[0])
        other.update(kwargs)
        for k, v in other.items():
            # merge recursively only when both sides are dicts
            if (k not in self) or (not isinstance(self[k], dict)) or (not isinstance(v, dict)):
                self[k] = v
            else:
                self[k].update(v)

    def __getnewargs__(self):
        # support copy/pickle protocols: rebuild from (key, value) pairs
        return tuple(self.items())

    def __getstate__(self):
        return self

    def __setstate__(self, state):
        self.update(state)

    def setdefault(self, key, default=None):
        if key in self:
            return self[key]
        else:
            self[key] = default
            return default
/CASTLE-tools-1.1.tar.gz/CASTLE-tools-1.1/CASTLE/loc_alignment.py | import os
import numpy as np
import anndata as ad
import matplotlib.pyplot as plt
from .location.align_init import initial_alignment
from .location.align_fine import fine_alignment
from .location.edge_detection import alpha_shape, calcu_lisi, select_clustered_domains, detect_edge_of_domains
from .utils import MakeLogClass
class Loc_Align(object):
"""
Location alignment of multiple ST slices, including initial alignment and fine alignment.
They perform spatial alignment using similarity of spatial embedding and spatial coordinates, respectively.
Parameters
----------
adata
AnnData object object of scanpy package
batch_key
The key containing slice information in .obs
batch_order
Slice order used to perform alignment. Align according to the default order of elements in batch_key if None
Examples
--------
>>> adata = sc.read_h5ad(path_to_anndata)
>>> loc_align = Loc_Align(adata, batch_key='batch')
>>> loc_align.init_align(emb_key = 'HAN_SE')
>>> loc_align.detect_fine_points(domain_key = 'layer_cluster')
>>> loc_align.plot_edge(spatial_key = 'transform_init')
>>> adata_aligned = loc_align.fine_align()
"""
    def __init__(
        self,
        adata,
        batch_key,
        batch_order = None,
        make_log = True,
        result_path = '.'
    ):
        """
        Parameters
        ----------
        adata
            AnnData object containing all slices
        batch_key
            The key in .obs containing slice identity
        batch_order
            Order in which slices are aligned; when None, defaults to the
            sorted unique values of ``batch_key``
        make_log
            Whether to write a parameter log to ``result_path``/log_loc.tsv
        result_path
            Directory used for log and result output
        """
        super(Loc_Align, self).__init__()
        self.batch_key = batch_key
        if batch_order is None:
            # default order: sorted unique values of the batch column
            batch_order = list(adata.obs[batch_key].value_counts().sort_index().index)
        self.batch_n = len(batch_order)
        # one AnnData copy per slice, in alignment order
        self.adata_list = [adata[adata.obs[batch_key]==key].copy() for key in batch_order]
        self.make_log = make_log
        self.result_path = result_path
        if self.make_log:
            self.makeLog = MakeLogClass(f"{self.result_path}/log_loc.tsv").make
    def init_align(self,
                   emb_key,
                   spatial_key = 'spatial',
                   num_mnn = 1,
                   init_align_key = 'transform_init',
                   return_result = False
                   ):
        '''
        Initial alignment of spatial location.

        Parameters
        ----------
        emb_key
            Key in .obsm of the embedding used to match spots across slices
            (e.g. 'HAN_SE') -- mutual nearest neighbors are computed on it
        spatial_key
            Key of raw spatial coordinates in .obsm
        num_mnn
            The number of mutual nearest neighbors calculated according to emb_key
        init_align_key
            Key of initial transformed coordinates added in .obsm
        return_result
            If True, also return the MNN anchors and the per-pair
            transformation matrices
        '''
        self.init_align_key = init_align_key
        # pairwise initial alignment driven by embedding-based MNN anchors
        self.init_adatas, anchors, self.Ts_init = initial_alignment(self.adata_list,
                                                spatial_key = spatial_key,
                                                emb_key = emb_key,
                                                num_mnn = num_mnn,
                                                key_added = init_align_key
                                                )
        if self.make_log:
            self.makeLog(f"Parameter set for initial alignment")
            self.makeLog(f" Starting coordinates: {spatial_key}")
            self.makeLog(f" K of MNN: {num_mnn}")
            self.makeLog(f" Aligned coordinates: {init_align_key}")
        if return_result:
            return anchors, self.Ts_init
def detect_fine_points( self,
slice_boundary = True,
domain_boundary = True,
domain_key = 'layer_cluster',
num_domains = 1,
sep_sort = True,
alpha = 70,
return_result = False):
'''
Prepare for fine alignment.
First, the spatial domain with the highest degree of spatial aggregation is selected according to the index LISI,
and then the edge of the slice and the aforementioned spatial domain is detected.
Parameters
----------
domain_key
Key of spatial domains in .obs
num_domains
Number of domains used to aligning slices.
sep_sort
Boolean value, whether to sort spatial clustered pattern together.
alpha
alpha value to detect the edge of slices and dimains.
'''
self.alpha = alpha
self.boundary = []
self.edge = []
# detect edge of slices
if slice_boundary:
boundary_slices, edge_slices = [], []
for ii in range(len(self.init_adatas)):
if slice_boundary:
adata_tmp = self.init_adatas[ii]
spatial_tmp = adata_tmp.obsm[self.init_align_key]
boundary_tmp, edge_tmp, _ = alpha_shape(spatial_tmp, alpha=alpha, only_outer=True)
else:
boundary_tmp, edge_tmp = [], set()
boundary_slices.append(boundary_tmp)
edge_slices.append(edge_tmp)
if ii !=0 :
self.boundary += [(boundary_slices[ii-1], boundary_slices[ii])]
self.edge += [(edge_slices[ii-1], edge_slices[ii])]
# detect edge of domains
if domain_boundary:
# calculate lisi of each domains
lisi_list = [calcu_lisi(adata_tmp, domain_key=domain_key, spatial_key=self.init_align_key) for adata_tmp in self.init_adatas]
# sort the domains according to lisi
domains_use = [select_clustered_domains(lisi_list[i],
lisi_list[i+1],
domain_key,
use_domain_nums = num_domains,
sep_sort=sep_sort) for i in range(self.batch_n-1)]
# detect edge of domains
boundary_domain, edge_domain = detect_edge_of_domains( self.init_adatas,
domain_key = domain_key,
domains_use = domains_use,
spatial_key = self.init_align_key,
alpha = alpha)
for ii in range(len(self.boundary)):
boundary_tmp = self.boundary[ii]
print(boundary_tmp)
boundary_tmp1, boundary_tmp2 = boundary_tmp
boundary_tmp1 += boundary_domain[ii][0]
boundary_tmp2 += boundary_domain[ii][1]
boundary_tmp1 = list(set(boundary_tmp1))
boundary_tmp2 = list(set(boundary_tmp2))
self.boundary[ii] = (boundary_tmp1, boundary_tmp2)
edge_tmp1, edge_tmp2 = self.edge[ii]
edge_tmp1 = edge_tmp1.union(edge_domain[ii][0])
edge_tmp2 = edge_tmp2.union(edge_domain[ii][1])
self.edge[ii] = (edge_tmp1, edge_tmp2)
if self.make_log:
self.makeLog(f"Parameter set for edge detection")
self.makeLog(f" Spatial coordinates: {domain_key}")
self.makeLog(f" Number of domains: {num_domains}")
self.makeLog(f" Sep sort: {sep_sort}")
self.makeLog(f" Alpha: {alpha}")
if return_result:
if domain_boundary:
return self.boundary, self.edge, lisi_list, domains_use
return self.boundary, self.edge
def fine_align( self,
fine_align_key = 'transform_fine',
max_iterations = 20,
tolerance = 1e-10,
return_result = False
):
'''
Fine alignment of spatial location.
Parameters
----------
fine_align_key
Key of fine transformed coordinates added in .obsm
max_iterations
Maximum number of iterations for icp
tolerance
Maximum error allowed for early stopping
Return
----------
adata_aligned
Fine aligned adata with 'init_align_key' and 'fine_align_key' added in .obsm
'''
self.fine_adatas, Ts_fine = fine_alignment( self.init_adatas,
self.boundary,
spatial_key=self.init_align_key,
key_added=fine_align_key,
init_pose = None,
max_iterations = max_iterations,
tolerance = tolerance)
adata_aligned = ad.concat(self.fine_adatas)
if self.make_log:
self.makeLog(f"Parameter set for fine alignment")
self.makeLog(f" Starting coordinates: {self.init_align_key}")
self.makeLog(f" Aligned coordinates: {fine_align_key}")
self.makeLog(f" Max iterations: {max_iterations}")
self.makeLog(f" Tolerance: {tolerance}")
if return_result:
return adata_aligned, Ts_fine
return adata_aligned
def plot_edge(self,
spatial_key,
figsize = (6,6),
s=1
):
'''
Plot the detected edges of slices and domains to select an suitable alpha value.
Parameters
----------
spatial_key
Spatial coordinates used for plot in .obsm.
'''
if spatial_key in list(self.init_adatas[0].obsm.keys()):
adatas = self.init_adatas
else:
adatas = self.fine_adatas
### check edges of slices
for ii in range(len(self.boundary)):
# get adata
adata_tmp1 = adatas[ii]
adata_tmp2 = adatas[ii+1]
# get slices
slice_tmp1 = list(set(adata_tmp1.obs[self.batch_key]))[0]
slice_tmp2 = list(set(adata_tmp2.obs[self.batch_key]))[0]
# get boundarys and edges
# boundary_tmp1, boundary_tmp2 = self.boundary[ii]
edge_tmp1, edge_tmp2 = self.edge[ii]
# get coordinates of boundarys
spatial_tmp1 = adata_tmp1.obsm[spatial_key]
spatial_tmp2 = adata_tmp2.obsm[spatial_key]
if not os.path.exists(self.result_path + '/location/edge'):
os.makedirs(self.result_path + '/location/edge')
xx,yy = np.median(spatial_tmp1, 0)
plt.figure(figsize=figsize)
plt.scatter(spatial_tmp1[:, 0], spatial_tmp1[:, 1], s = s)
for i, j in edge_tmp1:
plt.plot(spatial_tmp1[[i, j], 0], spatial_tmp1[[i, j], 1], c='#E24A33')
plt.text(xx, yy, f"alpha={self.alpha}", size=18)
plt.savefig(f'{self.result_path}/location/edge/spatial_edge_{slice_tmp1}_{ii}.png', bbox_inches='tight')
plt.close()
xx,yy = np.median(spatial_tmp2, 0)
plt.figure(figsize=figsize)
plt.scatter(spatial_tmp2[:, 0], spatial_tmp2[:, 1], s = s)
for i, j in edge_tmp2:
plt.plot(spatial_tmp2[[i, j], 0], spatial_tmp2[[i, j], 1], c='#8EBA42')
# plt.plot(spatial_tmp2[[i, j], 0], spatial_tmp2[[i, j], 1], c='#988ED5')
plt.text(xx, yy, f"alpha={self.alpha}", size=18)
plt.savefig(f'{self.result_path}/location/edge/spatial_edge_{slice_tmp2}_{ii}.png', bbox_inches='tight')
plt.close() | PypiClean |
from dataclasses import dataclass
from datetime import datetime, timezone
from struct import unpack_from, calcsize
import sqlite3
from typing import Dict, Iterator, Optional
from my.core import datetime_aware, PathIsh
from my.core.sqlite import sqlite_connection
from my.config import telegram as user_config
@dataclass
class config(user_config.telegram_backup):
    """Module configuration, merged from the user's ``my.config`` (telegram.telegram_backup)."""
    # path to the export database.sqlite
    export_path: PathIsh
@dataclass
class Chat:
    """A conversation (dialog, group or channel) row from the telegram_backup export."""
    id: str
    name: Optional[str]
    # not all users have short handle + groups don't have them either?
    # TODO hmm some groups have it -- it's just the tool doesn't dump them??
    handle: Optional[str]
    # not sure if need type?
@dataclass
class User:
    """An individual account, used as the sender of a Message."""
    id: str
    name: Optional[str]
@dataclass
class Message:
    """A single message from the telegram_backup export database."""
    # NOTE: message id is NOT unique globally -- only with respect to chat!
    id: int
    time: datetime_aware  # UTC (see _message_from_row, which attaches timezone.utc)
    chat: Chat
    sender: User
    text: str
    # best-effort media link description, only filled when requested (see messages())
    extra_media_info: Optional[str] = None
    @property
    def permalink(self) -> str:
        """Best-effort ``t.me`` deep link to this message (see NOTEs below for limits)."""
        handle = self.chat.handle
        if handle is None:
            # no public handle -- fall back to the raw chat id
            clink = str(self.chat.id)
        else:
            # FIXME add c/
            clink = f'{handle}'
        # NOTE: don't think deep links to messages work for private conversations sadly https://core.telegram.org/api/links#message-links
        # NOTE: doesn't look like this works with private groups at all, doesn't even jump into it
        return f'https://t.me/{clink}/{self.id}'
Chats = Dict[str, Chat]
def _message_from_row(r: sqlite3.Row, *, chats: Chats, with_extra_media_info: bool) -> Message:
    """Convert one row of the ``messages`` table into a :class:`Message`.

    ``chats`` maps both chat ids and user ids to :class:`Chat` entries
    (see :func:`messages`, which builds it from the chats and users tables).
    When ``with_extra_media_info`` is set and the row has media, a best-effort
    attempt is made to extract extra media info from the raw message blob.
    """
    ts = r['time']
    # desktop export uses UTC (checked by exporting in winter time vs summer time)
    # and telegram_backup timestamps seem same as in desktop export
    time = datetime.fromtimestamp(ts, tz=timezone.utc)
    chat = chats[r['source_id']]
    sender = chats[r['sender_id']]
    extra_media_info: Optional[str] = None
    if with_extra_media_info and r['has_media'] == 1:
        # also it's quite hacky, so at least for now it's just an optional attribute behind the flag
        # defensive because it's a bit tricky to correctly parse without a proper api parser..
        # maybe later we'll improve it -- failures deliberately leave extra_media_info as None
        try:
            extra_media_info = _extract_extra_media_info(data=r['data'])
        except Exception:
            pass
    return Message(
        id=r['message_id'],
        time=time,
        chat=chat,
        sender=User(id=sender.id, name=sender.name),
        text=r['text'],
        extra_media_info=extra_media_info,
    )
def messages(*, extra_where: Optional[str]=None, with_extra_media_info: bool=False) -> Iterator[Message]:
    """Yield all non-service messages from the export database, ordered by time.

    :param extra_where: optional extra SQL condition ANDed into the query.
    :param with_extra_media_info: when True, attempt the hacky media-info extraction
        for rows that have media (see :func:`_extract_extra_media_info`).
    """
    # NOTE: string literals use single quotes on purpose -- SQLite treats
    # double-quoted tokens as identifiers first and only falls back to string
    # literals as a legacy misfeature.
    messages_query = "SELECT * FROM messages WHERE message_type NOT IN ('service_message', 'empty_message')"
    if extra_where is not None:
        messages_query += ' AND ' + extra_where
    messages_query += ' ORDER BY time'
    with sqlite_connection(config.export_path, immutable=True, row_factory='row') as db:
        # Build a combined id -> Chat lookup from both the chats and users tables.
        chats: Chats = {}
        for r in db.execute('SELECT * FROM chats ORDER BY id'):
            # groups/channels: the tool doesn't dump a handle for them
            chat = Chat(id=r['id'], name=r['name'], handle=None)
            assert chat.id not in chats
            chats[chat.id] = chat
        for r in db.execute('SELECT * FROM users ORDER BY id'):
            first = r["first_name"]
            last = r["last_name"]
            name: Optional[str]
            if first is not None and last is not None:
                name = f'{first} {last}'
            else:
                name = first or last
            chat = Chat(id=r['id'], name=name, handle=r['username'])
            assert chat.id not in chats
            chats[chat.id] = chat
        for r in db.execute(messages_query):
            # seems like the only remaining have message_type = 'message'
            yield _message_from_row(r, chats=chats, with_extra_media_info=with_extra_media_info)
def _extract_extra_media_info(data: bytes) -> Optional[str]:
    # ugh... very hacky, but it does manage to extract from 90% of messages that have media
    #
    # Best-effort parser over the raw serialized message blob stored by
    # telegram_backup. Walks the bytes with a cursor (`pos`), and returns the
    # link description of the attached media, or None when the media flag is
    # not set. Assumes the blob follows the TL "message" layout -- TODO confirm.
    pos = 0
    def skip(count: int) -> None:
        # advance the parse cursor by `count` bytes
        nonlocal pos
        pos += count
    def getstring() -> str:
        # jesus
        # https://core.telegram.org/type/string
        # TL strings are length-prefixed and padded to a 4-byte boundary.
        if data[pos] == 254:
            # long form: 0xFE marker followed by a 3-byte little-endian length
            skip(1)
            (sz1, sz2, sz3) = unpack_from('BBB', data, offset=pos)
            skip(3)
            sz = 256 ** 2 * sz3 + 256 * sz2 + sz1
            short = 0
        else:
            # short form: a single length byte
            (sz, ) = unpack_from('B', data, offset=pos)
            skip(1)
            short = 1
        assert sz > 0, sz
        # pad so that (length byte, if short form, + payload) is 4-byte aligned
        padding = 0 if (sz + short) % 4 == 0 else 4 - (sz + short) % 4
        (ss,) = unpack_from(f'{sz}s{padding}x', data, offset=pos)
        skip(sz + padding)
        try:
            return ss.decode('utf8')
        except UnicodeDecodeError as e:
            raise RuntimeError(f'Failed to decode {ss}') from e
    def debug(count: int=10) -> None:
        # dev helper: dump the next `count` bytes as hex and as characters
        print([hex(x) for x in data[pos: pos + count]])
        print([chr(x) for x in data[pos: pos + count]])
    header = 'H2xII8xI'
    (flags, mid, src, ts) = unpack_from(header, data, offset=pos)
    pos += calcsize(header)
    # see https://core.telegram.org/constructor/message
    has_media = (flags >> 9) & 1
    if has_media == 0:
        return None
    msg_body = getstring()
    skip(20)
    url1 = getstring()
    url2 = getstring()
    ss_type = getstring()
    # not sure if assert is really necessary here
    # assert ss_type in {
    # 'article',
    # 'photo',
    # 'app',
    # 'video',
    # }, ss_type
    link_title = getstring()
    link_title_2 = getstring()
    link_description = getstring()
    return link_description
#+---------------------------------------------------------------------------+
#| 01001110 01100101 01110100 01111010 01101111 01100010 |
#| |
#| Netzob : Inferring communication protocols |
#+---------------------------------------------------------------------------+
#| Copyright (C) 2011-2017 Georges Bossert and Frédéric Guihéry |
#| This program is free software: you can redistribute it and/or modify |
#| it under the terms of the GNU General Public License as published by |
#| the Free Software Foundation, either version 3 of the License, or |
#| (at your option) any later version. |
#| |
#| This program is distributed in the hope that it will be useful, |
#| but WITHOUT ANY WARRANTY; without even the implied warranty of |
#| MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
#| GNU General Public License for more details. |
#| |
#| You should have received a copy of the GNU General Public License |
#| along with this program. If not, see <http://www.gnu.org/licenses/>. |
#+---------------------------------------------------------------------------+
#| @url : http://www.netzob.org |
#| @contact : contact@netzob.org |
#| @sponsors : Amossys, http://www.amossys.fr |
#| Supélec, http://www.rennes.supelec.fr/ren/rd/cidre/ |
#+---------------------------------------------------------------------------+
#+---------------------------------------------------------------------------+
#| File contributors : |
#| - Georges Bossert <georges.bossert (a) supelec.fr> |
#| - Frédéric Guihéry <frederic.guihery (a) amossys.fr> |
#+---------------------------------------------------------------------------+
#+---------------------------------------------------------------------------+
#| Standard library imports |
#+---------------------------------------------------------------------------+
import random
import socket
#+---------------------------------------------------------------------------+
#| Related third party imports |
#+---------------------------------------------------------------------------+
#+---------------------------------------------------------------------------+
#| Local application imports |
#+---------------------------------------------------------------------------+
from netzob.Common.Utils.Decorators import typeCheck, public_api, NetzobLogger
from netzob.Model.Grammar.Transitions.Transition import Transition
from netzob.Model.Grammar.Transitions.OpenChannelTransition import OpenChannelTransition
from netzob.Model.Grammar.States.AbstractState import AbstractState
from netzob.Model.Grammar.Transitions.AbstractTransition import AbstractTransition
from netzob.Model.Grammar.Transitions.CloseChannelTransition import CloseChannelTransition
from netzob.Model.Vocabulary.EmptySymbol import EmptySymbol
from netzob.Model.Vocabulary.UnknownSymbol import UnknownSymbol
from netzob.Simulator.AbstractionLayer import Operation
@NetzobLogger
class State(AbstractState):
    """This class represents a state in an automaton.
    The State constructor expects some parameters:
    :param name: The name of the state. If `None`, it is set to 'State'.
    :type name: :class:`str`, optional
    The State class provides the following public variables:
    :var name: The name of the state. The default value is 'State'.
    :var transitions: The list of outgoing transitions
    :vartype name: :class:`str`
    :vartype transitions: ~typing.List[~netzob.Model.Grammar.Transitions.Transition.Transition]
    The following example shows the definition of an ``s0`` state and an ``s1`` state:
    >>> from netzob.all import *
    >>> s0 = State()
    >>> s0.name
    'State'
    >>> s1 = State(name="S1")
    >>> s1.name
    'S1'
    """
    @public_api
    def __init__(self, name=None):
        super(State, self).__init__(name=name)
        # outgoing transitions (name-mangled; exposed through the `transitions` property)
        self.__transitions = []
    @public_api
    def copy(self):
        r"""Copy the current state.
        NOTE: this is a shallow copy -- the transition objects and callbacks
        are shared with the original state (only the lists are new).
        :return: A new object of the same type.
        :rtype: :class:`State <netzob.Model.Grammar.States.State.State>`
        """
        state = State(name=self.name)
        state.transitions = list(self.transitions)
        state.active = self.active
        state.cbk_modify_transition = list(self.cbk_modify_transition)
        state.cbk_filter_transitions = list(self.cbk_filter_transitions)
        return state
    def execute(self, actor):
        # Execute one automaton step for `actor` from this state; returns the
        # next state, or None to stop the actor.
        self._logger.debug(" [+] At state '{}'".format(self.name))
        actor.visit_log.append(" [+] At state '{}'".format(self.name))
        # If necessary, filter available transitions
        available_transitions = self.__filter_available_transitions(actor, self.transitions)
        # Check if the actor has received a message. If so, we execute the step as not an initiator
        if actor.abstractionLayer.check_received():
            # Check if we should consider reception (i.e. there exists at least one transition in inverseInitiator mode)
            should_consider_reception = False
            for transition in available_transitions:
                if isinstance(transition, Transition):
                    is_transition_initiator = (actor.initiator and not transition.inverseInitiator) or (not actor.initiator and transition.inverseInitiator)
                    if is_transition_initiator is False:
                        should_consider_reception = True
                        break
            if should_consider_reception:
                actor.visit_log.append(" [+] At state '{}', received packet on communication channel. Switching to execution as not initiator.".format(self.name))
                self._logger.debug("Data received on the communication channel. Switching to execution as not initiator to handle the received message.")
                return self.executeAsNotInitiator(actor, available_transitions)
        # Else, randomly pick a transition
        actor.visit_log.append(" [+] Randomly choosing a transition to execute or to wait for an input symbol")
        next_transition = self.__pick_next_transition(actor, available_transitions)
        if next_transition is None:
            return None
        # If transition is in initiator mode
        if (actor.initiator and not next_transition.inverseInitiator) or (not actor.initiator and next_transition.inverseInitiator):
            # If necessary, modify the current transition
            next_transition = self.__modify_current_transition(actor, next_transition, available_transitions)
            # Execute next transition as initiator
            nextState = self.executeAsInitiator(actor, next_transition)
        else:
            # Execute next transition as not initiator
            nextState = self.executeAsNotInitiator(actor, available_transitions)
        return nextState
    def executeAsInitiator(self, actor, next_transition):
        """This method picks the next available transition and executes it.
        """
        self._logger.debug("[actor='{}'] Execute state {} as an initiator".format(str(actor), self.name))
        self.active = True
        self._logger.debug("[actor='{}'] Next transition for state '{}': {}.".format(str(actor), self.name, next_transition))
        # Execute picked transition as an initiator
        try:
            nextState = next_transition.executeAsInitiator(actor)
            self._logger.debug("[actor='{}'] Transition '{}' leads to state: {}.".format(str(actor), str(next_transition), str(nextState)))
        except Exception as e:
            # mark the state inactive before propagating the failure
            self.active = False
            raise
        if nextState is None:
            self._logger.debug("[actor='{}'] The execution of transition '{}' on state '{}' did not return the next state".format(str(actor), str(next_transition), self.name))
        self.active = False
        return nextState
    def executeAsNotInitiator(self, actor, available_transitions):
        """This method executes the current state as not an initiator.
        This method will wait for a maximum amount of time the
        reception of a symbol and will try to select the appropriate
        transition which would be triggered by received symbol. At
        the end, if no exception occurs, it returns the next state.
        """
        self._logger.debug("[actor='{}'] Execute state {} as a non-initiator".format(str(actor), self.name))
        self.active = True
        # if no transition exists we quit
        if len(self.transitions) == 0:
            self._logger.debug("[actor='{}'] The current state '{}' has no transitions available".format(str(actor), self.name))
            self.active = False
            return None
        next_transition = None
        nextState = None
        # Execute the first special transition (inputSymbolProbability equals 100.0)
        for transition in self.transitions:
            if transition.inputSymbolProbability == 100.0:
                next_transition = transition
        # Else, execute the closing transition, if it is the last one remaining
        if next_transition is None:
            if len(self.transitions) == 1 and self.transitions[
                    0].TYPE == CloseChannelTransition.TYPE:
                next_transition = self.transitions[0]
        if next_transition is not None:
            actor.visit_log.append(" [+] Going to execute transition '{}'".format(str(next_transition)))
            nextState = next_transition.executeAsNotInitiator(actor)
            self._logger.debug("[actor='{}'] Transition '{}' leads to state: {}.".format(
                str(actor), str(next_transition), str(nextState)))
            if nextState is None:
                self.active = False
                raise Exception(
                    "The execution of transition '{}' on state '{}' did not return the next state.".
                    format(next_transition.name, self.name))
            return nextState
        # Else, we wait to receive a symbol
        received_symbol = None
        received_message = None
        # imported here to avoid a circular import at module load time
        from netzob.Simulator.Actor import ActorStopException
        try:
            (received_symbol, received_message, received_structure) = actor.abstractionLayer.readSymbol()
            if received_symbol is None:
                raise Exception("The abstraction layer returned a None received symbol")
            self._logger.debug("[actor='{}'] Input symbol: '{}'".format(str(actor), str(received_symbol)))
            # Find the transition which accepts the received symbol as an input symbol, along with the correct input symbol preset
            next_transition = None
            for transition in self.transitions:
                is_transition_initiator = (actor.initiator and not transition.inverseInitiator) or (not actor.initiator and transition.inverseInitiator)
                if is_transition_initiator:
                    continue
                # NOTE: symbols are matched by identity (same object), not equality
                if transition.type == Transition.TYPE and id(transition.inputSymbol) == id(received_symbol):
                    if transition.inputSymbolPreset is not None:
                        self._logger.debug("Checking input symbol preset")
                        # Check preset
                        if received_symbol.check_preset(received_structure, transition.inputSymbolPreset):
                            self._logger.debug("Receive good symbol with good preset setting")
                            actor.visit_log.append(" [+] Received one of the expected symbols ('{}'), with good preset settings ('{}')".format(received_symbol, transition.inputSymbolPreset))
                            next_transition = transition
                            break
                    else:
                        next_transition = transition
                        break
            actor.visit_log.append(" [+] Input symbol '{}' corresponds to transition '{}'".format(str(received_symbol), str(next_transition)))
        except ActorStopException:
            raise
        except socket.timeout:
            self._logger.debug("[actor='{}'] In state '{}', timeout on abstractionLayer.readSymbol()".format(str(actor), self.name))
            # Check if there is a transition with an EmptySymbol as input symbol
            self._logger.debug("[actor='{}'] Check if a transition expects an EmptySymbol as input symbol".format(str(actor)))
            next_transition = None
            for transition in self.transitions:
                if transition.type == Transition.TYPE and isinstance(transition.inputSymbol, EmptySymbol):
                    self._logger.debug("[actor='{}'] The transition '{}' expects an EmptySymbol as input symbol ".format(str(actor), str(transition)))
                    next_transition = transition
                    actor.visit_log.append(" [+] Receiving no symbol (EmptySymbol) corresponds to transition '{}'".format(str(next_transition)))
                    break
            else:
                # for/else: reached only when no transition expects an EmptySymbol
                self._logger.debug("[actor='{}'] No transition expects an EmptySymbol as input symbol".format(str(actor)))
                self.active = False
                if actor.automata.cbk_read_symbol_timeout is not None:
                    actor.automata.cbk_read_symbol_timeout(self, None)
                # Returning None here will stop the actor
                return
        except OSError as e:
            self._logger.debug("[actor='{}'] The underlying abstraction channel seems to be closed, so we stop the current actor".format(str(actor)))
            return
        except Exception as e:
            self._logger.debug("[actor='{}'] An exception occured when waiting for a symbol at state '{}': '{}'".format(str(actor), self.name, e))
            self.active = False
            raise
        # If a callback function is defined, we call it in order to execute an external program that may change the selected transition
        next_transition = self.__modify_current_transition(actor, next_transition, available_transitions)
        # Execute the retained transition
        if next_transition is None:
            self._logger.debug("[actor='{}'] The received symbol did not match any of the registered transition".format(str(actor)))
            #nextState = self
            # Handle case where received symbol is unknown
            if isinstance(received_symbol, UnknownSymbol):
                if actor.automata.cbk_read_unknown_symbol is not None:
                    actor.automata.cbk_read_unknown_symbol(self,
                                                           None,
                                                           received_message)
                else:
                    raise Exception("The received message is unknown")
            # Handle case where received symbol is known but unexpected
            else:
                if actor.automata.cbk_read_unexpected_symbol is not None:
                    actor.automata.cbk_read_unexpected_symbol(self,
                                                              None,
                                                              received_symbol,
                                                              received_message,
                                                              received_structure)
                else:
                    raise Exception("The received symbol did not match any of expected symbols, for actor '{}'".format(actor))
        else:
            for cbk in next_transition.cbk_action:
                self._logger.debug("[actor='{}'] A callback function is defined at the end of transition '{}'".format(str(actor), next_transition.name))
                cbk(received_symbol, received_message, received_structure, Operation.ABSTRACT, self, actor.memory)
            nextState = next_transition.executeAsNotInitiator(actor)
            self._logger.debug("[actor='{}'] Transition '{}' leads to state: {}.".format(str(actor), str(next_transition), str(nextState)))
        self.active = False
        return nextState
    def __pick_next_transition(self, actor, available_transitions):
        """Returns the next transition by considering the priority (inputSymbolProbability) of the transition and a random choice.
        It can return None.
        :return: the next transition or None if no transition available
        :rtype: :class:`AbstractTransition <netzob.Model.Grammar.Transition.AbstractTransition.AbstractTransition>`
        """
        # create a dictionary to host the available transition
        prioritizedTransitions = dict()
        for transition in available_transitions:
            # Handle transition priority (inputSymbolProbability)
            if transition.inputSymbolProbability in list(prioritizedTransitions.keys()):
                prioritizedTransitions[transition.inputSymbolProbability].append(transition.copy())
            else:
                prioritizedTransitions[transition.inputSymbolProbability] = [transition.copy()]
        if len(prioritizedTransitions) == 0:
            return None
        # only the transitions with the highest probability are candidates
        list_probabilities = sorted(prioritizedTransitions.keys())
        list_probabilities = list_probabilities[::-1]
        available_transitions = prioritizedTransitions[list_probabilities[0]]
        # Randomly select the next transition
        next_transition = random.choice(available_transitions)
        # Log initiator mode
        if isinstance(next_transition, Transition):
            is_transition_initiator = (actor.initiator and not next_transition.inverseInitiator) or (not actor.initiator and next_transition.inverseInitiator)
            if is_transition_initiator:
                actor.visit_log.append(" [+] Picking transition '{}' (initiator)".format(next_transition))
            else:
                actor.visit_log.append(" [+] Waiting for an input symbol to decide the transition (not initiator)")
        elif isinstance(next_transition, OpenChannelTransition):
            initiator_mode = "open channel"
            actor.visit_log.append(" [+] Picking transition '{}' ({})".format(next_transition, initiator_mode))
        else:
            initiator_mode = "close channel"
            actor.visit_log.append(" [+] Picking transition '{}' ({})".format(next_transition, initiator_mode))
        return next_transition
    def __modify_current_transition(self, actor, current_transition, available_transitions):
        r"""If a callback function is defined, we call it in order to execute
        an external program that may change the selected transition.
        """
        self._logger.debug("[actor='{}'] Test if a callback function is defined at state '{}'".format(actor, self.name))
        for cbk in self.cbk_modify_transition:
            self._logger.debug("[actor='{}'] A callback function is defined at state '{}'".format(actor, self.name))
            # pass copies so callbacks cannot mutate the registered transitions
            available_transitions = [cloned_transition.copy() for cloned_transition in available_transitions]
            current_transition = cbk(available_transitions,
                                     current_transition,
                                     self,
                                     actor.abstractionLayer.last_sent_symbol,
                                     actor.abstractionLayer.last_sent_message,
                                     actor.abstractionLayer.last_sent_structure,
                                     actor.abstractionLayer.last_received_symbol,
                                     actor.abstractionLayer.last_received_message,
                                     actor.abstractionLayer.last_received_structure,
                                     actor.memory)
            is_transition_initiator = (actor.initiator and not current_transition.inverseInitiator) or (not actor.initiator and current_transition.inverseInitiator)
            if is_transition_initiator:
                transition_mode = "initiator"
            else:
                transition_mode = "not initiator"
            actor.visit_log.append(" [+] Changing transition to '{}' ({}), through callback".format(current_transition, transition_mode))
        else:
            # NOTE: for/else -- the loop has no break, so this branch always runs
            # (it only logs; the "No callback" wording applies when the list is empty)
            self._logger.debug("[actor='{}'] No callback function is defined at state '{}'".format(actor, self.name))
        return current_transition
    def __filter_available_transitions(self, actor, available_transitions):
        r"""If a callback function is defined, we call it in order to execute
        an external program that may change the available transitions.
        """
        self._logger.debug("[actor='{}'] Test if a callback function is defined at state '{}'".format(actor, self.name))
        for cbk in self.cbk_filter_transitions:
            self._logger.debug("[actor='{}'] A callback function is defined at state '{}'".format(actor, self.name))
            # pass copies so callbacks cannot mutate the registered transitions
            available_transitions = [cloned_transition.copy() for cloned_transition in available_transitions]
            available_transitions = cbk(available_transitions,
                                        self,
                                        actor.abstractionLayer.last_sent_symbol,
                                        actor.abstractionLayer.last_sent_message,
                                        actor.abstractionLayer.last_sent_structure,
                                        actor.abstractionLayer.last_received_symbol,
                                        actor.abstractionLayer.last_received_message,
                                        actor.abstractionLayer.last_received_structure,
                                        actor.memory)
            actor.visit_log.append(" [+] Filtering available transitions through callback")
        else:
            # NOTE: for/else -- the loop has no break, so this branch always runs
            self._logger.debug("[actor='{}'] No callback function is defined at state '{}'".format(actor, self.name))
        return available_transitions
    @typeCheck(AbstractTransition)
    def removeTransition(self, transition):
        """remove the specified transition from the list
        of transition which starts on the current state.
        :param transition: the transition to remove
        :type transition: :class:`Transition <netzob.Model.Grammar.Transitions.Transition.Transition>`
        :raise: TypeError if param is not a Transition and a ValueError if the transition
        is not registered
        """
        if transition not in self.__transitions:
            raise ValueError("The transition is not associated to the current state so cannot be removed.")
        self.__transitions.remove(transition)
    @public_api
    @property
    def transitions(self):
        return self.__transitions
    @transitions.setter  # type: ignore
    def transitions(self, transitions):
        self.__transitions = transitions
def _test():
    r"""
    Doctests for State construction and Transition wiring (collected by the
    package's doctest runner).

    >>> from netzob.all import *
    >>> s0 = State()
    >>> s0.name
    'State'
    >>> s1 = State(name="S1")
    >>> s1.name
    'S1'
    >>> t = Transition(s0, s1, None, None)
    >>> t.startState.name
    'State'
    >>> t.endState.name
    'S1'
    >>> len(s0.transitions)
    1
    >>> s0.transitions[0].startState.name
    'State'
    >>> s0.transitions[0].endState.name
    'S1'

    # Test copy()
    >>> from netzob.all import *
    >>> s0 = State(name="s0")
    >>> s1 = State(name="s1")
    >>> t = CloseChannelTransition(s0, s1, name="transition")
    >>> s0.copy()
    s0
    """
class Validator:
    """Base credential validator.

    Checks a text against a bound on the total length and on the number of
    lower-case letters, upper-case letters, digits and symbols it contains.
    Each bound is a sequence ``[min]`` or ``[min, max]``; when ``max`` is
    omitted it defaults to the maximum allowed length (unbounded).
    Subclasses must implement :meth:`extra_validation`.
    """

    def __init__(self, length, chars, Chars, nums, symbols, **kwargs):
        self.text = ''  # will be the .verify() input
        # copy the bound so the caller's sequence is never mutated below
        # (also allows tuples to be passed)
        self.length = list(length)
        if len(self.length) < 2:
            self.length.append(float('inf'))  # set length second element to infinity if not present
        self.chars = chars      # lower-case
        self.Chars = Chars      # upper-case
        self.nums = nums        # digits
        self.symbols = symbols  # symbols
        # accepted symbols (default set, or the `symbols_list` keyword argument)
        self.symbols_list = list(kwargs.get('symbols_list',
                                            '!"#$%&\'()*+,-./:;<=>?@[\\]^_{|}~'))

    @staticmethod
    def __safe_get(l, index, default):
        """Return ``l[index]``, or ``default`` when the index is out of range."""
        try:
            return l[index]
        except IndexError:
            return default

    def __check(self, func, limit):
        """Return True when the count of characters matching ``func`` is OUT of bounds."""
        if limit:
            chars = sum(1 for char in self.text if func(char))  # count matches
            return not limit[0] <= chars <= self.__safe_get(limit, 1, self.length[1])
        return False

    def extra_validation(self, text):
        """Subclass hook; must return None on success or a (False, reason) tuple."""
        raise NotImplementedError('Extra validation not implemented')

    def verify(self, text: str):
        """Validate ``text``; return ``(True, '')`` on success, ``(False, reason)`` otherwise."""
        self.text = text
        extra = self.extra_validation(self.text)  # call extra_validation
        if extra:  # if response is not None
            return extra  # return error
        if not self.length[0] <= len(self.text) <= self.length[1]:  # check for length
            return False, 'length'
        if self.__check(lambda c: c.islower(), self.chars):  # check for lower-case
            return False, 'lower'
        if self.__check(lambda c: c.isupper(), self.Chars):  # check for upper-case
            return False, 'upper'
        if self.__check(lambda c: c.isdigit(), self.nums):  # check for digits
            return False, 'digit'
        if self.__check(lambda c: c in self.symbols_list, self.symbols):  # check for symbols
            return False, 'symbols'
        return True, ''  # if all verifications passed
class UsernameValidator(Validator):
    """Validator for usernames; can check uniqueness against a Django model."""

    def __init__(self, length, chars, Chars, nums, symbols, **kwargs):
        super().__init__(length, chars, Chars, nums, symbols, **kwargs)
        # optional Django model used to reject already-taken usernames
        self.django = kwargs.get('django_model', None)

    def extra_validation(self, text):
        """Return (False, 'existing') when the username is already taken, else None."""
        if self.django:
            if self.django.objects.filter(username=text):  # check in the database
                return False, 'existing'
        return None
class PasswordValidator(Validator):
    """Validator for passwords; rejects a password identical to the username."""

    def __init__(self, length, chars, Chars, nums, symbols, **kwargs):
        super().__init__(length, chars, Chars, nums, symbols, **kwargs)
        # optional username the password must differ from
        self.username = kwargs.get('username', None)

    def extra_validation(self, text):
        """Return (False, 'equal') when the password equals the username, else None."""
        return (False, 'equal') if text == self.username else None
/Mathics_Django-6.0.0-py3-none-any.whl/mathics_django/web/media/js/mathjax/jax/output/HTML-CSS/fonts/STIX-Web/Misc/BoldItalic/Main.js | MathJax.OutputJax["HTML-CSS"].FONTDATA.FONTS["STIXMathJax_Misc-bold-italic"]={directory:"Misc/BoldItalic",family:"STIXMathJax_Misc",weight:"bold",style:"italic",testString:"\u00A0\u0250\u0251\u0252\u0253\u0254\u0255\u0256\u0257\u0258\u0259\u025A\u025B\u025C\u025D",32:[0,0,250,0,0],160:[0,0,250,0,0],592:[473,14,512,13,492],593:[473,14,612,25,592],594:[473,14,612,25,592],595:[691,13,500,-14,449],596:[462,13,444,-5,392],597:[462,157,444,-5,406],598:[699,233,500,-21,517],599:[683,13,570,-21,653],600:[462,13,444,5,421],601:[462,13,444,5,398],602:[462,13,626,5,626],603:[475,14,444,5,482],604:[475,14,480,5,469],605:[475,14,689,5,689],606:[475,14,486,7,475],607:[462,207,367,-100,364],608:[683,245,720,-52,751],609:[472,245,549,-52,520],610:[462,11,561,21,544],611:[462,234,444,20,400],612:[450,10,493,10,488],613:[459,249,556,-13,498],614:[683,9,556,-13,498],615:[683,205,533,-13,475],616:[684,9,278,-10,262],617:[456,8,253,2,237],618:[462,0,304,-32,321],619:[699,9,320,9,368],620:[699,9,445,17,417],621:[699,233,291,-47,290],622:[699,236,623,2,585],623:[462,9,778,-14,723],624:[462,233,778,-14,723],625:[462,233,759,-14,704],626:[462,233,694,-109,632],627:[462,233,505,-6,486],628:[462,12,588,-27,614],629:[462,13,500,-3,441],630:[462,5,749,23,751],631:[477,2,685,-3,626],632:[685,231,691,-3,632],633:[462,0,427,0,410],634:[699,0,493,0,476],635:[462,233,436,0,417],636:[462,233,389,-87,389],637:[462,233,389,-47,389],638:[484,0,360,-21,417],639:[484,0,338,10,292],640:[464,0,498,8,515],641:[464,0,498,8,597],642:[462,218,389,-32,333],643:[683,233,424,-104,584],644:[683,207,394,-90,576],645:[470,233,415,79,344],646:[683,243,521,-40,641],647:[513,90,310,7,299],648:[594,233,311,-60,281],649:[462,9,556,-16,514],650:[452,8,500,15,552],651:[462,10,534,18,492],652:[462,13,444,15,401],653:[462,13,667,15,614],654:[667,0,444,16,502],655:[464,0,633,65,606],656:[449,2
18,440,-24,405],657:[449,97,411,-24,376],658:[450,236,499,-10,558],659:[450,307,499,-10,528],660:[685,0,530,25,520],661:[685,0,530,65,509],662:[669,14,487,25,453],663:[462,237,479,20,544],664:[680,17,723,13,734],665:[464,0,493,-10,486],666:[475,14,465,16,504],667:[538,11,580,29,690],668:[464,0,582,21,676],669:[685,233,475,-50,463],670:[457,250,500,22,528],671:[464,0,485,10,468],672:[582,205,488,1,674],673:[685,0,530,25,520],674:[685,0,530,65,507],675:[699,13,750,-21,735],676:[699,236,820,-21,813],677:[699,97,817,-21,743],678:[594,13,560,-3,524],679:[683,233,453,-30,670],680:[594,18,600,-3,618],8355:[669,0,668,-13,661],8356:[683,12,500,-32,510],8359:[669,13,1229,-28,1173],8364:[681,17,562,34,546],9312:[690,19,695,0,695],9313:[690,19,695,0,695],9314:[690,19,695,0,695],9315:[690,19,695,0,695],9316:[690,19,695,0,695],9317:[690,19,695,0,695],9318:[690,19,695,0,695],9319:[690,19,695,0,695],9320:[690,19,695,0,695],9398:[690,19,695,0,695],9399:[690,19,695,0,695],9400:[690,19,695,0,695],9401:[690,19,695,0,695],9402:[690,19,695,0,695],9403:[690,19,695,0,695],9404:[690,19,695,0,695],9405:[690,19,695,0,695],9406:[690,19,695,0,695],9407:[690,19,695,0,695],9408:[690,19,695,0,695],9409:[690,19,695,0,695],9410:[690,19,695,0,695],9411:[690,19,695,0,695],9412:[690,19,695,0,695],9413:[690,19,695,0,695],9414:[690,19,695,0,695],9415:[690,19,695,0,695],9417:[690,19,695,0,695],9418:[690,19,695,0,695],9419:[690,19,695,0,695],9420:[690,19,695,0,695],9421:[690,19,695,0,695],9422:[690,19,695,0,695],9423:[690,19,695,0,695],9424:[690,19,695,0,695],9425:[690,19,695,0,695],9426:[690,19,695,0,695],9427:[690,19,695,0,695],9428:[690,19,695,0,695],9429:[690,19,695,0,695],9430:[690,19,695,0,695],9431:[690,19,695,0,695],9432:[690,19,695,0,695],9433:[690,19,695,0,695],9434:[690,19,695,0,695],9435:[690,19,695,0,695],9436:[690,19,695,0,695],9437:[690,19,695,0,695],9438:[690,19,695,0,695],9439:[690,19,695,0,695],9440:[690,19,695,0,695],9441:[690,19,695,0,695],9442:[690,19,695,0,695],9443:[690,19,695,0,695]
,9444:[690,19,695,0,695],9445:[690,19,695,0,695],9446:[690,19,695,0,695],9447:[690,19,695,0,695],9448:[690,19,695,0,695],9449:[690,19,695,0,695],9450:[690,19,695,0,695]};MathJax.Callback.Queue(["initFont",MathJax.OutputJax["HTML-CSS"],"STIXMathJax_Misc-bold-italic"],["loadComplete",MathJax.Ajax,MathJax.OutputJax["HTML-CSS"].fontDir+"/Misc/BoldItalic/Main.js"]); | PypiClean |
from sklearn.preprocessing import StandardScaler
import pandas as pd
import numpy as np
import os
import joblib
import json
class StdScaler():
    """Column-wise standardization helper wrapping sklearn's StandardScaler.

    Each feature column is centered and scaled independently using the mean
    and standard deviation learned from training data; the learned statistics
    are stored so that later (test) data is scaled with the same parameters.

    Standardization operates on columns.  A 1-D array holds only one value
    per column and cannot be standardized directly -- reshape it into a 2-D
    column vector with ``reshape(-1, 1)`` first.
    """

    def __init__(self, ID: str):
        """Set up an untrained scaler.

        Args
        ----
        + ID(str): identifier of this scaler, used in error messages.
        """
        self._scaler = StandardScaler()
        self._id = ID
        # Flag telling whether the scaler statistics have been fitted yet.
        self._fitted = False

    def fit_transform(self, train_df: pd.DataFrame) -> np.ndarray:
        """Learn per-column mean/std from the data set and standardize it.

        Args
        ----
        + train_df(pd.DataFrame): training data set.

        Returns
        ----
        The standardized data set as a numpy array.
        """
        # Reset first so a failing fit does not leave a stale "fitted" flag.
        self._fitted = False
        train_df_scaled = self._scaler.fit_transform(train_df)
        self._fitted = True
        return train_df_scaled

    def fit(self, train_df: pd.DataFrame):
        """Learn and store the per-column mean/std of the data set.

        Args
        ----
        + train_df(pd.DataFrame): training data set.
        """
        self._fitted = False
        self._scaler.fit(train_df)
        self._fitted = True

    def partial_fit(self, train_df: pd.DataFrame):
        """Incrementally update mean/std, keeping earlier training results.

        Args
        ----
        + train_df(pd.DataFrame): training data set (one batch).
        """
        self._scaler.partial_fit(train_df)
        self._fitted = True

    def transform(self, test_df: pd.DataFrame) -> np.ndarray:
        """Standardize data with the already-fitted center/scale parameters.

        Args
        ----
        + test_df(pd.DataFrame): test data set.

        Returns
        ----
        The standardized data set, or None if the scaler is not fitted yet.
        """
        if not self._fitted:
            print(f"ERROR: StdScaler({self._id}) is not fitted yet.")
            return None
        test_df_scaled = self._scaler.transform(test_df)
        return test_df_scaled

    def inverse_transform(self, pred_arr: np.ndarray) -> np.ndarray:
        """Scale standardized data back to its original magnitude.

        Args
        ----
        + pred_arr(np.ndarray): standardized data set.

        Returns
        ----
        The de-standardized data set, or None if the scaler is not fitted yet.
        """
        if not self._fitted:
            print(f"ERROR: StdScaler({self._id}) is not fitted yet.")
            return None
        pred_arr_anti = self._scaler.inverse_transform(pred_arr)
        return pred_arr_anti

    def is_fitted(self) -> bool:
        """Return True when the scaler has been fitted."""
        return self._fitted

    def save(self, scaler_file: str, property_file: str):
        """Persist the scaler and its properties to disk.

        Args
        ----
        + scaler_file(str): target file for the scaler (.pkl, full path);
        + property_file(str): target file for the properties (.json, full path);
        """
        # Persist the scaler itself.  Guard against a bare filename whose
        # dirname is "" -- os.makedirs("") would raise FileNotFoundError.
        scaler_path = os.path.dirname(scaler_file)
        if scaler_path and not os.path.exists(scaler_path):
            os.makedirs(scaler_path)
        joblib.dump(self._scaler, scaler_file)

        # Persist the scaler properties.
        property_path = os.path.dirname(property_file)
        if property_path and not os.path.exists(property_path):
            os.makedirs(property_path)
        with open(property_file, 'w', encoding='utf8') as fo:
            json.dump({"fitted": self._fitted}, fo)

    def set_scaler(self, scaler: "StandardScaler", fitted: bool):
        """Directly install a (possibly pre-trained) underlying scaler.

        Args
        ----
        + scaler(StandardScaler): the scaler to install;
        + fitted(bool): whether that scaler has been fitted already;
        """
        self._scaler = scaler
        self._fitted = fitted

    @staticmethod
    def load(ID: str, scaler_file: str, property_file: str) -> "StdScaler":
        """Load a scaler previously written by save().

        Args
        ----
        + ID(str): identifier for the restored scaler;
        + scaler_file(str): scaler file on disk (.pkl, full path);
        + property_file(str): properties file on disk (.json, full path);

        Returns
        ----
        A StdScaler wrapping the restored sklearn scaler.
        """
        with open(property_file, 'r', encoding='utf8') as fi:
            encoder_properties = json.load(fi)
        fitted = encoder_properties['fitted']
        scaler = StdScaler(ID)
        scaler.set_scaler(joblib.load(scaler_file), fitted)
        return scaler
/Nuitka_fixed-1.1.2-cp310-cp310-win_amd64.whl/nuitka/optimizations/OptimizeBuiltinCalls.py | from nuitka.__past__ import xrange
from nuitka.Errors import NuitkaAssumptionError
from nuitka.nodes.AttributeNodes import (
ExpressionBuiltinGetattr,
ExpressionBuiltinHasattr,
ExpressionBuiltinSetattr,
makeExpressionAttributeLookup,
)
from nuitka.nodes.BuiltinAllNodes import ExpressionBuiltinAll
from nuitka.nodes.BuiltinAnyNodes import ExpressionBuiltinAny
from nuitka.nodes.BuiltinComplexNodes import (
ExpressionBuiltinComplex1,
ExpressionBuiltinComplex2,
)
from nuitka.nodes.BuiltinDecodingNodes import (
ExpressionBuiltinChr,
ExpressionBuiltinOrd,
)
from nuitka.nodes.BuiltinDecoratorNodes import (
ExpressionBuiltinClassmethod,
ExpressionBuiltinStaticmethod,
)
from nuitka.nodes.BuiltinDictNodes import ExpressionBuiltinDict
from nuitka.nodes.BuiltinFormatNodes import (
ExpressionBuiltinAscii,
ExpressionBuiltinBin,
ExpressionBuiltinFormat,
ExpressionBuiltinHex,
ExpressionBuiltinId,
ExpressionBuiltinOct,
)
from nuitka.nodes.BuiltinHashNodes import ExpressionBuiltinHash
from nuitka.nodes.BuiltinIntegerNodes import (
ExpressionBuiltinInt1,
ExpressionBuiltinInt2,
)
from nuitka.nodes.BuiltinIteratorNodes import (
ExpressionBuiltinIter1,
ExpressionBuiltinIter2,
)
from nuitka.nodes.BuiltinLenNodes import ExpressionBuiltinLen
from nuitka.nodes.BuiltinNextNodes import (
ExpressionBuiltinNext1,
ExpressionBuiltinNext2,
)
from nuitka.nodes.BuiltinOpenNodes import ExpressionBuiltinOpen
from nuitka.nodes.BuiltinRangeNodes import (
ExpressionBuiltinRange1,
ExpressionBuiltinRange2,
ExpressionBuiltinRange3,
ExpressionBuiltinXrange1,
ExpressionBuiltinXrange2,
ExpressionBuiltinXrange3,
)
from nuitka.nodes.BuiltinRefNodes import (
ExpressionBuiltinAnonymousRef,
makeExpressionBuiltinTypeRef,
)
from nuitka.nodes.BuiltinSumNodes import (
ExpressionBuiltinSum1,
ExpressionBuiltinSum2,
)
from nuitka.nodes.BuiltinTypeNodes import (
ExpressionBuiltinBool,
ExpressionBuiltinBytearray1,
ExpressionBuiltinBytearray3,
ExpressionBuiltinFloat,
ExpressionBuiltinFrozenset,
ExpressionBuiltinList,
ExpressionBuiltinSet,
ExpressionBuiltinStrP2,
ExpressionBuiltinStrP3,
ExpressionBuiltinTuple,
ExpressionBuiltinUnicodeP2,
)
from nuitka.nodes.BuiltinVarsNodes import ExpressionBuiltinVars
from nuitka.nodes.CallNodes import makeExpressionCall
from nuitka.nodes.ClassNodes import ExpressionBuiltinType3
from nuitka.nodes.ComparisonNodes import ExpressionComparisonIs
from nuitka.nodes.ConditionalNodes import (
ExpressionConditional,
makeStatementConditional,
)
from nuitka.nodes.ConstantRefNodes import makeConstantRefNode
from nuitka.nodes.ContainerMakingNodes import makeExpressionMakeTupleOrConstant
from nuitka.nodes.ExecEvalNodes import (
ExpressionBuiltinCompile,
ExpressionBuiltinEval,
)
from nuitka.nodes.GlobalsLocalsNodes import (
ExpressionBuiltinDir1,
ExpressionBuiltinGlobals,
)
from nuitka.nodes.ImportNodes import ExpressionBuiltinImport
from nuitka.nodes.KeyValuePairNodes import (
makeKeyValuePairExpressionsFromKwArgs,
)
from nuitka.nodes.NodeMakingHelpers import (
makeConstantReplacementNode,
makeExpressionBuiltinLocals,
makeRaiseExceptionReplacementExpression,
makeRaiseExceptionReplacementExpressionFromInstance,
)
from nuitka.nodes.OperatorNodes import ExpressionOperationBinaryDivmod
from nuitka.nodes.OperatorNodesUnary import (
ExpressionOperationNot,
ExpressionOperationUnaryAbs,
ExpressionOperationUnaryRepr,
)
from nuitka.nodes.OutlineNodes import ExpressionOutlineBody
from nuitka.nodes.ReturnNodes import makeStatementReturn
from nuitka.nodes.SliceNodes import makeExpressionBuiltinSlice
from nuitka.nodes.TypeNodes import (
ExpressionBuiltinIsinstance,
ExpressionBuiltinIssubclass,
ExpressionBuiltinSuper0,
ExpressionBuiltinSuper2,
ExpressionBuiltinType1,
)
from nuitka.nodes.VariableAssignNodes import (
makeStatementAssignmentVariable,
makeStatementDelVariable,
)
from nuitka.nodes.VariableRefNodes import (
ExpressionTempVariableRef,
ExpressionVariableRef,
)
from nuitka.PythonVersions import python_version
from nuitka.specs import BuiltinParameterSpecs
from nuitka.tree.ReformulationExecStatements import wrapEvalGlobalsAndLocals
from nuitka.tree.ReformulationTryFinallyStatements import (
makeTryFinallyStatement,
)
from nuitka.tree.TreeHelpers import (
makeCallNode,
makeStatementsSequence,
makeStatementsSequenceFromStatement,
)
def dir_extractor(node):
    """Reformulate calls to the "dir" built-in."""
    locals_scope = node.subnode_called.getLocalsScope()

    def makeDirWithoutArgs(source_ref):
        # Zero-argument "dir()" means: the keys of the current locals.
        locals_node = makeExpressionBuiltinLocals(
            locals_scope=locals_scope, source_ref=source_ref
        )

        keys_call = makeCallNode(
            makeExpressionAttributeLookup(
                expression=locals_node, attribute_name="keys", source_ref=source_ref
            ),
            source_ref,
        )

        # On Python3 "keys" gives only a view object, but "dir" must give a
        # list, so wrap it.
        if python_version < 0x300:
            return keys_call
        return ExpressionBuiltinList(value=keys_call, source_ref=source_ref)

    return BuiltinParameterSpecs.extractBuiltinArgs(
        node=node,
        # TODO: Needs locals_scope attached.
        builtin_class=ExpressionBuiltinDir1,
        builtin_spec=BuiltinParameterSpecs.builtin_dir_spec,
        empty_special_class=makeDirWithoutArgs,
    )
def vars_extractor(node):
    """Reformulate calls to the "vars" built-in."""
    locals_scope = node.subnode_called.getLocalsScope()

    def makeVarsWithoutArgs(source_ref):
        # Zero-argument "vars()" is just "locals()".
        return makeExpressionBuiltinLocals(
            locals_scope=locals_scope, source_ref=source_ref
        )

    return BuiltinParameterSpecs.extractBuiltinArgs(
        # TODO: Needs locals_scope attached.
        builtin_spec=BuiltinParameterSpecs.builtin_vars_spec,
        builtin_class=ExpressionBuiltinVars,
        empty_special_class=makeVarsWithoutArgs,
        node=node,
    )
def import_extractor(node):
    """Reformulate calls to the "__import__" built-in."""
    return BuiltinParameterSpecs.extractBuiltinArgs(
        builtin_spec=BuiltinParameterSpecs.builtin_import_spec,
        builtin_class=ExpressionBuiltinImport,
        node=node,
    )
def type_extractor(node):
    """Reformulate calls to the "type" built-in, 1 and 3 argument forms."""
    args = node.subnode_args
    arg_count = 0 if args is None else args.getIterationLength()

    if arg_count == 1:
        return BuiltinParameterSpecs.extractBuiltinArgs(
            node=node,
            builtin_class=ExpressionBuiltinType1,
            builtin_spec=BuiltinParameterSpecs.builtin_type1_spec,
        )

    if arg_count == 3:
        return BuiltinParameterSpecs.extractBuiltinArgs(
            node=node,
            builtin_class=ExpressionBuiltinType3,
            builtin_spec=BuiltinParameterSpecs.builtin_type3_spec,
        )

    # Any other argument count cannot work at run time either.
    return makeRaiseExceptionReplacementExpressionFromInstance(
        expression=node, exception=TypeError("type() takes 1 or 3 arguments")
    )
def iter_extractor(node):
    """Reformulate calls to the "iter" built-in, with and without sentinel."""

    def selectIterBuiltin(callable_arg, sentinel, source_ref):
        if sentinel is not None:
            return ExpressionBuiltinIter2(
                callable_arg=callable_arg, sentinel=sentinel, source_ref=source_ref
            )
        return ExpressionBuiltinIter1(value=callable_arg, source_ref=source_ref)

    return BuiltinParameterSpecs.extractBuiltinArgs(
        node=node,
        builtin_class=selectIterBuiltin,
        builtin_spec=BuiltinParameterSpecs.builtin_iter_spec,
    )
def next_extractor(node):
    """Reformulate calls to the "next" built-in, with and without default."""

    # The forms with and without a default value behave very differently,
    # therefore each gets a dedicated node type.
    def selectNextBuiltinClass(iterator, default, source_ref):
        if default is not None:
            return ExpressionBuiltinNext2(
                iterator=iterator, default=default, source_ref=source_ref
            )
        return ExpressionBuiltinNext1(value=iterator, source_ref=source_ref)

    return BuiltinParameterSpecs.extractBuiltinArgs(
        builtin_spec=BuiltinParameterSpecs.builtin_next_spec,
        builtin_class=selectNextBuiltinClass,
        node=node,
    )
def sum_extractor(node):
    """Reformulate calls to the "sum" built-in, with and without start value."""

    # The form with a start value and the one without it get separate node
    # types, the latter is much easier to handle.
    def selectSumBuiltinClass(sequence, start, source_ref):
        if start is not None:
            return ExpressionBuiltinSum2(
                sequence=sequence, start=start, source_ref=source_ref
            )
        return ExpressionBuiltinSum1(sequence=sequence, source_ref=source_ref)

    def makeSum0(source_ref):
        # pylint: disable=unused-argument

        # The exception message changed with CPython 3.8.
        exception_message = (
            "sum expected at least 1 arguments, got 0"
            if python_version < 0x380
            else "sum() takes at least 1 positional argument (0 given)"
        )

        return makeRaiseExceptionReplacementExpressionFromInstance(
            expression=node, exception=TypeError(exception_message)
        )

    return BuiltinParameterSpecs.extractBuiltinArgs(
        node=node,
        builtin_class=selectSumBuiltinClass,
        builtin_spec=BuiltinParameterSpecs.builtin_sum_spec,
        empty_special_class=makeSum0,
    )
def dict_extractor(node):
    """Reformulate calls to the "dict" built-in."""

    # The "dict" built-in is a bit strange in that it accepts at most one
    # positional argument, which has no default value.
    def wrapExpressionBuiltinDictCreation(positional_args, dict_star_arg, source_ref):
        if not positional_args:
            pos_arg = None
        else:
            # Only one is allowed, the spec converted too many into an
            # exception already.
            (pos_arg,) = positional_args

        return ExpressionBuiltinDict(
            pos_arg=pos_arg,
            pairs=makeKeyValuePairExpressionsFromKwArgs(dict_star_arg),
            source_ref=source_ref,
        )

    return BuiltinParameterSpecs.extractBuiltinArgs(
        builtin_spec=BuiltinParameterSpecs.builtin_dict_spec,
        builtin_class=wrapExpressionBuiltinDictCreation,
        node=node,
    )
def chr_extractor(node):
    """Reformulate calls to the "chr" built-in."""
    return BuiltinParameterSpecs.extractBuiltinArgs(
        builtin_spec=BuiltinParameterSpecs.builtin_chr_spec,
        builtin_class=ExpressionBuiltinChr,
        node=node,
    )
def ord_extractor(node):
    """Reformulate calls to the "ord" built-in."""

    def makeOrd0(source_ref):
        # Zero-argument "ord()" raises statically.
        # pylint: disable=unused-argument
        return makeRaiseExceptionReplacementExpressionFromInstance(
            expression=node,
            exception=TypeError("ord() takes exactly one argument (0 given)"),
        )

    return BuiltinParameterSpecs.extractBuiltinArgs(
        builtin_spec=BuiltinParameterSpecs.builtin_ord_spec,
        builtin_class=ExpressionBuiltinOrd,
        empty_special_class=makeOrd0,
        node=node,
    )
def bin_extractor(node):
    """Reformulate calls to the "bin" built-in."""
    return BuiltinParameterSpecs.extractBuiltinArgs(
        builtin_spec=BuiltinParameterSpecs.builtin_bin_spec,
        builtin_class=ExpressionBuiltinBin,
        node=node,
    )


def oct_extractor(node):
    """Reformulate calls to the "oct" built-in."""
    return BuiltinParameterSpecs.extractBuiltinArgs(
        builtin_spec=BuiltinParameterSpecs.builtin_oct_spec,
        builtin_class=ExpressionBuiltinOct,
        node=node,
    )


def hex_extractor(node):
    """Reformulate calls to the "hex" built-in."""
    return BuiltinParameterSpecs.extractBuiltinArgs(
        builtin_spec=BuiltinParameterSpecs.builtin_hex_spec,
        builtin_class=ExpressionBuiltinHex,
        node=node,
    )


def id_extractor(node):
    """Reformulate calls to the "id" built-in."""
    return BuiltinParameterSpecs.extractBuiltinArgs(
        builtin_spec=BuiltinParameterSpecs.builtin_id_spec,
        builtin_class=ExpressionBuiltinId,
        node=node,
    )


def repr_extractor(node):
    """Reformulate calls to the "repr" built-in into a unary operation."""
    return BuiltinParameterSpecs.extractBuiltinArgs(
        builtin_spec=BuiltinParameterSpecs.builtin_repr_spec,
        builtin_class=ExpressionOperationUnaryRepr,
        node=node,
    )
if python_version >= 0x300:

    def ascii_extractor(node):
        """Reformulate calls to the Python3-only "ascii" built-in."""
        # "ascii" takes a single argument just like "repr", so that spec fits.
        return BuiltinParameterSpecs.extractBuiltinArgs(
            builtin_spec=BuiltinParameterSpecs.builtin_repr_spec,
            builtin_class=ExpressionBuiltinAscii,
            node=node,
        )
def range_extractor(node):
    """Reformulate calls to the "range" built-in, 1 to 3 argument forms."""

    def selectRangeBuiltin(low, high, step, source_ref):
        if high is None:
            return ExpressionBuiltinRange1(low=low, source_ref=source_ref)
        if step is None:
            return ExpressionBuiltinRange2(low=low, high=high, source_ref=source_ref)
        return ExpressionBuiltinRange3(
            low=low, high=high, step=step, source_ref=source_ref
        )

    def makeRange0(source_ref):
        # pylint: disable=unused-argument

        # Execute the real built-in to capture the exact exception instance
        # that a zero-argument call raises, and embed that.
        try:
            range()
        except Exception as e:  # We want to broad here, pylint: disable=broad-except
            return makeRaiseExceptionReplacementExpressionFromInstance(
                expression=node, exception=e
            )
        else:
            raise NuitkaAssumptionError("range without argument is expected to raise")

    return BuiltinParameterSpecs.extractBuiltinArgs(
        node=node,
        builtin_class=selectRangeBuiltin,
        builtin_spec=BuiltinParameterSpecs.builtin_range_spec,
        empty_special_class=makeRange0,
    )
def xrange_extractor(node):
    """Reformulate calls to the "xrange" built-in, 1 to 3 argument forms.

    On Python3 "xrange" is the compatibility name from nuitka.__past__.
    """

    def selectXrangeBuiltin(low, high, step, source_ref):
        if high is None:
            return ExpressionBuiltinXrange1(low=low, source_ref=source_ref)
        elif step is None:
            return ExpressionBuiltinXrange2(low=low, high=high, source_ref=source_ref)
        else:
            return ExpressionBuiltinXrange3(
                low=low, high=high, step=step, source_ref=source_ref
            )

    def makeXrange0(source_ref):
        # pylint: disable=unused-argument

        # Execute the real built-in to capture the exact exception instance
        # that a zero-argument call raises, and embed that.
        try:
            xrange()
        except Exception as e:  # We want to broad here, pylint: disable=broad-except
            return makeRaiseExceptionReplacementExpressionFromInstance(
                expression=node, exception=e
            )
        else:
            # Bug fix: the message previously said "range", copy-pasted from
            # range_extractor above.
            raise NuitkaAssumptionError("xrange without argument is expected to raise")

    return BuiltinParameterSpecs.extractBuiltinArgs(
        node=node,
        builtin_class=selectXrangeBuiltin,
        builtin_spec=BuiltinParameterSpecs.builtin_xrange_spec,
        empty_special_class=makeXrange0,
    )
def len_extractor(node):
    """Reformulate calls to the "len" built-in."""
    return BuiltinParameterSpecs.extractBuiltinArgs(
        builtin_spec=BuiltinParameterSpecs.builtin_len_spec,
        builtin_class=ExpressionBuiltinLen,
        node=node,
    )
def all_extractor(node):
    """Reformulate calls to the "all" built-in."""

    def makeAll0(source_ref):
        # Zero-argument "all()" raises statically.
        # pylint: disable=unused-argument
        return makeRaiseExceptionReplacementExpressionFromInstance(
            expression=node,
            exception=TypeError("all() takes exactly one argument (0 given)"),
        )

    return BuiltinParameterSpecs.extractBuiltinArgs(
        builtin_spec=BuiltinParameterSpecs.builtin_all_spec,
        builtin_class=ExpressionBuiltinAll,
        empty_special_class=makeAll0,
        node=node,
    )
def abs_extractor(node):
    """Reformulate calls to the "abs" built-in into a unary operation."""
    return BuiltinParameterSpecs.extractBuiltinArgs(
        builtin_spec=BuiltinParameterSpecs.builtin_abs_spec,
        builtin_class=ExpressionOperationUnaryAbs,
        node=node,
    )
def any_extractor(node):
    """Reformulate calls to the "any" built-in."""

    def makeAny0(source_ref):
        # Zero-argument "any()" raises statically.
        # pylint: disable=unused-argument
        return makeRaiseExceptionReplacementExpressionFromInstance(
            expression=node,
            exception=TypeError("any() takes exactly one argument (0 given)"),
        )

    return BuiltinParameterSpecs.extractBuiltinArgs(
        builtin_spec=BuiltinParameterSpecs.builtin_any_spec,
        builtin_class=ExpressionBuiltinAny,
        empty_special_class=makeAny0,
        node=node,
    )
def tuple_extractor(node):
    """Reformulate calls to the "tuple" built-in."""
    return BuiltinParameterSpecs.extractBuiltinArgs(
        builtin_spec=BuiltinParameterSpecs.builtin_tuple_spec,
        builtin_class=ExpressionBuiltinTuple,
        node=node,
    )


def list_extractor(node):
    """Reformulate calls to the "list" built-in."""
    return BuiltinParameterSpecs.extractBuiltinArgs(
        builtin_spec=BuiltinParameterSpecs.builtin_list_spec,
        builtin_class=ExpressionBuiltinList,
        node=node,
    )


def set_extractor(node):
    """Reformulate calls to the "set" built-in."""
    return BuiltinParameterSpecs.extractBuiltinArgs(
        builtin_spec=BuiltinParameterSpecs.builtin_set_spec,
        builtin_class=ExpressionBuiltinSet,
        node=node,
    )


def frozenset_extractor(node):
    """Reformulate calls to the "frozenset" built-in."""
    return BuiltinParameterSpecs.extractBuiltinArgs(
        builtin_spec=BuiltinParameterSpecs.builtin_frozenset_spec,
        builtin_class=ExpressionBuiltinFrozenset,
        node=node,
    )
def float_extractor(node):
    """Reformulate calls to the "float" built-in."""

    def makeFloat0(source_ref):
        # Zero-argument "float()" is simply the constant 0.0.
        # pylint: disable=unused-argument
        return makeConstantReplacementNode(
            constant=float(), node=node, user_provided=False
        )

    return BuiltinParameterSpecs.extractBuiltinArgs(
        builtin_spec=BuiltinParameterSpecs.builtin_float_spec,
        builtin_class=ExpressionBuiltinFloat,
        empty_special_class=makeFloat0,
        node=node,
    )
def complex_extractor(node):
    """Reformulate calls to the "complex" built-in, 1 and 2 argument forms."""

    def makeComplex0(source_ref):
        # Zero-argument "complex()" is simply the constant 0j.
        # pylint: disable=unused-argument
        return makeConstantReplacementNode(
            constant=complex(), node=node, user_provided=False
        )

    def selectComplexBuiltin(real, imag, source_ref):
        if imag is not None:
            return ExpressionBuiltinComplex2(
                real=real, imag=imag, source_ref=source_ref
            )
        return ExpressionBuiltinComplex1(value=real, source_ref=source_ref)

    return BuiltinParameterSpecs.extractBuiltinArgs(
        node=node,
        builtin_class=selectComplexBuiltin,
        builtin_spec=BuiltinParameterSpecs.builtin_complex_spec,
        empty_special_class=makeComplex0,
    )
def str_extractor(node):
    """Reformulate calls to the "str" built-in, per Python major version."""
    # On Python2 "str" is the bytes type, pick the matching node class.
    builtin_class = (
        ExpressionBuiltinStrP2 if str is bytes else ExpressionBuiltinStrP3
    )

    return BuiltinParameterSpecs.extractBuiltinArgs(
        builtin_spec=builtin_class.builtin_spec,
        builtin_class=builtin_class,
        node=node,
    )
if python_version < 0x300:

    def unicode_extractor(node):
        """Reformulate calls to the Python2-only "unicode" built-in."""
        return BuiltinParameterSpecs.extractBuiltinArgs(
            builtin_spec=ExpressionBuiltinUnicodeP2.builtin_spec,
            builtin_class=ExpressionBuiltinUnicodeP2,
            node=node,
        )

else:
    from nuitka.nodes.BuiltinTypeNodes import (
        ExpressionBuiltinBytes1,
        ExpressionBuiltinBytes3,
    )

    def bytes_extractor(node):
        """Reformulate calls to the Python3 "bytes" built-in."""

        def makeBytes0(source_ref):
            # Zero-argument "bytes()" is the empty bytes constant.
            # pylint: disable=unused-argument
            return makeConstantReplacementNode(
                constant=bytes(), node=node, user_provided=False
            )

        def selectBytesBuiltin(string, encoding, errors, source_ref):
            if encoding is None and errors is None:
                return ExpressionBuiltinBytes1(value=string, source_ref=source_ref)

            return ExpressionBuiltinBytes3(
                value=string,
                encoding=encoding,
                errors=errors,
                source_ref=source_ref,
            )

        return BuiltinParameterSpecs.extractBuiltinArgs(
            node=node,
            builtin_class=selectBytesBuiltin,
            builtin_spec=BuiltinParameterSpecs.builtin_bytes_p3_spec,
            empty_special_class=makeBytes0,
        )
def bool_extractor(node):
    """Reformulate calls to the "bool" built-in."""

    def makeBool0(source_ref):
        # Zero-argument "bool()" is simply the constant False.
        # pylint: disable=unused-argument
        return makeConstantReplacementNode(
            constant=bool(), node=node, user_provided=False
        )

    return BuiltinParameterSpecs.extractBuiltinArgs(
        builtin_spec=BuiltinParameterSpecs.builtin_bool_spec,
        builtin_class=ExpressionBuiltinBool,
        empty_special_class=makeBool0,
        node=node,
    )
def int_extractor(node):
    """Reformulate calls to the "int" built-in, with and without base."""

    def makeInt0(source_ref):
        # Zero-argument "int()" is simply the constant zero.
        # pylint: disable=unused-argument
        return makeConstantReplacementNode(
            constant=int(), node=node, user_provided=False
        )

    def selectIntBuiltin(value, base, source_ref):
        if base is not None:
            return ExpressionBuiltinInt2(value=value, base=base, source_ref=source_ref)
        return ExpressionBuiltinInt1(value=value, source_ref=source_ref)

    return BuiltinParameterSpecs.extractBuiltinArgs(
        node=node,
        builtin_class=selectIntBuiltin,
        builtin_spec=BuiltinParameterSpecs.builtin_int_spec,
        empty_special_class=makeInt0,
    )
if python_version < 0x300:
    from nuitka.nodes.BuiltinIntegerNodes import (
        ExpressionBuiltinLong1,
        ExpressionBuiltinLong2,
    )

    def long_extractor(node):
        """Reformulate calls to the Python2-only "long" built-in."""

        def makeLong0(source_ref):
            # Zero-argument "long()" is simply the constant zero.
            # pylint: disable=unused-argument
            return makeConstantReplacementNode(
                constant=int(), node=node, user_provided=False
            )

        def selectLongBuiltin(value, base, source_ref):
            if base is not None:
                return ExpressionBuiltinLong2(
                    value=value, base=base, source_ref=source_ref
                )
            return ExpressionBuiltinLong1(value=value, source_ref=source_ref)

        return BuiltinParameterSpecs.extractBuiltinArgs(
            node=node,
            builtin_class=selectLongBuiltin,
            builtin_spec=BuiltinParameterSpecs.builtin_int_spec,
            empty_special_class=makeLong0,
        )
def globals_extractor(node):
    """Reformulate calls to the "globals" built-in."""
    return BuiltinParameterSpecs.extractBuiltinArgs(
        builtin_spec=BuiltinParameterSpecs.builtin_globals_spec,
        builtin_class=ExpressionBuiltinGlobals,
        node=node,
    )
def locals_extractor(node):
    """Reformulate calls to the "locals" built-in."""
    # Note: Locals on the module level is really globals.
    locals_scope = node.subnode_called.getLocalsScope()

    def makeLocalsNode(source_ref):
        return makeExpressionBuiltinLocals(
            locals_scope=locals_scope, source_ref=source_ref
        )

    return BuiltinParameterSpecs.extractBuiltinArgs(
        builtin_spec=BuiltinParameterSpecs.builtin_locals_spec,
        builtin_class=makeLocalsNode,
        node=node,
    )
if python_version < 0x300:
    from nuitka.nodes.ExecEvalNodes import ExpressionBuiltinExecfile
    def execfile_extractor(node):
        """Reformulate calls to the Python2-only "execfile" built-in."""
        def wrapExpressionBuiltinExecfileCreation(
            filename, globals_arg, locals_arg, source_ref
        ):
            # The whole "execfile" call is reformulated into an outline body.
            outline_body = ExpressionOutlineBody(
                provider=node.getParentVariableProvider(),
                name="execfile_call",
                source_ref=source_ref,
            )
            # Temp variables for the effective globals/locals, plus the
            # statements that set them up ("tried") and clean up ("final").
            globals_ref, locals_ref, tried, final = wrapEvalGlobalsAndLocals(
                provider=node.getParentVariableProvider(),
                globals_node=globals_arg,
                locals_node=locals_arg,
                temp_scope=outline_body.getOutlineTempScope(),
                source_ref=source_ref,
            )
            # The file content is read via open(filename, "rU").read() and
            # fed to the execfile node.
            tried = makeStatementsSequence(
                statements=(
                    tried,
                    makeStatementReturn(
                        expression=ExpressionBuiltinExecfile(
                            source_code=makeCallNode(
                                makeExpressionAttributeLookup(
                                    expression=ExpressionBuiltinOpen(
                                        filename=filename,
                                        mode=makeConstantRefNode(
                                            constant="rU", source_ref=source_ref
                                        ),
                                        buffering=None,
                                        source_ref=source_ref,
                                    ),
                                    attribute_name="read",
                                    source_ref=source_ref,
                                ),
                                source_ref,
                            ),
                            globals_arg=globals_ref,
                            locals_arg=locals_ref,
                            source_ref=source_ref,
                        ),
                        source_ref=source_ref,
                    ),
                ),
                allow_none=False,
                source_ref=source_ref,
            )
            outline_body.setChild(
                "body",
                makeStatementsSequenceFromStatement(
                    statement=makeTryFinallyStatement(
                        provider=outline_body,
                        tried=tried,
                        final=final,
                        source_ref=source_ref,
                    )
                ),
            )
            return outline_body
        return BuiltinParameterSpecs.extractBuiltinArgs(
            node=node,
            builtin_class=wrapExpressionBuiltinExecfileCreation,
            builtin_spec=BuiltinParameterSpecs.builtin_execfile_spec,
        )
def eval_extractor(node):
    """Reformulate calls to the "eval" built-in into an outline expression."""
    def wrapEvalBuiltin(source, globals_arg, locals_arg, source_ref):
        provider = node.getParentVariableProvider()
        # The whole "eval" call is reformulated into an outline body.
        outline_body = ExpressionOutlineBody(
            provider=node.getParentVariableProvider(),
            name="eval_call",
            source_ref=source_ref,
        )
        # Temp variables for the effective globals/locals, plus the
        # statements that set them up ("tried") and clean up ("final").
        globals_ref, locals_ref, tried, final = wrapEvalGlobalsAndLocals(
            provider=provider,
            globals_node=globals_arg,
            locals_node=locals_arg,
            temp_scope=outline_body.getOutlineTempScope(),
            source_ref=source_ref,
        )
        # The wrapping should not relocate to the "source_ref".
        assert (
            globals_arg is None
            or globals_ref.getSourceReference() == globals_arg.getSourceReference()
        )
        assert (
            locals_arg is None
            or locals_ref.getSourceReference() == locals_arg.getSourceReference()
        )
        # Temp variable that holds the (possibly stripped) source argument.
        source_variable = outline_body.allocateTempVariable(
            temp_scope=None, name="source"
        )
        # Make sure the source temp variable is released in the cleanup too.
        final.setChild(
            "statements",
            final.subnode_statements
            + (
                makeStatementDelVariable(
                    variable=source_variable, tolerant=True, source_ref=source_ref
                ),
            ),
        )
        strip_choice = makeConstantRefNode(constant=(" \t",), source_ref=source_ref)
        # On Python3 the source may be "bytes" too, then the strip argument
        # must be bytes as well, selected at run time by type.
        if python_version >= 0x300:
            strip_choice = ExpressionConditional(
                condition=ExpressionComparisonIs(
                    left=ExpressionBuiltinType1(
                        value=ExpressionTempVariableRef(
                            variable=source_variable, source_ref=source_ref
                        ),
                        source_ref=source_ref,
                    ),
                    right=makeExpressionBuiltinTypeRef(
                        builtin_name="bytes", source_ref=source_ref
                    ),
                    source_ref=source_ref,
                ),
                expression_yes=makeConstantRefNode(
                    constant=(b" \t",), source_ref=source_ref
                ),
                expression_no=strip_choice,
                source_ref=source_ref,
            )
        # Source needs some special treatment for eval, if it's a string, it
        # must be stripped.
        string_fixup = makeStatementAssignmentVariable(
            variable=source_variable,
            source=makeExpressionCall(
                called=makeExpressionAttributeLookup(
                    expression=ExpressionTempVariableRef(
                        variable=source_variable, source_ref=source_ref
                    ),
                    attribute_name="strip",
                    source_ref=source_ref,
                ),
                args=strip_choice, # This is a tuple
                kw=None,
                source_ref=source_ref,
            ),
            source_ref=source_ref,
        )
        # Types that are passed through to "eval" without the strip fixup.
        acceptable_builtin_types = [
            ExpressionBuiltinAnonymousRef(builtin_name="code", source_ref=source_ref)
        ]
        if python_version >= 0x270:
            acceptable_builtin_types.append(
                makeExpressionBuiltinTypeRef(
                    builtin_name="memoryview", source_ref=source_ref
                )
            )
        statements = (
            makeStatementAssignmentVariable(
                variable=source_variable, source=source, source_ref=source_ref
            ),
            # Strip only when the source is not already code/memoryview.
            makeStatementConditional(
                condition=ExpressionOperationNot(
                    operand=ExpressionBuiltinIsinstance(
                        instance=ExpressionTempVariableRef(
                            variable=source_variable, source_ref=source_ref
                        ),
                        classes=makeExpressionMakeTupleOrConstant(
                            elements=acceptable_builtin_types,
                            user_provided=True,
                            source_ref=source_ref,
                        ),
                        source_ref=source_ref,
                    ),
                    source_ref=source_ref,
                ),
                yes_branch=string_fixup,
                no_branch=None,
                source_ref=source_ref,
            ),
            makeStatementReturn(
                expression=ExpressionBuiltinEval(
                    source_code=ExpressionTempVariableRef(
                        variable=source_variable, source_ref=source_ref
                    ),
                    globals_arg=globals_ref,
                    locals_arg=locals_ref,
                    source_ref=source_ref,
                ),
                source_ref=source_ref,
            ),
        )
        tried = makeStatementsSequence(
            statements=(tried,) + statements, allow_none=False, source_ref=source_ref
        )
        outline_body.setChild(
            "body",
            makeStatementsSequenceFromStatement(
                statement=makeTryFinallyStatement(
                    provider=outline_body,
                    tried=tried,
                    final=final,
                    source_ref=source_ref,
                )
            ),
        )
        return outline_body
    return BuiltinParameterSpecs.extractBuiltinArgs(
        node=node,
        builtin_class=wrapEvalBuiltin,
        builtin_spec=BuiltinParameterSpecs.builtin_eval_spec,
    )
if python_version >= 0x300:
    from nuitka.nodes.ExecEvalNodes import ExpressionBuiltinExec
    def exec_extractor(node):
        """Reformulate calls to the Python3 "exec" built-in."""
        def wrapExpressionBuiltinExecCreation(
            source, globals_arg, locals_arg, source_ref
        ):
            provider = node.getParentVariableProvider()
            # The whole "exec" call is reformulated into an outline body.
            outline_body = ExpressionOutlineBody(
                provider=provider, name="exec_call", source_ref=source_ref
            )
            # Temp variables for the effective globals/locals, plus the
            # statements that set them up ("tried") and clean up ("final").
            globals_ref, locals_ref, tried, final = wrapEvalGlobalsAndLocals(
                provider=provider,
                globals_node=globals_arg,
                locals_node=locals_arg,
                temp_scope=outline_body.getOutlineTempScope(),
                source_ref=source_ref,
            )
            tried = makeStatementsSequence(
                statements=(
                    tried,
                    makeStatementReturn(
                        expression=ExpressionBuiltinExec(
                            source_code=source,
                            globals_arg=globals_ref,
                            locals_arg=locals_ref,
                            source_ref=source_ref,
                        ),
                        source_ref=source_ref,
                    ),
                ),
                allow_none=False,
                source_ref=source_ref,
            )
            # Hack: Allow some APIs to work already
            tried.parent = outline_body
            outline_body.setChild(
                "body",
                makeStatementsSequenceFromStatement(
                    statement=makeTryFinallyStatement(
                        provider=provider,
                        tried=tried,
                        final=final,
                        source_ref=source_ref,
                    )
                ),
            )
            return outline_body
        # "exec" takes (source, globals, locals) just like "eval" does, so
        # the eval spec is reused here.
        return BuiltinParameterSpecs.extractBuiltinArgs(
            node=node,
            builtin_class=wrapExpressionBuiltinExecCreation,
            builtin_spec=BuiltinParameterSpecs.builtin_eval_spec,
        )
def compile_extractor(node):
    """Reformulate calls to the "compile" built-in."""

    def wrapExpressionBuiltinCompileCreation(
        source_code, filename, mode, flags, dont_inherit, optimize=None, source_ref=None
    ):
        # "optimize" may not be provided by the call, hence the default.
        return ExpressionBuiltinCompile(
            source_code=source_code,
            filename=filename,
            mode=mode,
            flags=flags,
            dont_inherit=dont_inherit,
            optimize=optimize,
            source_ref=source_ref,
        )

    return BuiltinParameterSpecs.extractBuiltinArgs(
        builtin_spec=BuiltinParameterSpecs.builtin_compile_spec,
        builtin_class=wrapExpressionBuiltinCompileCreation,
        node=node,
    )
def open_extractor(node):
    """Reformulate calls to the "open" built-in."""
    def makeOpen0(source_ref):
        # Zero-argument "open()" always raises; execute the real built-in to
        # capture the exact exception instance and embed it as replacement.
        # pylint: disable=unused-argument
        try:
            # Not giving arguments or context on purpose
            # pylint: disable=consider-using-with,unspecified-encoding
            open()
        except Exception as e: # We want to broad here, pylint: disable=broad-except
            return makeRaiseExceptionReplacementExpressionFromInstance(
                expression=node, exception=e
            )
        else:
            raise NuitkaAssumptionError("open without argument is expected to raise")
    return BuiltinParameterSpecs.extractBuiltinArgs(
        node=node,
        builtin_class=ExpressionBuiltinOpen,
        builtin_spec=BuiltinParameterSpecs.builtin_open_spec,
        empty_special_class=makeOpen0,
    )
def super_extractor(node):
    """Reformulate calls to the "super" built-in.

    Handles the Python3 zero-argument form by resolving "__class__" and the
    first parameter of the enclosing function, raising the matching run time
    errors statically when that cannot work.
    """
    def wrapSuperBuiltin(type_arg, object_arg, source_ref):
        # Zero/one-argument "super" on Python3: derive missing arguments.
        if type_arg is None and python_version >= 0x300:
            # At module level there is no "__class__" to use.
            if provider.isCompiledPythonModule():
                return makeRaiseExceptionReplacementExpression(
                    expression=node,
                    exception_type="RuntimeError",
                    exception_value="super(): no arguments",
                )
            class_variable = provider.getVariableForReference(variable_name="__class__")
            provider.trace_collection.getVariableCurrentTrace(class_variable).addUsage()
            type_arg = ExpressionVariableRef(
                # Ought to be already closure taken due to "super" flag in
                # tree building.
                variable=class_variable,
                source_ref=source_ref,
            )
            # If we already have this as a local variable, then use that
            # instead.
            type_arg_owner = class_variable.getOwner()
            if type_arg_owner is provider or not (
                type_arg_owner.isExpressionFunctionBody()
                or type_arg_owner.isExpressionClassBody()
            ):
                return makeRaiseExceptionReplacementExpression(
                    expression=node,
                    exception_type="SystemError"
                    if python_version < 0x331
                    else "RuntimeError",
                    exception_value="super(): __class__ cell not found",
                )
            if object_arg is None:
                # Generators and the like take the parameters from their
                # creating function body.
                if (
                    provider.isExpressionGeneratorObjectBody()
                    or provider.isExpressionCoroutineObjectBody()
                    or provider.isExpressionAsyncgenObjectBody()
                ):
                    parameter_provider = provider.getParentVariableProvider()
                else:
                    parameter_provider = provider
                if parameter_provider.getParameters().getArgumentCount() == 0:
                    return makeRaiseExceptionReplacementExpression(
                        expression=node,
                        exception_type="RuntimeError",
                        exception_value="super(): no arguments",
                    )
                else:
                    # The implicit object is the first argument, e.g. "self".
                    par1_name = parameter_provider.getParameters().getArgumentNames()[0]
                    object_variable = provider.getVariableForReference(
                        variable_name=par1_name
                    )
                    provider.trace_collection.getVariableCurrentTrace(
                        object_variable
                    ).addUsage()
                    object_arg = ExpressionVariableRef(
                        variable=object_variable, source_ref=source_ref
                    )
                    if not object_arg.getVariable().isParameterVariable():
                        return makeRaiseExceptionReplacementExpression(
                            expression=node,
                            exception_type="SystemError"
                            if python_version < 0x300
                            else "RuntimeError",
                            exception_value="super(): __class__ cell not found",
                        )
            return ExpressionBuiltinSuper0(
                type_arg=type_arg, object_arg=object_arg, source_ref=source_ref
            )
        return ExpressionBuiltinSuper2(
            type_arg=type_arg, object_arg=object_arg, source_ref=source_ref
        )
    provider = node.getParentVariableProvider().getEntryPoint()
    if not provider.isCompiledPythonModule():
        provider.discardFlag("has_super")
    return BuiltinParameterSpecs.extractBuiltinArgs(
        node=node,
        builtin_class=wrapSuperBuiltin,
        builtin_spec=BuiltinParameterSpecs.builtin_super_spec,
    )
def hasattr_extractor(node):
    """Reformulate calls to the "hasattr" built-in."""

    # We need to match the built-in argument names here,
    # pylint: disable=redefined-builtin
    def makeExpressionBuiltinHasattr(object, name, source_ref):
        return ExpressionBuiltinHasattr(
            expression=object, name=name, source_ref=source_ref
        )

    return BuiltinParameterSpecs.extractBuiltinArgs(
        builtin_spec=BuiltinParameterSpecs.builtin_hasattr_spec,
        builtin_class=makeExpressionBuiltinHasattr,
        node=node,
    )
def getattr_extractor(node):
    """Reformulate calls to the "getattr" built-in."""

    # We need to match the built-in argument names here,
    # pylint: disable=redefined-builtin
    def makeExpressionBuiltinGetattr(object, name, default, source_ref):
        return ExpressionBuiltinGetattr(
            expression=object, name=name, default=default, source_ref=source_ref
        )

    return BuiltinParameterSpecs.extractBuiltinArgs(
        builtin_spec=BuiltinParameterSpecs.builtin_getattr_spec,
        builtin_class=makeExpressionBuiltinGetattr,
        node=node,
    )
def setattr_extractor(node):
    """Lower a call to the built-in 'setattr' into a dedicated node."""

    # Arguments mirror the built-in signature on purpose,
    # pylint: disable=redefined-builtin
    def wrapSetattr(object, name, value, source_ref):
        return ExpressionBuiltinSetattr(
            expression=object,
            name=name,
            value=value,
            source_ref=source_ref,
        )

    return BuiltinParameterSpecs.extractBuiltinArgs(
        node=node,
        builtin_class=wrapSetattr,
        builtin_spec=BuiltinParameterSpecs.builtin_setattr_spec,
    )
def isinstance_extractor(node):
    """Lower a call to the built-in 'isinstance' into a dedicated node."""
    spec = BuiltinParameterSpecs.builtin_isinstance_spec

    return BuiltinParameterSpecs.extractBuiltinArgs(
        node=node, builtin_class=ExpressionBuiltinIsinstance, builtin_spec=spec
    )
def issubclass_extractor(node):
    # Lower a call to the built-in 'issubclass' into a dedicated node.
    # NOTE(review): this reuses the 'isinstance' argument spec. Both
    # built-ins take the same two positional arguments, so the shape
    # matches, but confirm a dedicated issubclass spec is not intended.
    return BuiltinParameterSpecs.extractBuiltinArgs(
        node=node,
        builtin_class=ExpressionBuiltinIssubclass,
        builtin_spec=BuiltinParameterSpecs.builtin_isinstance_spec,
    )
def bytearray_extractor(node):
    """Lower calls to the built-in 'bytearray' to specialized nodes."""

    def makeEmptyBytearray(source_ref):
        # 'bytearray()' without arguments is just the empty constant.
        return makeConstantRefNode(constant=bytearray(), source_ref=source_ref)

    def pickBytearrayNode(string, encoding, errors, source_ref):
        # Without an encoding the single argument form applies, otherwise
        # the three argument decode form is needed.
        if encoding is None:
            return ExpressionBuiltinBytearray1(value=string, source_ref=source_ref)

        return ExpressionBuiltinBytearray3(
            string=string,
            encoding=encoding,
            errors=errors,
            source_ref=source_ref,
        )

    return BuiltinParameterSpecs.extractBuiltinArgs(
        node=node,
        builtin_class=pickBytearrayNode,
        builtin_spec=BuiltinParameterSpecs.builtin_bytearray_spec,
        empty_special_class=makeEmptyBytearray,
    )
def slice_extractor(node):
    """Lower calls to the built-in 'slice' into a slice creation node."""

    def buildSlice(start, stop, step, source_ref):
        # Default rules are strange. If one argument is given, it's the
        # second one then.
        if stop is None and start is not None:
            start, stop = None, start

        return makeExpressionBuiltinSlice(
            start=start, stop=stop, step=step, source_ref=source_ref
        )

    return BuiltinParameterSpecs.extractBuiltinArgs(
        node=node,
        builtin_class=buildSlice,
        builtin_spec=BuiltinParameterSpecs.builtin_slice_spec,
    )
def hash_extractor(node):
    """Lower a call to the built-in 'hash' into a dedicated node."""
    spec = BuiltinParameterSpecs.builtin_hash_spec

    return BuiltinParameterSpecs.extractBuiltinArgs(
        node=node, builtin_class=ExpressionBuiltinHash, builtin_spec=spec
    )
def format_extractor(node):
    """Lower calls to the built-in 'format' into a dedicated node."""

    def raiseFormatNoArgs(source_ref):
        # Calling 'format' with no arguments is a statically known TypeError.
        # pylint: disable=unused-argument
        return makeRaiseExceptionReplacementExpressionFromInstance(
            expression=node,
            exception=TypeError("format() takes at least 1 argument (0 given)"),
        )

    return BuiltinParameterSpecs.extractBuiltinArgs(
        node=node,
        builtin_class=ExpressionBuiltinFormat,
        builtin_spec=BuiltinParameterSpecs.builtin_format_spec,
        empty_special_class=raiseFormatNoArgs,
    )
def staticmethod_extractor(node):
    """Lower calls to the built-in 'staticmethod' into a dedicated node."""

    def raiseStaticmethodNoArgs(source_ref):
        # Calling 'staticmethod' without arguments is a statically known
        # TypeError.
        # pylint: disable=unused-argument
        return makeRaiseExceptionReplacementExpressionFromInstance(
            expression=node,
            exception=TypeError("staticmethod expected 1 arguments, got 0"),
        )

    return BuiltinParameterSpecs.extractBuiltinArgs(
        node=node,
        builtin_class=ExpressionBuiltinStaticmethod,
        builtin_spec=BuiltinParameterSpecs.builtin_staticmethod_spec,
        empty_special_class=raiseStaticmethodNoArgs,
    )
def classmethod_extractor(node):
    """Lower calls to the built-in 'classmethod' into a dedicated node.

    A call without arguments is replaced by raising the TypeError that
    CPython would produce.
    """

    # Renamed from 'makeStaticmethod0', a copy-paste leftover of the
    # staticmethod variant; it handles classmethod.
    def makeClassmethod0(source_ref):
        # pylint: disable=unused-argument
        return makeRaiseExceptionReplacementExpressionFromInstance(
            expression=node,
            exception=TypeError("classmethod expected 1 arguments, got 0"),
        )

    return BuiltinParameterSpecs.extractBuiltinArgs(
        node=node,
        builtin_class=ExpressionBuiltinClassmethod,
        builtin_spec=BuiltinParameterSpecs.builtin_classmethod_spec,
        empty_special_class=makeClassmethod0,
    )
def divmod_extractor(node):
    """Lower a call to the built-in 'divmod' into a binary operation node."""
    spec = BuiltinParameterSpecs.builtin_divmod_spec

    return BuiltinParameterSpecs.extractBuiltinArgs(
        node=node, builtin_class=ExpressionOperationBinaryDivmod, builtin_spec=spec
    )
# Maps a built-in name to the extractor that lowers calls to it into a
# dedicated node. Consulted by computeBuiltinCall below.
_dispatch_dict = {
    "compile": compile_extractor,
    "globals": globals_extractor,
    "locals": locals_extractor,
    "eval": eval_extractor,
    "dir": dir_extractor,
    "vars": vars_extractor,
    "__import__": import_extractor,
    "chr": chr_extractor,
    "ord": ord_extractor,
    "bin": bin_extractor,
    "oct": oct_extractor,
    "hex": hex_extractor,
    "id": id_extractor,
    "type": type_extractor,
    "iter": iter_extractor,
    "next": next_extractor,
    "sum": sum_extractor,
    "tuple": tuple_extractor,
    "list": list_extractor,
    "dict": dict_extractor,
    "set": set_extractor,
    "frozenset": frozenset_extractor,
    "float": float_extractor,
    "complex": complex_extractor,
    "str": str_extractor,
    "bool": bool_extractor,
    "int": int_extractor,
    "repr": repr_extractor,
    "len": len_extractor,
    "any": any_extractor,
    "abs": abs_extractor,
    "all": all_extractor,
    "super": super_extractor,
    "hasattr": hasattr_extractor,
    "getattr": getattr_extractor,
    "setattr": setattr_extractor,
    "isinstance": isinstance_extractor,
    "issubclass": issubclass_extractor,
    "bytearray": bytearray_extractor,
    "slice": slice_extractor,
    "hash": hash_extractor,
    "format": format_extractor,
    "open": open_extractor,
    "staticmethod": staticmethod_extractor,
    "classmethod": classmethod_extractor,
    "divmod": divmod_extractor,
}

# Version specific built-ins, keyed off the Python version being compiled.
if python_version < 0x300:
    # These are not in Python3
    _dispatch_dict["long"] = long_extractor
    _dispatch_dict["unicode"] = unicode_extractor
    _dispatch_dict["execfile"] = execfile_extractor

    _dispatch_dict["xrange"] = xrange_extractor
    _dispatch_dict["range"] = range_extractor
else:
    # This one is not in Python2:
    _dispatch_dict["bytes"] = bytes_extractor
    _dispatch_dict["ascii"] = ascii_extractor
    _dispatch_dict["exec"] = exec_extractor

    # The Python3 range is really an xrange, use that.
    _dispatch_dict["range"] = xrange_extractor
def check():
    """Sanity check: every dispatched name must be a known built-in."""
    from nuitka.Builtins import builtin_names

    for dispatched_name in _dispatch_dict:
        assert dispatched_name in builtin_names, dispatched_name


check()
# Built-ins that are deliberately not optimized yet, so missing coverage
# can be told apart from plain oversights.
_builtin_ignore_list = (
    # Not supporting 'print', because it could be replaced, and is not
    # worth the effort yet.
    "print",
    # TODO: This could, and should be supported, as we could e.g. lower
    # types easily for it.
    "sorted",
    # TODO: This would be very worthwhile, as it could easily optimize
    # its iteration away.
    "zip",
    # TODO: This would be most precious due to the type hint it gives
    "enumerate",
    # TODO: Also worthwhile for known values.
    "reversed",
    # TODO: Not sure what this really is about.
    "memoryview",
)
def _describeNewNode(builtin_name, inspect_node):
    """Describe the change for better understanding.

    Picks a (tags, message) pair for the optimization trace based on the
    kind of replacement node produced for the built-in call.
    """
    # Don't mention side effects, that's not what we care about.
    if inspect_node.isExpressionSideEffects():
        inspect_node = inspect_node.subnode_expression

    if inspect_node.isExpressionBuiltinImport():
        tags = "new_import"
        message = """\
Replaced dynamic "__import__" call with static built-in call."""
    elif inspect_node.isExpressionBuiltin() or inspect_node.isStatementExec():
        tags = "new_builtin"
        message = "Replaced call to built-in '%s' with built-in call '%s'." % (
            builtin_name,
            inspect_node.kind,
        )
    elif inspect_node.isExpressionRaiseException():
        tags = "new_raise"
        message = """\
Replaced call to built-in '%s' with exception raise.""" % (
            builtin_name,
        )
    elif inspect_node.isExpressionOperationBinary():
        tags = "new_expression"
        message = """\
Replaced call to built-in '%s' with binary operation '%s'.""" % (
            builtin_name,
            inspect_node.getOperator(),
        )
    elif inspect_node.isExpressionOperationUnary():
        tags = "new_expression"
        message = """\
Replaced call to built-in '%s' with unary operation '%s'.""" % (
            builtin_name,
            inspect_node.getOperator(),
        )
    elif inspect_node.isExpressionCall():
        tags = "new_expression"
        message = """\
Replaced call to built-in '%s' with call.""" % (
            builtin_name,
        )
    elif inspect_node.isExpressionOutlineBody():
        tags = "new_expression"
        message = (
            """\
Replaced call to built-in '%s' with outlined call."""
            % builtin_name
        )
    elif inspect_node.isExpressionConstantRef():
        tags = "new_expression"
        message = (
            """\
Replaced call to built-in '%s' with constant value."""
            % builtin_name
        )
    else:
        # Any other replacement kind means a case is missing above.
        assert False, (builtin_name, "->", inspect_node)

    return tags, message
def computeBuiltinCall(builtin_name, call_node):
    """Attempt to optimize a call to the given built-in name.

    Returns a (node, tags, message) triple. For unhandled built-ins the
    original node comes back with no tags and no message.
    """
    extractor = _dispatch_dict.get(builtin_name)

    if extractor is None:
        # TODO: Achieve coverage of all built-ins in at least the ignore list.
        # if False and builtin_name not in _builtin_ignore_list:
        #     optimization_logger.warning(
        #         "Not handling built-in %r, consider support." % builtin_name
        #     )
        return call_node, None, None

    new_node = extractor(call_node)

    assert new_node is not call_node, builtin_name
    assert new_node is not None, builtin_name

    # For traces, we ignore side effects and describe only the core
    # replacement itself.
    tags, message = _describeNewNode(builtin_name, new_node)

    return new_node, tags, message
/AnkiServer-2.0.6.tar.gz/AnkiServer-2.0.6/anki-bundled/anki/notes.py |
from anki.utils import fieldChecksum, intTime, \
joinFields, splitFields, stripHTMLMedia, timestampID, guid64
class Note(object):
    """One note of the collection, wrapping a row of the ``notes`` table.

    Fields are exposed through a dict-like interface keyed by field name,
    tags as a plain list of strings.
    """

    def __init__(self, col, model=None, id=None):
        """Load an existing note by ``id``, or create a new one for ``model``.

        Exactly one of ``model`` and ``id`` may be given.
        """
        assert not (model and id)
        self.col = col
        if id:
            self.id = id
            self.load()
        else:
            self.id = timestampID(col.db, "notes")
            self.guid = guid64()
            self._model = model
            self.mid = model['id']
            self.tags = []
            self.fields = [""] * len(self._model['flds'])
            self.flags = 0
            self.data = ""
            self._fmap = self.col.models.fieldMap(self._model)
            self.scm = self.col.scm

    def load(self):
        """Read this note's row from the database into the instance."""
        (self.guid,
         self.mid,
         self.mod,
         self.usn,
         self.tags,
         self.fields,
         self.flags,
         self.data) = self.col.db.first("""
select guid, mid, mod, usn, tags, flds, flags, data
from notes where id = ?""", self.id)
        self.fields = splitFields(self.fields)
        self.tags = self.col.tags.split(self.tags)
        self._model = self.col.models.get(self.mid)
        self._fmap = self.col.models.fieldMap(self._model)
        self.scm = self.col.scm

    def flush(self, mod=None):
        "If fields or tags have changed, write changes to disk."
        # Refuse to write when the schema changed since this note was read.
        assert self.scm == self.col.scm
        self._preFlush()
        sfld = stripHTMLMedia(self.fields[self.col.models.sortIdx(self._model)])
        tags = self.stringTags()
        fields = self.joinedFields()
        # Skip the write when nothing changed and no explicit mod time given.
        if not mod and self.col.db.scalar(
            "select 1 from notes where id = ? and tags = ? and flds = ?",
            self.id, tags, fields):
            return
        csum = fieldChecksum(self.fields[0])
        self.mod = mod if mod else intTime()
        self.usn = self.col.usn()
        self.col.db.execute("""
insert or replace into notes values (?,?,?,?,?,?,?,?,?,?,?)""",
                            self.id, self.guid, self.mid,
                            self.mod, self.usn, tags,
                            fields, sfld, csum, self.flags,
                            self.data)
        self.col.tags.register(self.tags)
        self._postFlush()

    def joinedFields(self):
        """Return the fields joined into the on-disk 'flds' string."""
        return joinFields(self.fields)

    def cards(self):
        """Return the cards generated from this note, in template order."""
        return [self.col.getCard(id) for id in self.col.db.list(
            "select id from cards where nid = ? order by ord", self.id)]

    def model(self):
        return self._model

    # Dict interface
    ##################################################

    def keys(self):
        return self._fmap.keys()

    def values(self):
        return self.fields

    def items(self):
        # _fmap values are (ordinal, field-dict) pairs; sort by ordinal so
        # the fields come out in model order.
        return [(f['name'], self.fields[ordinal])
                for ordinal, f in sorted(self._fmap.values())]

    def _fieldOrd(self, key):
        # Map a field name to its ordinal; any lookup failure becomes a
        # KeyError. Narrowed from a bare 'except:' so KeyboardInterrupt
        # and SystemExit are no longer swallowed.
        try:
            return self._fmap[key][0]
        except Exception:
            raise KeyError(key)

    def __getitem__(self, key):
        return self.fields[self._fieldOrd(key)]

    def __setitem__(self, key, value):
        self.fields[self._fieldOrd(key)] = value

    def __contains__(self, key):
        return key in self._fmap

    # Tags
    ##################################################

    def hasTag(self, tag):
        return self.col.tags.inList(tag, self.tags)

    def stringTags(self):
        return self.col.tags.join(self.col.tags.canonify(self.tags))

    def setTagsFromStr(self, str):
        # 'str' shadows the built-in but is part of the public signature.
        self.tags = self.col.tags.split(str)

    def delTag(self, tag):
        """Remove all tags matching ``tag`` case-insensitively."""
        wanted = tag.lower()
        self.tags = [t for t in self.tags if t.lower() != wanted]

    def addTag(self, tag):
        # duplicates will be stripped on save
        self.tags.append(tag)

    # Unique/duplicate check
    ##################################################

    def dupeOrEmpty(self):
        "1 if first is empty; 2 if first is a duplicate, False otherwise."
        val = self.fields[0]
        if not val.strip():
            return 1
        csum = fieldChecksum(val)
        # find any matching csums and compare
        for flds in self.col.db.list(
            "select flds from notes where csum = ? and id != ? and mid = ?",
            csum, self.id or 0, self.mid):
            if stripHTMLMedia(
                splitFields(flds)[0]) == stripHTMLMedia(self.fields[0]):
                return 2
        return False

    # Flushing cloze notes
    ##################################################

    def _preFlush(self):
        # have we been added yet?
        self.newlyAdded = not self.col.db.scalar(
            "select 1 from cards where nid = ?", self.id)

    def _postFlush(self):
        # generate missing cards
        if not self.newlyAdded:
            self.col.genCards([self.id])
            # popping up a dialog while editing is confusing; instead we can
            # document that the user should open the templates window to
            # garbage collect empty cards
            #self.col.remEmptyCards(ids)
/ChanChanAuth-0.0.4.tar.gz/ChanChanAuth-0.0.4/src/chanchanauth/client.py | import json
from uuid import uuid4
import requests
from cryptography.fernet import Fernet, InvalidToken
from chanchanauth.types import AuthenticationResponse, RegistrationResponse, HWIDResetResponse
class Client(object):
    """Thin wrapper around the ccauth.app HTTP API.

    All methods return a typed response object; network or parsing
    failures are reported through the response's error fields instead of
    raising.
    """

    def __init__(self, aid: str, apikey: str, secret: str = None):
        self.aid = aid
        self.apikey = apikey
        # The Fernet key is only required by authenticate(), which encrypts
        # its payload; register()/hwid_reset() work without it.
        self.fernet = None if secret is None else Fernet(bytes(secret, "utf-8"))

    def authenticate(self, username: str, password: str, hwid: str):
        """Authenticate a user. Requires the client to have been built with
        a ``secret``; raises ValueError otherwise."""
        if self.fernet is None:
            raise ValueError("`secret` must not be none if you are authenticating.")
        try:
            # json.dumps builds valid JSON even when credentials contain
            # quote characters; the previous str(dict).replace("'", '"')
            # approach produced broken payloads in that case.
            payload = json.dumps({
                "username": username,
                "password": password,
                "hwid": hwid,
                "sessionID": str(uuid4())
            })
            response = requests.get(
                url=f"https://api.ccauth.app/api/v3/authenticate?key={self.apikey}",
                headers={
                    "aid": self.aid,
                    "data": self.fernet.encrypt(bytes(payload, "utf-8")).decode()
                }
            )
        except Exception:
            return AuthenticationResponse(
                error=True,
                error_message="Failed to connect to authentication server."
            )
        try:
            resp_dict = json.loads(self.fernet.decrypt(bytes(response.text, "utf-8")).decode())
            if response.status_code == 200:
                # NOTE(review): eval() of server-supplied strings is risky if
                # the server is hostile. Kept for compatibility; unexpected
                # values raise and fall through to the generic handler below.
                return AuthenticationResponse(
                    is_authenticated=eval(resp_dict["is_Authenticated"]),
                    session_id=resp_dict["session_ID"],
                    expired_license=eval(resp_dict["expired_license"]),
                    invalid_hwid=eval(resp_dict["invalid_hwid"]),
                    invalid_credentials=eval(resp_dict["invalid_credentials"]),
                    account_type=resp_dict["accountType"]
                )
            else:
                return AuthenticationResponse(
                    error=True,
                    error_message=resp_dict["type"]
                )
        except InvalidToken:
            # Body was not encrypted: the server returned a plain JSON error.
            resp_dict = response.json()
            return AuthenticationResponse(
                error=eval(resp_dict["error"]),
                error_message=resp_dict["type"]
            )
        except Exception:
            return AuthenticationResponse(
                error=True,
                error_message="Failed to parse response."
            )

    def register(self, username: str, password: str, hwid: str, discord: str, license: str):
        """Register a new user against a license key."""
        try:
            response = requests.get(
                url=f"https://api.ccauth.app/api/v2/register?key={self.apikey}",
                headers={
                    "aid": self.aid,
                    "discord": discord,
                    "regkey": license,
                    "hwid": hwid,
                    "pass": password,
                    "user": username
                }
            )
        except Exception:
            return RegistrationResponse(
                error=True,
                error_message="Failed to connect to authentication server."
            )
        try:
            resp_dict = response.json()
            if response.status_code == 200:
                # NOTE(review): eval() of server-supplied strings, see above.
                return RegistrationResponse(
                    registration_enabled=eval(resp_dict["registration_enabled"]),
                    invalid_key=eval(resp_dict["invalid_key"]),
                    success=eval(resp_dict["success"]),
                    max_users=eval(resp_dict["max_users"])
                )
            else:
                return RegistrationResponse(
                    error=True,
                    error_message=resp_dict["type"]
                )
        except json.JSONDecodeError:
            return RegistrationResponse(
                error=True,
                error_message="Failed to parse response."
            )
        except Exception:
            return RegistrationResponse(
                error=True,
                error_message="Server returned something unexpected."
            )

    def hwid_reset(self, username: str, password: str, hwid: str, hwid_key: str):
        """Bind a new HWID to the account using a HWID reset key."""
        try:
            response = requests.get(
                url=f"https://api.ccauth.app/api/v3/reset?key={self.apikey}",
                headers={
                    "hwidresetkey": hwid_key,
                    "aid": self.aid,
                    "newhwid": hwid,
                    "user": username,
                    "pass": password
                }
            )
        except Exception:
            return HWIDResetResponse(
                error=True,
                error_message="Failed to connect to authentication server."
            )
        try:
            resp_dict = response.json()
            if response.status_code == 200:
                # NOTE(review): eval() of server-supplied strings, see above.
                return HWIDResetResponse(
                    hwid_resets=eval(resp_dict["hwid_resets"]),
                    invalid_key=eval(resp_dict["invalid_key"]),
                    invalid_credentials=eval(resp_dict["invalid_credentials"]),
                    success=eval(resp_dict["success"]),
                    reset_today=eval(resp_dict["reset_today"])
                )
            else:
                return HWIDResetResponse(
                    error=True,
                    error_message=resp_dict["type"]
                )
        except json.JSONDecodeError:
            # Fixed: this branch previously returned a RegistrationResponse,
            # breaking callers that expect a HWIDResetResponse.
            return HWIDResetResponse(
                error=True,
                error_message="Failed to parse response."
            )
        except Exception:
            return HWIDResetResponse(
                error=True,
                error_message="Server returned something unexpected."
            )
/Fern2-1.4.1.tar.gz/Fern2-1.4.1/fern/models/model.py | """"model file"""
import logging
from tensorflow.keras import Model, layers
logger = logging.getLogger()
class FernModel(object):
    """Base wrapper holding a Keras model plus its hyper parameters."""

    # The underlying Keras model, produced by build() in __init__.
    model: Model

    def __init__(self, output_shape, max_seq_len, library_len, initializer='he_normal'):
        """
        Store the hyper parameters and build the underlying model.

        Parameters
        ----------
        output_shape : dict[str, int], list[int], tuple[int]
            output shape per head, without the batch dimension
        max_seq_len : int
            maximum length of an input sequence
        library_len : int
            size of the word library
        initializer : str
            initializer used throughout the network
        """
        self.output_shape = output_shape
        self.max_seq_len = max_seq_len
        self.library_len = library_len
        self.initializer = initializer
        self.name = self.__class__.__name__

        # Build only after all hyper parameters are in place.
        self.model = self.build()
        self.print_summary()

        # Expose the Keras training API directly on the wrapper.
        self.compile = self.model.compile
        self.fit = self.model.fit

    def print_summary(self):
        """
        Log the Keras summary of the wrapped model.
        """
        captured = []
        self.model.summary(print_fn=captured.append)
        summary = '\n'.join(captured)
        logger.info(f"\n{summary}")

    def build(self):
        """
        Create the Keras model; subclasses must override this.

        Returns
        -------
        Model
            the constructed model
        """
        raise NotImplementedError

    def save(self, path):
        """
        Persist the whole model to disk.

        Parameters
        ----------
        path : str, pathlib.Path
            The model file path
        """
        self.model.save(path)

    def load(self, path):
        """
        Restore previously saved weights.

        Parameters
        ----------
        path : str, pathlib.Path
            The model file path
        """
        self.model.load_weights(path)

    @property
    def predict(self):
        # Delegate prediction to the wrapped Keras model.
        return self.model.predict

    def __call__(self, *args, **kwargs):
        return self.model(*args, **kwargs)

    @property
    def trainable_variables(self):
        return self.model.trainable_variables
class TextCNN(FernModel):
    """
    Convolutional text classifier with one softmax head per target.

    References
    ----------
    Optimization of model Convolutional Neural Networks for Sentence Classification
    (https://arxiv.org/pdf/1408.5882.pdf)
    """

    def build(self):
        tokens = layers.Input(shape=(self.max_seq_len,))

        # Embedding followed by a single width-5 convolution block.
        net = layers.Embedding(
            self.library_len, 256, embeddings_initializer=self.initializer
        )(tokens)
        net = layers.Conv1D(256, 5, padding='same', kernel_initializer=self.initializer)(net)
        net = layers.BatchNormalization()(net)
        net = layers.Activation('relu')(net)
        net = layers.GlobalMaxPool1D()(net)

        # Shared dense trunk feeding the per-task heads.
        net = layers.Dense(128, kernel_initializer=self.initializer)(net)
        net = layers.BatchNormalization()(net)
        net = layers.Activation('relu')(net)

        # One softmax output per entry of output_shape; Keras treats a
        # single-element output list as a single output.
        heads = [
            layers.Dense(
                self.output_shape[key],
                kernel_initializer=self.initializer,
                activation='softmax',
                name=key,
            )(net)
            for key in self.output_shape
        ]

        return Model(inputs=tokens, outputs=heads, name=self.name)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.