query
stringlengths
5
1.23k
positive
stringlengths
53
15.2k
id_
int64
0
252k
task_name
stringlengths
87
242
negative
listlengths
20
553
Rectangle with a solid central region then Gaussian fall - off at the edges .
def smooth_rectangle ( x , y , rec_w , rec_h , gaussian_width_x , gaussian_width_y ) : gaussian_x_coord = abs ( x ) - rec_w / 2.0 gaussian_y_coord = abs ( y ) - rec_h / 2.0 box_x = np . less ( gaussian_x_coord , 0.0 ) box_y = np . less ( gaussian_y_coord , 0.0 ) sigmasq_x = gaussian_width_x * gaussian_width_x sigmasq_y = gaussian_width_y * gaussian_width_y with float_error_ignore ( ) : falloff_x = x * 0.0 if sigmasq_x == 0.0 else np . exp ( np . divide ( - gaussian_x_coord * gaussian_x_coord , 2 * sigmasq_x ) ) falloff_y = y * 0.0 if sigmasq_y == 0.0 else np . exp ( np . divide ( - gaussian_y_coord * gaussian_y_coord , 2 * sigmasq_y ) ) return np . minimum ( np . maximum ( box_x , falloff_x ) , np . maximum ( box_y , falloff_y ) )
1,400
https://github.com/pyviz/imagen/blob/53c5685c880f54b42795964d8db50b02e8590e88/imagen/patternfn.py#L178-L197
[ "def", "getDatabaseStats", "(", "self", ")", ":", "headers", "=", "(", "'datname'", ",", "'numbackends'", ",", "'xact_commit'", ",", "'xact_rollback'", ",", "'blks_read'", ",", "'blks_hit'", ",", "'tup_returned'", ",", "'tup_fetched'", ",", "'tup_inserted'", ",", "'tup_updated'", ",", "'tup_deleted'", ",", "'disk_size'", ")", "cur", "=", "self", ".", "_conn", ".", "cursor", "(", ")", "cur", ".", "execute", "(", "\"SELECT %s, pg_database_size(datname) FROM pg_stat_database;\"", "%", "\",\"", ".", "join", "(", "headers", "[", ":", "-", "1", "]", ")", ")", "rows", "=", "cur", ".", "fetchall", "(", ")", "dbstats", "=", "self", ".", "_createStatsDict", "(", "headers", ",", "rows", ")", "totals", "=", "self", ".", "_createTotalsDict", "(", "headers", ",", "rows", ")", "return", "{", "'databases'", ":", "dbstats", ",", "'totals'", ":", "totals", "}" ]
Pack an arbitrary set of iterables and non - iterables into tuples .
def pack_tups ( * args ) : # Imports import numpy as np # Debug flag _DEBUG = False # Marker value for non-iterable items NOT_ITER = - 1 # Uninitialized test value UNINIT_VAL = - 1 # Print the input if in debug mode if _DEBUG : # pragma: no cover print ( "args = {0}" . format ( args ) ) # Non-iterable subclass of str class StrNoIter ( str ) : """ Non-iterable subclass of |str|. """ def __iter__ ( self ) : raise NotImplementedError ( "Non-iterable string" ) ## end def __iter__ ## end class StrNoIter # Re-wrap input arguments with non-iterable strings if required mod_args = [ ( StrNoIter ( a ) if isinstance ( a , str ) else a ) for a in args ] # Determine the length or non-iterable status of each item and store # the maximum value (depends on NOT_ITER < 0) iterlens = [ ( len ( a ) if iterable ( a ) else NOT_ITER ) for a in mod_args ] maxiter = max ( iterlens ) # Check to ensure all iterables are the same length if not all ( map ( lambda v : v in ( NOT_ITER , maxiter ) , iterlens ) ) : raise ValueError ( "All iterable items must be of equal length" ) ## end if # If everything is non-iterable, just return the args tuple wrapped in # a list (as above, depends on NOT_ITER < 0) if maxiter == NOT_ITER : return [ args ] ## end if # Swap any non-iterables for a suitable length repeat, and zip to # tuples for return tups = list ( zip ( * [ ( np . repeat ( a , maxiter ) if l == NOT_ITER else a ) for ( a , l ) in zip ( mod_args , iterlens ) ] ) ) # Dump the resulting tuples, if in debug mode if _DEBUG : # pragma: no cover print ( "tups = {0}" . format ( tups ) ) ## end if # Return the tuples return tups
1,401
https://github.com/bskinn/opan/blob/0b1b21662df6abc971407a9386db21a8796fbfe5/opan/utils/base.py#L44-L139
[ "def", "finish_upload_and_wait", "(", "self", ",", "component", ",", "length", ",", "timeout", "=", "2", ",", "interval", "=", "0.1", ")", ":", "try", ":", "rsp", "=", "self", ".", "finish_firmware_upload", "(", "component", ",", "length", ")", "check_completion_code", "(", "rsp", ".", "completion_code", ")", "except", "CompletionCodeError", "as", "e", ":", "if", "e", ".", "cc", "==", "CC_LONG_DURATION_CMD_IN_PROGRESS", ":", "self", ".", "wait_for_long_duration_command", "(", "constants", ".", "CMDID_HPM_FINISH_FIRMWARE_UPLOAD", ",", "timeout", ",", "interval", ")", "else", ":", "raise", "HpmError", "(", "'finish_firmware_upload CC=0x%02x'", "%", "e", ".", "cc", ")" ]
Performs a safe typecast .
def safe_cast ( invar , totype ) : # Make the typecast. Just use Python built-in exceptioning outvar = totype ( invar ) # Check that the cast type matches if not isinstance ( outvar , totype ) : raise TypeError ( "Result of cast to '{0}' is '{1}'" . format ( totype , type ( outvar ) ) ) ## end if # Success; return the cast value return outvar
1,402
https://github.com/bskinn/opan/blob/0b1b21662df6abc971407a9386db21a8796fbfe5/opan/utils/base.py#L168-L205
[ "def", "returnJobReqs", "(", "self", ",", "jobReqs", ")", ":", "# Since we are only reading this job's specific values from the state file, we don't", "# need a lock", "jobState", "=", "self", ".", "_JobState", "(", "self", ".", "_CacheState", ".", "_load", "(", "self", ".", "cacheStateFile", ")", ".", "jobState", "[", "self", ".", "jobID", "]", ")", "for", "x", "in", "list", "(", "jobState", ".", "jobSpecificFiles", ".", "keys", "(", ")", ")", ":", "self", ".", "deleteLocalFile", "(", "x", ")", "with", "self", ".", "_CacheState", ".", "open", "(", "self", ")", "as", "cacheInfo", ":", "cacheInfo", ".", "sigmaJob", "-=", "jobReqs" ]
Generate an hour - minutes - seconds timestamp from an interval in seconds .
def make_timestamp ( el_time ) : # Calc hours hrs = el_time // 3600.0 # Calc minutes mins = ( el_time % 3600.0 ) // 60.0 # Calc seconds secs = el_time % 60.0 # Construct timestamp string stamp = "{0}h {1}m {2}s" . format ( int ( hrs ) , int ( mins ) , int ( secs ) ) # Return return stamp
1,403
https://github.com/bskinn/opan/blob/0b1b21662df6abc971407a9386db21a8796fbfe5/opan/utils/base.py#L210-L244
[ "def", "undefine", "(", "self", ")", ":", "if", "lib", ".", "EnvUndefgeneric", "(", "self", ".", "_env", ",", "self", ".", "_gnc", ")", "!=", "1", ":", "raise", "CLIPSError", "(", "self", ".", "_env", ")", "self", ".", "_env", "=", "None" ]
Check for consistency of two geometries and atom symbol lists
def check_geom ( c1 , a1 , c2 , a2 , tol = _DEF . XYZ_COORD_MATCH_TOL ) : # Import(s) from . . const import atom_num import numpy as np from . . const import EnumCheckGeomMismatch as ECGM # Initialize return value to success condition match = True #** Check coords for suitable shape. Assume 1-D np.arrays. if not len ( c1 . shape ) == 1 : # Cannot coerce to vector; complain. raise ValueError ( ( "'c1' is not a vector." ) ) ## end if if not len ( c2 . shape ) == 1 : # Cannot coerce to vector; complain. raise ValueError ( ( "'c2' is not a vector." ) ) ## end if #** Check atoms for suitable shape. Assume lists of strings, so # convert to np.array to check. if not len ( a1 . shape ) == 1 : # Not a vector; complain raise ValueError ( ( "'a1' is not a simple list." ) ) ## end if if not len ( a2 . shape ) == 1 : # Not a vector; complain. raise ValueError ( ( "'a2' is not a simple list." ) ) ## end if #** Confirm proper lengths of coords vs atoms if not c1 . shape [ 0 ] == 3 * a1 . shape [ 0 ] : raise ValueError ( "len(c1) != 3*len(a1)" ) ## end if if not c2 . shape [ 0 ] == 3 * a2 . shape [ 0 ] : raise ValueError ( "len(c2) != 3*len(a2)" ) ## end if #** Confirm matching lengths of coords and atoms w/corresponding # objects among the two geometries if not c1 . shape [ 0 ] == c2 . shape [ 0 ] : match = False fail_type = ECGM . DIMENSION return match , fail_type , None ## end if #** Element-wise check for geometry match to within 'tol' fail_loc = np . less_equal ( np . abs ( np . subtract ( c1 , c2 ) ) , tol ) if sum ( fail_loc ) != c2 . shape [ 0 ] : # Count of matching coordinates should equal the number of # coordinates. If not, complain with 'coord_mismatch' fail type. match = False fail_type = ECGM . COORDS return match , fail_type , fail_loc ## end if #** Element-wise check for atoms match. Quietly convert both input and # instance atom arrays to atom_nums to allow np.equals comparison. if np . issubdtype ( a1 . dtype , np . 
dtype ( 'str' ) ) : # Presume atomic symbol data and attempt conversion a1 = np . array ( [ atom_num [ e ] for e in a1 ] ) ## end if if np . issubdtype ( a2 . dtype , np . dtype ( 'str' ) ) : # Presume atomic symbol data and attempt conversion a2 = np . array ( [ atom_num [ e ] for e in a2 ] ) ## end if fail_loc = np . equal ( a1 , a2 ) #** Perform the test to ensure all atoms match. if sum ( fail_loc ) != a2 . shape [ 0 ] : # Count of matching atoms should equal number of atoms. If not, # complain with the 'atom_mismatch' fail type. match = False fail_type = ECGM . ATOMS return match , fail_type , fail_loc #** If reached here, all tests passed; return success. return match , None , None
1,404
https://github.com/bskinn/opan/blob/0b1b21662df6abc971407a9386db21a8796fbfe5/opan/utils/base.py#L250-L424
[ "def", "stop", "(", "self", ")", ":", "self", ".", "camera", ".", "_get_config", "(", ")", "[", "'actions'", "]", "[", "'movie'", "]", ".", "set", "(", "False", ")", "self", ".", "videofile", "=", "self", ".", "camera", ".", "_wait_for_event", "(", "event_type", "=", "lib", ".", "GP_EVENT_FILE_ADDED", ")", "if", "self", ".", "_old_captarget", "!=", "\"Memory card\"", ":", "self", ".", "camera", ".", "config", "[", "'settings'", "]", "[", "'capturetarget'", "]", ".", "set", "(", "self", ".", "_old_captarget", ")" ]
Perform substitution of content into tagged string .
def template_subst ( template , subs , delims = ( '<' , '>' ) ) : # Store the template into the working variable subst_text = template # Iterate over subs and perform the .replace() calls for ( k , v ) in subs . items ( ) : subst_text = subst_text . replace ( delims [ 0 ] + k + delims [ 1 ] , v ) ## next tup # Return the result return subst_text
1,405
https://github.com/bskinn/opan/blob/0b1b21662df6abc971407a9386db21a8796fbfe5/opan/utils/base.py#L429-L488
[ "def", "delay_and_stop", "(", "duration", ",", "dll", ",", "device_number", ")", ":", "xinput", "=", "getattr", "(", "ctypes", ".", "windll", ",", "dll", ")", "time", ".", "sleep", "(", "duration", "/", "1000", ")", "xinput_set_state", "=", "xinput", ".", "XInputSetState", "xinput_set_state", ".", "argtypes", "=", "[", "ctypes", ".", "c_uint", ",", "ctypes", ".", "POINTER", "(", "XinputVibration", ")", "]", "xinput_set_state", ".", "restype", "=", "ctypes", ".", "c_uint", "vibration", "=", "XinputVibration", "(", "0", ",", "0", ")", "xinput_set_state", "(", "device_number", ",", "ctypes", ".", "byref", "(", "vibration", ")", ")" ]
Assert a value is an |nparray| of NumPy floats .
def assert_npfloatarray ( obj , varname , desc , exc , tc , errsrc ) : # Imports import numpy as np # Check for whether member or object is to be checked if varname is None : var = obj else : # Try to get the variable to be typechecked try : var = getattr ( obj , varname ) except AttributeError : raise exc ( tc , "Attribute '{0}' not defined in '{1}'" . format ( varname , obj ) , errsrc ) ## end try ## end if # Try to pull the np dtype off of it try : dt = var . dtype except AttributeError : raise exc ( tc , "'{0}' is not an np.array (lacks a 'dtype' member)" . format ( desc ) , errsrc ) else : if not var . shape : raise exc ( tc , "'{0}' is not an np.array ('len(shape)' < 1)" . format ( desc ) , errsrc ) ## end try # Confirm dtype inherits from np.float if not np . issubdtype ( dt , np . float ) : raise exc ( tc , "'{0}' is not an np.array of np.float" . format ( desc ) , errsrc )
1,406
https://github.com/bskinn/opan/blob/0b1b21662df6abc971407a9386db21a8796fbfe5/opan/utils/base.py#L531-L609
[ "def", "DeactivateCard", "(", "self", ",", "card", ")", ":", "if", "hasattr", "(", "card", ",", "'connection'", ")", ":", "card", ".", "connection", ".", "disconnect", "(", ")", "if", "None", "!=", "self", ".", "parent", ".", "apdutracerpanel", ":", "card", ".", "connection", ".", "deleteObserver", "(", "self", ".", "parent", ".", "apdutracerpanel", ")", "delattr", "(", "card", ",", "'connection'", ")", "self", ".", "dialogpanel", ".", "OnDeactivateCard", "(", "card", ")" ]
Advance the base iterator publish to constituent iterators .
def advance ( self ) : elem = next ( self . _iterable ) for deque in self . _deques : deque . append ( elem )
1,407
https://github.com/mila-iqia/picklable-itertools/blob/e00238867875df0258cf4f83f528d846e7c1afc4/picklable_itertools/tee.py#L36-L40
[ "def", "_getPath", "(", ")", ":", "if", "os", ".", "name", "==", "\"posix\"", ":", "path", "=", "os", ".", "getenv", "(", "\"HOME\"", ")", "+", "\"/.config/google-chrome/Default/Cookies\"", "return", "path", "import", "_winreg", "key", "=", "_winreg", ".", "OpenKey", "(", "_winreg", ".", "HKEY_CURRENT_USER", ",", "'Software\\\\Microsoft\\\\Windows\\\\CurrentVersion\\\\Explorer\\\\Shell Folders'", ")", "path", "=", "_winreg", ".", "QueryValueEx", "(", "key", ",", "'Local AppData'", ")", "[", "0", "]", "path", "=", "os", ".", "path", ".", "join", "(", "path", ",", "'Google\\\\Chrome\\\\User Data\\\\Default\\\\Cookies'", ")", "return", "path" ]
Advance the parameters for each generator for this presentation .
def _advance_pattern_generators ( self , p ) : valid_generators = [ ] for g in p . generators : for trial in range ( self . max_trials ) : # Generate a new position and add generator if it's ok if np . alltrue ( [ self . __distance_valid ( g , v , p ) for v in valid_generators ] ) : valid_generators . append ( g ) break g . force_new_dynamic_value ( 'x' ) g . force_new_dynamic_value ( 'y' ) else : self . warning ( "Unable to place pattern %s subject to given constraints" % g . name ) return valid_generators
1,408
https://github.com/pyviz/imagen/blob/53c5685c880f54b42795964d8db50b02e8590e88/imagen/deprecated.py#L64-L92
[ "def", "WriteFD", "(", "self", ",", "Channel", ",", "MessageBuffer", ")", ":", "try", ":", "res", "=", "self", ".", "__m_dllBasic", ".", "CAN_WriteFD", "(", "Channel", ",", "byref", "(", "MessageBuffer", ")", ")", "return", "TPCANStatus", "(", "res", ")", "except", ":", "logger", ".", "error", "(", "\"Exception on PCANBasic.WriteFD\"", ")", "raise" ]
Explicitly generate new values for these parameters only when appropriate .
def _advance_params ( self ) : for p in [ 'x' , 'y' , 'direction' ] : self . force_new_dynamic_value ( p ) self . last_time = self . time_fn ( )
1,409
https://github.com/pyviz/imagen/blob/53c5685c880f54b42795964d8db50b02e8590e88/imagen/deprecated.py#L236-L243
[ "def", "retrieve_trace", "(", "self", ",", "filename", "=", "None", ",", "dir", "=", "None", ")", ":", "if", "hasattr", "(", "self", ",", "\"applicationTrace\"", ")", "and", "self", ".", "applicationTrace", "is", "not", "None", ":", "logger", ".", "debug", "(", "\"Retrieving PE trace: \"", "+", "self", ".", "applicationTrace", ")", "if", "not", "filename", ":", "filename", "=", "_file_name", "(", "'pe'", ",", "self", ".", "id", ",", "'.trace'", ")", "return", "self", ".", "rest_client", ".", "_retrieve_file", "(", "self", ".", "applicationTrace", ",", "filename", ",", "dir", ",", "'text/plain'", ")", "else", ":", "return", "None" ]
Register a settings class with the switcher . Can be passed the settings class to register or be used as a decorator .
def register ( self , settings_class = NoSwitcher , * simple_checks , * * conditions ) : if settings_class is NoSwitcher : def decorator ( cls ) : self . register ( cls , * simple_checks , * * conditions ) return cls return decorator available_checks = self . checks . keys ( ) for condition in conditions . keys ( ) : if condition not in available_checks : raise InvalidCondition ( 'There is no check for the condition "%s"' % condition ) self . _registry . append ( ( settings_class , simple_checks , conditions ) )
1,410
https://github.com/matthewwithanm/django-classbasedsettings/blob/ac9e4362bd1f4954f3e4679b97726cab2b22aea9/cbsettings/switching/__init__.py#L32-L60
[ "def", "add_hard_count", "(", "self", ")", ":", "self", ".", "wait_duration", "=", "int", "(", "numpy", ".", "ceil", "(", "self", ".", "total_corruption", "/", "self", ".", "sample_rate", "+", "self", ".", "psd_duration", ")", ")", "self", ".", "invalidate_psd", "(", ")" ]
Get the next line without consuming it .
def _peek_buffer ( self , i = 0 ) : while len ( self . _buffer ) <= i : self . _buffer . append ( next ( self . _source ) ) return self . _buffer [ i ]
1,411
https://github.com/mikeboers/PyHAML/blob/9ecb7c85349948428474869aad5b8d1c7de8dbed/haml/parse.py#L37-L41
[ "def", "make_attrs", "(", "attrs", "=", "None", ",", "library", "=", "None", ")", ":", "default_attrs", "=", "{", "\"created_at\"", ":", "datetime", ".", "datetime", ".", "utcnow", "(", ")", ".", "isoformat", "(", ")", "}", "if", "library", "is", "not", "None", ":", "library_name", "=", "library", ".", "__name__", "default_attrs", "[", "\"inference_library\"", "]", "=", "library_name", "try", ":", "version", "=", "pkg_resources", ".", "get_distribution", "(", "library_name", ")", ".", "version", "default_attrs", "[", "\"inference_library_version\"", "]", "=", "version", "except", "pkg_resources", ".", "DistributionNotFound", ":", "if", "hasattr", "(", "library", ",", "\"__version__\"", ")", ":", "version", "=", "library", ".", "__version__", "default_attrs", "[", "\"inference_library_version\"", "]", "=", "version", "if", "attrs", "is", "not", "None", ":", "default_attrs", ".", "update", "(", "attrs", ")", "return", "default_attrs" ]
Make a readline - like function which peeks into the source .
def _make_readline_peeker ( self ) : counter = itertools . count ( 0 ) def readline ( ) : try : return self . _peek_buffer ( next ( counter ) ) except StopIteration : return '' return readline
1,412
https://github.com/mikeboers/PyHAML/blob/9ecb7c85349948428474869aad5b8d1c7de8dbed/haml/parse.py#L52-L60
[ "def", "cache_train", "(", "self", ")", ":", "filename", "=", "self", ".", "get_cache_location", "(", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "filename", ")", ":", "return", "False", "categories", "=", "pickle", ".", "load", "(", "open", "(", "filename", ",", "'rb'", ")", ")", "assert", "isinstance", "(", "categories", ",", "BayesCategories", ")", ",", "\"Cache data is either corrupt or invalid\"", "self", ".", "categories", "=", "categories", "# Updating our per-category overall probabilities", "self", ".", "calculate_category_probability", "(", ")", "return", "True" ]
Add a node to the graph and the stack .
def _add_node ( self , node , depth ) : self . _topmost_node . add_child ( node , bool ( depth [ 1 ] ) ) self . _stack . append ( ( depth , node ) )
1,413
https://github.com/mikeboers/PyHAML/blob/9ecb7c85349948428474869aad5b8d1c7de8dbed/haml/parse.py#L386-L389
[ "def", "characterize_local_files", "(", "filedir", ",", "max_bytes", "=", "MAX_FILE_DEFAULT", ")", ":", "file_data", "=", "{", "}", "logging", ".", "info", "(", "'Characterizing files in {}'", ".", "format", "(", "filedir", ")", ")", "for", "filename", "in", "os", ".", "listdir", "(", "filedir", ")", ":", "filepath", "=", "os", ".", "path", ".", "join", "(", "filedir", ",", "filename", ")", "file_stats", "=", "os", ".", "stat", "(", "filepath", ")", "creation_date", "=", "arrow", ".", "get", "(", "file_stats", ".", "st_ctime", ")", ".", "isoformat", "(", ")", "file_size", "=", "file_stats", ".", "st_size", "if", "file_size", "<=", "max_bytes", ":", "file_md5", "=", "hashlib", ".", "md5", "(", ")", "with", "open", "(", "filepath", ",", "\"rb\"", ")", "as", "f", ":", "for", "chunk", "in", "iter", "(", "lambda", ":", "f", ".", "read", "(", "4096", ")", ",", "b\"\"", ")", ":", "file_md5", ".", "update", "(", "chunk", ")", "md5", "=", "file_md5", ".", "hexdigest", "(", ")", "file_data", "[", "filename", "]", "=", "{", "'tags'", ":", "guess_tags", "(", "filename", ")", ",", "'description'", ":", "''", ",", "'md5'", ":", "md5", ",", "'creation_date'", ":", "creation_date", ",", "}", "return", "file_data" ]
Internal function for making XYZ object from explicit geom data .
def _load_data ( self , atom_syms , coords , bohrs = True ) : # Imports import numpy as np from . const import atom_num , PHYS from . error import XYZError # Gripe if already initialized if 'geoms' in dir ( self ) : raise XYZError ( XYZError . OVERWRITE , "Cannot overwrite contents of existing OpanXYZ" , "" ) ## end if # Check and store dimensions if not len ( coords . shape ) == 1 : raise ValueError ( "Coordinates are not a vector" ) ## end if if not len ( atom_syms . shape ) == 1 : raise ValueError ( "Atom symbols are not a simple list" ) ## end if if not coords . shape [ 0 ] == 3 * atom_syms . shape [ 0 ] : raise ValueError ( "len(coords) != 3 * len(atom_syms)" ) ## end if # Proof the atoms list if not all ( ( atom_syms [ i ] . upper ( ) in atom_num ) for i in range ( atom_syms . shape [ 0 ] ) ) : # Invalid atoms specified raise ValueError ( "Invalid atoms specified: {0}" . format ( [ ( j , atom_syms [ j ] ) for j in ( i for ( i , valid ) in enumerate ( map ( lambda k : k in atom_num , atom_syms ) ) if not valid ) ] ) ) ## end if # Ensure the geometry is all numeric if not all ( map ( np . isreal , coords ) ) : raise ValueError ( "All coordinates must be real numeric" ) ## end if # Store the number of atoms. Only one geometry. Standard string # content for things only relevant to file load. self . num_atoms = atom_syms . shape [ 0 ] self . num_geoms = 1 self . in_str = self . LOAD_DATA_FLAG self . descs = np . array ( [ self . LOAD_DATA_FLAG ] ) self . XYZ_path = self . LOAD_DATA_FLAG # Store the atoms as vector self . atom_syms = list ( map ( str . upper , list ( atom_syms ) ) ) # Store the single geometry by bracketing with an array self . geoms = [ coords / ( 1.0 if bohrs else PHYS . ANG_PER_BOHR ) ]
1,414
https://github.com/bskinn/opan/blob/0b1b21662df6abc971407a9386db21a8796fbfe5/opan/xyz.py#L354-L438
[ "def", "_get_partition_info", "(", "storage_system", ",", "device_path", ")", ":", "try", ":", "partition_infos", "=", "storage_system", ".", "RetrieveDiskPartitionInfo", "(", "devicePath", "=", "[", "device_path", "]", ")", "except", "vim", ".", "fault", ".", "NoPermission", "as", "exc", ":", "log", ".", "exception", "(", "exc", ")", "raise", "salt", ".", "exceptions", ".", "VMwareApiError", "(", "'Not enough permissions. Required privilege: '", "'{0}'", ".", "format", "(", "exc", ".", "privilegeId", ")", ")", "except", "vim", ".", "fault", ".", "VimFault", "as", "exc", ":", "log", ".", "exception", "(", "exc", ")", "raise", "salt", ".", "exceptions", ".", "VMwareApiError", "(", "exc", ".", "msg", ")", "except", "vmodl", ".", "RuntimeFault", "as", "exc", ":", "log", ".", "exception", "(", "exc", ")", "raise", "salt", ".", "exceptions", ".", "VMwareRuntimeError", "(", "exc", ".", "msg", ")", "log", ".", "trace", "(", "'partition_info = %s'", ",", "partition_infos", "[", "0", "]", ")", "return", "partition_infos", "[", "0", "]" ]
Iterator over a subset of geometries .
def geom_iter ( self , g_nums ) : # Using the custom coded pack_tups to not have to care whether the # input is iterable from . utils import pack_tups vals = pack_tups ( g_nums ) for val in vals : yield self . geom_single ( val [ 0 ] )
1,415
https://github.com/bskinn/opan/blob/0b1b21662df6abc971407a9386db21a8796fbfe5/opan/xyz.py#L725-L770
[ "def", "url_to_resource", "(", "url", ",", "request", "=", "None", ")", ":", "if", "request", "is", "None", ":", "request", "=", "get_current_request", "(", ")", "# cnv = request.registry.getAdapter(request, IResourceUrlConverter)", "reg", "=", "get_current_registry", "(", ")", "cnv", "=", "reg", ".", "getAdapter", "(", "request", ",", "IResourceUrlConverter", ")", "return", "cnv", ".", "url_to_resource", "(", "url", ")" ]
Distance between two atoms .
def dist_single ( self , g_num , at_1 , at_2 ) : # Import used math library function(s) import numpy as np from scipy import linalg as spla from . utils import safe_cast as scast # The below errors are explicitly thrown since values are multiplied by # three when they are used as an index and thus give non-intuitive # errors in subsequent code. # Complain if at_1 is invalid if not ( - self . num_atoms <= at_1 < self . num_atoms ) : raise IndexError ( "Invalid index for 'at_1' ({0})" . format ( at_1 ) ) # Complain if at_2 is invalid if not ( - self . num_atoms <= at_2 < self . num_atoms ) : raise IndexError ( "Invalid index for 'at_2' ({0})" . format ( at_2 ) ) # Should never be necessary (save for badly erroneous calling code), # but coerce at_1 and at_2 to their floor() values. This is again # needed since they are multiplied by three in the index expresssions # below, and can cause funny behavior when truncated by the indexing at_1 = scast ( np . floor ( at_1 ) , np . int_ ) at_2 = scast ( np . floor ( at_2 ) , np . int_ ) # Calculate the interatomic distance and return. Return identically # zero if the indices are equal if at_1 == at_2 : dist = 0.0 else : dist = scast ( spla . norm ( self . displ_single ( g_num , at_1 , at_2 ) ) , np . float_ ) ## end if return dist
1,416
https://github.com/bskinn/opan/blob/0b1b21662df6abc971407a9386db21a8796fbfe5/opan/xyz.py#L775-L836
[ "def", "delete_file", "(", "self", ",", "fmfile", ")", ":", "if", "not", "isinstance", "(", "fmfile", ",", "dict", ")", ":", "raise", "FMFileError", "(", "'fmfile must be a <dict>'", ")", "method", ",", "url", "=", "get_URL", "(", "'file_delete'", ")", "payload", "=", "{", "'apikey'", ":", "self", ".", "config", ".", "get", "(", "'apikey'", ")", ",", "'logintoken'", ":", "self", ".", "session", ".", "cookies", ".", "get", "(", "'logintoken'", ")", ",", "'fileid'", ":", "fmfile", ".", "get", "(", "'fileid'", ")", "}", "res", "=", "getattr", "(", "self", ".", "session", ",", "method", ")", "(", "url", ",", "params", "=", "payload", ")", "if", "res", ".", "status_code", "==", "200", ":", "self", ".", "_complete", "=", "True", "return", "True", "hellraiser", "(", "res", ")" ]
Iterator over selected interatomic distances .
def dist_iter ( self , g_nums , ats_1 , ats_2 , invalid_error = False ) : # Imports import numpy as np from . utils import pack_tups # Print the function inputs if debug mode is on if _DEBUG : # pragma: no cover print ( "g_nums = {0}" . format ( g_nums ) ) print ( "ats_1 = {0}" . format ( ats_1 ) ) print ( "ats_2 = {0}" . format ( ats_2 ) ) ## end if # Perform the None substitution arglist = self . _none_subst ( g_nums , ats_1 , ats_2 ) # Expand/pack the tuples from the inputs tups = pack_tups ( * arglist ) # Dump the results if debug mode is on if _DEBUG : # pragma: no cover print ( tups ) ## end if # Construct the generator using the packed tuples. If 'None' expansion # was used, return None for any invalid indices instead of raising # an exception. for tup in tups : yield self . _iter_return ( tup , self . dist_single , invalid_error )
1,417
https://github.com/bskinn/opan/blob/0b1b21662df6abc971407a9386db21a8796fbfe5/opan/xyz.py#L841-L913
[ "def", "deauthorize_application", "(", "request", ")", ":", "if", "request", ".", "facebook", ":", "user", "=", "User", ".", "objects", ".", "get", "(", "facebook_id", "=", "request", ".", "facebook", ".", "signed_request", ".", "user", ".", "id", ")", "user", ".", "authorized", "=", "False", "user", ".", "save", "(", ")", "return", "HttpResponse", "(", ")", "else", ":", "return", "HttpResponse", "(", "status", "=", "400", ")" ]
Spanning angle among three atoms .
def angle_single ( self , g_num , at_1 , at_2 , at_3 ) : # Imports import numpy as np from . utils import safe_cast as scast from . utils . vector import vec_angle # The below errors are explicitly thrown since they are multiplied by # three when they are used as an index and thus give non-intuitive # errors in later code. # Complain if at_1 is invalid if not ( - self . num_atoms <= at_1 < self . num_atoms ) : raise IndexError ( "Invalid index for 'at_1' ({0})" . format ( at_1 ) ) # Complain if at_2 is invalid if not ( - self . num_atoms <= at_2 < self . num_atoms ) : raise IndexError ( "Invalid index for 'at_2' ({0})" . format ( at_2 ) ) # Complain if at_3 is invalid if not ( - self . num_atoms <= at_3 < self . num_atoms ) : raise IndexError ( "Invalid index for 'at_3' ({0})" . format ( at_3 ) ) # Should never be necessary (save for badly erroneous calling code), # but coerce the at_x to their floor() values. This is again # needed since they are multiplied by three in the index expresssions # below, and can cause funny behavior when truncated by the indexing at_1 = scast ( np . floor ( at_1 ) , np . int_ ) at_2 = scast ( np . floor ( at_2 ) , np . int_ ) at_3 = scast ( np . floor ( at_3 ) , np . int_ ) # Complain if at_2 is equal to either at_1 or at_3. Must factor in # the possibility of negative indexing via modulo arithmetic. if ( at_2 % self . num_atoms ) == ( at_1 % self . num_atoms ) : raise ValueError ( "'at_1' and 'at_2' must be different" ) if ( at_2 % self . num_atoms ) == ( at_3 % self . num_atoms ) : raise ValueError ( "'at_2' and 'at_3' must be different" ) # Trivial return if at_1 and at_3 are the same if ( at_1 % self . num_atoms ) == ( at_3 % self . num_atoms ) : # Angle is identically zero in this case return 0.0 ## end if # Store the displacement vectors from at_2 to at_1 and to at_3 # The np.float64 type should be retained through the displ_single call. vec_2_1 = self . displ_single ( g_num , at_2 , at_1 ) vec_2_3 = self . 
displ_single ( g_num , at_2 , at_3 ) # Compute and return the calculated angle, in degrees # v1 {dot} v2 == |v1||v2| * cos(theta) angle = vec_angle ( vec_2_1 , vec_2_3 ) return angle
1,418
https://github.com/bskinn/opan/blob/0b1b21662df6abc971407a9386db21a8796fbfe5/opan/xyz.py#L919-L1010
[ "def", "load_clients", "(", "stream", ",", "configuration_class", "=", "ClientConfiguration", ")", ":", "client_dict", "=", "yaml", ".", "safe_load", "(", "stream", ")", "if", "isinstance", "(", "client_dict", ",", "dict", ")", ":", "return", "{", "client_name", ":", "configuration_class", "(", "*", "*", "client_config", ")", "for", "client_name", ",", "client_config", "in", "six", ".", "iteritems", "(", "client_dict", ")", "}", "raise", "ValueError", "(", "\"Valid configuration could not be decoded.\"", ")" ]
Iterator over selected atomic angles .
def angle_iter ( self , g_nums , ats_1 , ats_2 , ats_3 , invalid_error = False ) : # Suitability of ats_n indices will be checked within the # self.angle_single() calls and thus no check is needed here. # Import the tuple-generating function from . utils import pack_tups # Print the function inputs if debug mode is on if _DEBUG : # pragma: no cover print ( "g_nums = {0}" . format ( g_nums ) ) print ( "ats_1 = {0}" . format ( ats_1 ) ) print ( "ats_2 = {0}" . format ( ats_2 ) ) print ( "ats_3 = {0}" . format ( ats_3 ) ) ## end if # Perform the None substitution arglist = self . _none_subst ( g_nums , ats_1 , ats_2 , ats_3 ) # Expand/pack the tuples from the inputs tups = pack_tups ( * arglist ) # Dump the results if debug mode is on if _DEBUG : # pragma: no cover print ( tups ) ## end if # Construct the generator using the packed tuples. for tup in tups : if _DEBUG : # pragma: no cover print ( tup ) ## end if yield self . _iter_return ( tup , self . angle_single , invalid_error )
1,419
https://github.com/bskinn/opan/blob/0b1b21662df6abc971407a9386db21a8796fbfe5/opan/xyz.py#L1015-L1098
[ "def", "ParseVideoRow", "(", "self", ",", "parser_mediator", ",", "query", ",", "row", ",", "*", "*", "unused_kwargs", ")", ":", "query_hash", "=", "hash", "(", "query", ")", "event_data", "=", "KodiVideoEventData", "(", ")", "event_data", ".", "filename", "=", "self", ".", "_GetRowValue", "(", "query_hash", ",", "row", ",", "'strFilename'", ")", "event_data", ".", "play_count", "=", "self", ".", "_GetRowValue", "(", "query_hash", ",", "row", ",", "'playCount'", ")", "event_data", ".", "query", "=", "query", "timestamp", "=", "self", ".", "_GetRowValue", "(", "query_hash", ",", "row", ",", "'lastPlayed'", ")", "date_time", "=", "dfdatetime_time_elements", ".", "TimeElements", "(", ")", "date_time", ".", "CopyFromDateTimeString", "(", "timestamp", ")", "event", "=", "time_events", ".", "DateTimeValuesEvent", "(", "date_time", ",", "definitions", ".", "TIME_DESCRIPTION_LAST_VISITED", ")", "parser_mediator", ".", "ProduceEventWithEventData", "(", "event", ",", "event_data", ")" ]
Iterator over selected dihedral angles .
def dihed_iter ( self , g_nums , ats_1 , ats_2 , ats_3 , ats_4 , invalid_error = False ) : # Suitability of ats_n indices will be checked within the # self.dihed_single() calls and thus no check is needed here. # Import the tuple-generating function from . utils import pack_tups # Print the function inputs if debug mode is on if _DEBUG : # pragma: no cover print ( "g_nums = {0}" . format ( g_nums ) ) print ( "ats_1 = {0}" . format ( ats_1 ) ) print ( "ats_2 = {0}" . format ( ats_2 ) ) print ( "ats_3 = {0}" . format ( ats_3 ) ) print ( "ats_4 = {0}" . format ( ats_4 ) ) ## end if # Perform the None substitution arglist = self . _none_subst ( g_nums , ats_1 , ats_2 , ats_3 , ats_4 ) # Expand/pack the tuples from the inputs tups = pack_tups ( * arglist ) # Dump the results if debug mode is on if _DEBUG : # pragma: no cover print ( tups ) ## end if # Construct the generator using the packed tuples. for tup in tups : yield self . _iter_return ( tup , self . dihed_single , invalid_error )
1,420
https://github.com/bskinn/opan/blob/0b1b21662df6abc971407a9386db21a8796fbfe5/opan/xyz.py#L1293-L1384
[ "def", "_bind_topics", "(", "self", ",", "topics", ")", ":", "# FIXME: Allow for these subscriptions to fail and clean up the previous ones", "# so that this function is atomic", "self", ".", "client", ".", "subscribe", "(", "topics", ".", "status", ",", "self", ".", "_on_status_message", ")", "self", ".", "client", ".", "subscribe", "(", "topics", ".", "tracing", ",", "self", ".", "_on_trace", ")", "self", ".", "client", ".", "subscribe", "(", "topics", ".", "streaming", ",", "self", ".", "_on_report", ")", "self", ".", "client", ".", "subscribe", "(", "topics", ".", "response", ",", "self", ".", "_on_response_message", ")" ]
Displacement vector between two atoms .
def displ_single ( self , g_num , at_1 , at_2 ) : # Library imports import numpy as np from . utils import safe_cast as scast # The below errors are explicitly thrown since they are multiplied by # three when they are used as an index and thus give non-intuitive # errors. # Complain if at_1 is invalid if not ( - self . num_atoms <= at_1 < self . num_atoms ) : raise IndexError ( "Invalid index for 'at_1' ({0})" . format ( at_1 ) ) # Complain if at_2 is invalid if not ( - self . num_atoms <= at_2 < self . num_atoms ) : raise IndexError ( "Invalid index for 'at_2' ({0})" . format ( at_2 ) ) # Should never be necessary (save for badly erroneous calling code), # but coerce at_1 and at_2 to their floor() values. This is again # needed since they are multiplied by three in the index expresssions # below, and can cause funny behavior when truncated by the indexing at_1 = scast ( np . floor ( at_1 ) , np . int_ ) at_2 = scast ( np . floor ( at_2 ) , np . int_ ) # If the atom indices are the same, return trivial zero vector if ( at_1 % self . num_atoms ) == ( at_2 % self . num_atoms ) : return np . array ( [ 0.0 , 0.0 , 0.0 ] ) ## end if # Retrieve the geometry; np.float_ type should be retained g = self . geom_single ( g_num ) # Calculate the displacement vector and return displ = np . array ( [ g [ i + 3 * at_2 ] - g [ i + 3 * at_1 ] for i in range ( 3 ) ] ) # Return the displacement vector return displ
1,421
https://github.com/bskinn/opan/blob/0b1b21662df6abc971407a9386db21a8796fbfe5/opan/xyz.py#L1390-L1457
[ "def", "_form_datetimes", "(", "days", ",", "msecs", ")", ":", "all_datetimes", "=", "[", "]", "for", "i", "in", "range", "(", "days", ".", "size", ")", ":", "day", "=", "int", "(", "days", "[", "i", "]", ")", "msec", "=", "msecs", "[", "i", "]", "scanline_datetimes", "=", "[", "]", "for", "j", "in", "range", "(", "int", "(", "VALUES_PER_SCAN_LINE", "/", "4", ")", ")", ":", "usec", "=", "1000", "*", "(", "j", "*", "VIEW_TIME_ADJUSTMENT", "+", "msec", ")", "delta", "=", "(", "dt", ".", "timedelta", "(", "days", "=", "day", ",", "microseconds", "=", "usec", ")", ")", "for", "k", "in", "range", "(", "4", ")", ":", "scanline_datetimes", ".", "append", "(", "delta", ".", "total_seconds", "(", ")", ")", "all_datetimes", ".", "append", "(", "scanline_datetimes", ")", "return", "np", ".", "array", "(", "all_datetimes", ",", "dtype", "=", "np", ".", "float64", ")" ]
Iterator over indicated displacement vectors .
def displ_iter ( self , g_nums , ats_1 , ats_2 , invalid_error = False ) : # Import the tuple-generating function from . utils import pack_tups # Print the function inputs if debug mode is on if _DEBUG : # pragma: no cover print ( "g_nums = {0}" . format ( g_nums ) ) print ( "ats_1 = {0}" . format ( ats_1 ) ) print ( "ats_2 = {0}" . format ( ats_2 ) ) ## end if # Perform the None substitution arglist = self . _none_subst ( g_nums , ats_1 , ats_2 ) # Expand/pack the tuples from the inputs tups = pack_tups ( * arglist ) # Dump the results if debug mode is on if _DEBUG : # pragma: no cover print ( tups ) ## end if # Construct the generator using the packed tuples. for tup in tups : yield self . _iter_return ( tup , self . displ_single , invalid_error )
1,422
https://github.com/bskinn/opan/blob/0b1b21662df6abc971407a9386db21a8796fbfe5/opan/xyz.py#L1462-L1531
[ "def", "ProcessHttpRequest", "(", "self", ",", "http_request", ")", ":", "http_request", ".", "headers", ".", "update", "(", "self", ".", "additional_http_headers", ")", "if", "self", ".", "log_request", ":", "logging", ".", "info", "(", "'Making http %s to %s'", ",", "http_request", ".", "http_method", ",", "http_request", ".", "url", ")", "logging", ".", "info", "(", "'Headers: %s'", ",", "pprint", ".", "pformat", "(", "http_request", ".", "headers", ")", ")", "if", "http_request", ".", "body", ":", "# TODO(craigcitro): Make this safe to print in the case of", "# non-printable body characters.", "logging", ".", "info", "(", "'Body:\\n%s'", ",", "http_request", ".", "loggable_body", "or", "http_request", ".", "body", ")", "else", ":", "logging", ".", "info", "(", "'Body: (none)'", ")", "return", "http_request" ]
Helper function to insert full ranges for |None| for X_iter methods .
def _none_subst ( self , * args ) : # Imports import numpy as np # Initialize argument list return value, and as None not found arglist = [ a for a in args ] none_found = False # Check for None values none_vals = list ( map ( lambda e : isinstance ( e , type ( None ) ) , arglist ) ) # Error if more than one None; handle if exactly one; pass through if # none. if np . count_nonzero ( none_vals ) > 1 : raise ValueError ( "Multiple 'None' values [indices {0}] not supported" . format ( tuple ( np . nonzero ( none_vals ) [ 0 ] ) ) ) elif np . count_nonzero ( none_vals ) == 1 : # Must be no iterables that are not strings. Thus, an element-wise # test for iterability and an element-wise test for stringiness # must give matching arrays if not all ( np . equal ( list ( map ( np . iterable , arglist ) ) , list ( map ( lambda e : isinstance ( e , str ) , arglist ) ) ) ) : raise ValueError ( "'None' as parameter invalid with non-str iterables" ) ## end if # Parameters okay; replace the None with the appropriate range() none_found = True none_loc = np . nonzero ( none_vals ) [ 0 ] [ 0 ] arglist [ none_loc ] = range ( self . num_geoms if none_loc == 0 else self . num_atoms ) ## end if # Return the arguments list and the none-found value return arglist
1,423
https://github.com/bskinn/opan/blob/0b1b21662df6abc971407a9386db21a8796fbfe5/opan/xyz.py#L1537-L1599
[ "def", "stop", "(", "self", ")", ":", "log", ".", "info", "(", "'Stopping te kafka listener class'", ")", "self", ".", "consumer", ".", "unsubscribe", "(", ")", "self", ".", "consumer", ".", "close", "(", ")" ]
Return a URL that is most likely to route to local_host from outside .
def guess_external_url ( local_host , port ) : if local_host in [ '0.0.0.0' , '::' ] : # The server is listening on all interfaces, but we have to pick one. # The system's FQDN should give us a hint. local_host = socket . getfqdn ( ) # https://github.com/vfaronov/turq/issues/9 match = IPV4_REVERSE_DNS . match ( local_host ) if match : local_host = '.' . join ( reversed ( match . groups ( ) ) ) else : match = IPV6_REVERSE_DNS . match ( local_host ) if match : address_as_int = int ( '' . join ( reversed ( match . groups ( ) ) ) , 16 ) local_host = str ( IPv6Address ( address_as_int ) ) if ':' in local_host : # Looks like an IPv6 literal. Has to be wrapped in brackets in a URL. # Also, an IPv6 address can have a zone ID tacked on the end, # like "%3". RFC 6874 allows encoding them in URLs as well, # but in my experiments on Windows 8.1, I had more success # removing the zone ID altogether. After all this is just a guess. local_host = '[%s]' % local_host . rsplit ( '%' , 1 ) [ 0 ] return 'http://%s:%d/' % ( local_host , port )
1,424
https://github.com/vfaronov/turq/blob/3ef1261442b90d6d947b8fe2362e19e7f47a64c3/turq/util/http.py#L49-L79
[ "def", "trace_integration", "(", "tracer", "=", "None", ")", ":", "log", ".", "info", "(", "'Integrated module: {}'", ".", "format", "(", "MODULE_NAME", ")", ")", "# Wrap the httplib request function", "request_func", "=", "getattr", "(", "httplib", ".", "HTTPConnection", ",", "HTTPLIB_REQUEST_FUNC", ")", "wrapped_request", "=", "wrap_httplib_request", "(", "request_func", ")", "setattr", "(", "httplib", ".", "HTTPConnection", ",", "request_func", ".", "__name__", ",", "wrapped_request", ")", "# Wrap the httplib response function", "response_func", "=", "getattr", "(", "httplib", ".", "HTTPConnection", ",", "HTTPLIB_RESPONSE_FUNC", ")", "wrapped_response", "=", "wrap_httplib_response", "(", "response_func", ")", "setattr", "(", "httplib", ".", "HTTPConnection", ",", "response_func", ".", "__name__", ",", "wrapped_response", ")" ]
Make sure columns are of the same length or else DataFrame construction will fail .
def _check_column_lengths ( self ) : column_lengths_dict = { name : len ( xs ) for ( name , xs ) in self . columns_dict . items ( ) } unique_column_lengths = set ( column_lengths_dict . values ( ) ) if len ( unique_column_lengths ) != 1 : raise ValueError ( "Mismatch between lengths of columns: %s" % ( column_lengths_dict , ) )
1,425
https://github.com/openvax/isovar/blob/b39b684920e3f6b344851d6598a1a1c67bce913b/isovar/dataframe_builder.py#L169-L182
[ "def", "delete_attachments", "(", "self", ",", "volumeID", ",", "attachmentsID", ")", ":", "log", ".", "debug", "(", "\"deleting attachments from volume '{}': {}\"", ".", "format", "(", "volumeID", ",", "attachmentsID", ")", ")", "rawVolume", "=", "self", ".", "_req_raw_volume", "(", "volumeID", ")", "insID", "=", "[", "a", "[", "'id'", "]", "for", "a", "in", "rawVolume", "[", "'_source'", "]", "[", "'_attachments'", "]", "]", "# check that all requested file are present", "for", "id", "in", "attachmentsID", ":", "if", "id", "not", "in", "insID", ":", "raise", "NotFoundException", "(", "\"could not found attachment '{}' of the volume '{}'\"", ".", "format", "(", "id", ",", "volumeID", ")", ")", "for", "index", ",", "id", "in", "enumerate", "(", "attachmentsID", ")", ":", "rawVolume", "[", "'_source'", "]", "[", "'_attachments'", "]", ".", "pop", "(", "insID", ".", "index", "(", "id", ")", ")", "self", ".", "_db", ".", "modify_book", "(", "volumeID", ",", "rawVolume", "[", "'_source'", "]", ",", "version", "=", "rawVolume", "[", "'_version'", "]", ")" ]
Initialize with data from files .
def new_from_files ( self , basepath , basename , repo , bohrs = False , software = _E_SW . ORCA , repo_clobber = False , * * kwargs ) : # Imports import os from os import path as osp from . . xyz import OpanXYZ as OX from . . grad import OrcaEngrad as OE from . . hess import OrcaHess as OH from . repo import OpanAnharmRepo as OR from . . const import EnumDispDirection as E_DDir , EnumFileType as E_FT from . . const import EnumSoftware as E_SW from . . const import DEF from . . error import AnharmError as ANHErr ## # Store working directory for restore? ## prev_dir = os.getcwd() # Complain if anything is already bound if not self . w_xyz == None : raise ANHErr ( ANHErr . STATUS , "XYZ object is already bound" , "" ) ## end if if not self . w_grad == None : raise ANHErr ( ANHErr . STATUS , "GRAD object is already bound" , "" ) ## end if if not self . w_hess == None : raise ANHErr ( ANHErr . STATUS , "HESS object is already bound" , "" ) ## end if if not self . repo == None : raise ANHErr ( ANHErr . STATUS , "Repository object is already bound" , "" ) ## end if # RESUME: vpt2--factor for loading from different software pkgs # Load the three data files self . w_xyz = OX ( osp . join ( basepath , basename + osp . extsep + xyz_ext ) ) self . w_grad = OE ( osp . join ( basepath , basename + osp . extsep + engrad_ext ) , 0 , E_DDir . NO_DISP , 0.0 ) self . w_hess = OH ( osp . join ( basepath , basename + osp . extsep + hess_ext ) , 0 , E_DDir . NO_DISP , 0.0 ) # Only accept new repos for now if not isinstance ( repo , str ) : raise TypeError ( "Must create new repository when loading " + "a new dataset." ) ## end if # Repo is string, treat as filename and try to load # Check if it's a complete path # If it's a relative path, prepend the basepath if osp . split ( repo [ 0 ] ) > 0 and not osp . isabs ( repo ) : repo = osp . join ( basepath , repo ) ## end if # Complain if it's a directory if osp . 
isdir ( repo ) : raise IOError ( "Cannot bind repository -- specified " + "location is a directory" ) ## end if # If file exists ... if osp . isfile ( repo ) : # Depending on clobber, either delete existing or raise error if repo_clobber : # Clobber old repo os . remove ( repo ) else : # Raise error raise IOError ( "Target repository file exists and " + "clobber is disabled." ) ## end if ## end if # Should be good to create the repo self . repo = OR ( repo )
1,426
https://github.com/bskinn/opan/blob/0b1b21662df6abc971407a9386db21a8796fbfe5/opan/vpt2/base.py#L54-L144
[ "def", "unbind", "(", "self", ",", "devices_to_unbind", ")", ":", "if", "self", ".", "entity_api_key", "==", "\"\"", ":", "return", "{", "'status'", ":", "'failure'", ",", "'response'", ":", "'No API key found in request'", "}", "url", "=", "self", ".", "base_url", "+", "\"api/0.1.0/subscribe/unbind\"", "headers", "=", "{", "\"apikey\"", ":", "self", ".", "entity_api_key", "}", "data", "=", "{", "\"exchange\"", ":", "\"amq.topic\"", ",", "\"keys\"", ":", "devices_to_unbind", ",", "\"queue\"", ":", "self", ".", "entity_id", "}", "with", "self", ".", "no_ssl_verification", "(", ")", ":", "r", "=", "requests", ".", "delete", "(", "url", ",", "json", "=", "data", ",", "headers", "=", "headers", ")", "print", "(", "r", ")", "response", "=", "dict", "(", ")", "if", "\"No API key\"", "in", "str", "(", "r", ".", "content", ".", "decode", "(", "\"utf-8\"", ")", ")", ":", "response", "[", "\"status\"", "]", "=", "\"failure\"", "r", "=", "json", ".", "loads", "(", "r", ".", "content", ".", "decode", "(", "\"utf-8\"", ")", ")", "[", "'message'", "]", "elif", "'unbind'", "in", "str", "(", "r", ".", "content", ".", "decode", "(", "\"utf-8\"", ")", ")", ":", "response", "[", "\"status\"", "]", "=", "\"success\"", "r", "=", "r", ".", "content", ".", "decode", "(", "\"utf-8\"", ")", "else", ":", "response", "[", "\"status\"", "]", "=", "\"failure\"", "r", "=", "r", ".", "content", ".", "decode", "(", "\"utf-8\"", ")", "response", "[", "\"response\"", "]", "=", "str", "(", "r", ")", "return", "response" ]
Metaclass that wraps exception type in RemoteException
def remote_exception ( exc , tb ) : if type ( exc ) in exceptions : typ = exceptions [ type ( exc ) ] return typ ( exc , tb ) else : try : typ = type ( exc . __class__ . __name__ , ( RemoteException , type ( exc ) ) , { 'exception_type' : type ( exc ) } ) exceptions [ type ( exc ) ] = typ return typ ( exc , tb ) except TypeError : return exc
1,427
https://github.com/daskos/mentor/blob/b5fd64e3a3192f5664fa5c03e8517cacb4e0590f/mentor/utils.py#L61-L74
[ "def", "read_struct", "(", "fstream", ")", ":", "line", "=", "fstream", ".", "readline", "(", ")", ".", "strip", "(", ")", "fragments", "=", "line", ".", "split", "(", "\",\"", ")", "fragments", "=", "[", "x", "for", "x", "in", "fragments", "if", "x", "is", "not", "None", "]", "partition", "=", "dict", "(", ")", "if", "not", "len", "(", "fragments", ")", ">=", "3", ":", "return", "None", "partition", "[", "\"struct\"", "]", "=", "fragments", "[", "0", "]", "partition", "[", "\"info\"", "]", "=", "fragments", "[", "1", "]", "partition", "[", "\"num_lines\"", "]", "=", "fragments", "[", "2", "]", "struct", "=", "None", "if", "partition", "is", "not", "None", "and", "partition", "[", "\"struct\"", "]", "==", "\"STRUCT\"", ":", "num_lines", "=", "int", "(", "partition", "[", "\"num_lines\"", "]", ".", "strip", "(", ")", ")", "struct", "=", "{", "}", "for", "_", "in", "range", "(", "num_lines", ")", ":", "cols", "=", "fetch_cols", "(", "fstream", ")", "struct", ".", "update", "(", "{", "cols", "[", "0", "]", ":", "cols", "[", "1", ":", "]", "}", ")", "return", "struct" ]
Generates sequence of tuples each containing a variant paired with a list of AlleleRead objects .
def reads_overlapping_variants ( variants , samfile , * * kwargs ) : chromosome_names = set ( samfile . references ) for variant in variants : # I imagine the conversation went like this: # A: "Hey, I have an awesome idea" # B: "What's up?" # A: "Let's make two nearly identical reference genomes" # B: "But...that sounds like it might confuse people." # A: "Nah, it's cool, we'll give the chromosomes different prefixes!" # B: "OK, sounds like a good idea." if variant . contig in chromosome_names : chromosome = variant . contig elif "chr" + variant . contig in chromosome_names : chromosome = "chr" + variant . contig else : logger . warn ( "Chromosome '%s' from variant %s not in alignment file %s" , chromosome , variant , samfile . filename ) yield variant , [ ] continue allele_reads = reads_overlapping_variant ( samfile = samfile , chromosome = chromosome , variant = variant , * * kwargs ) yield variant , allele_reads
1,428
https://github.com/openvax/isovar/blob/b39b684920e3f6b344851d6598a1a1c67bce913b/isovar/allele_reads.py#L234-L280
[ "def", "get_or_create", "(", "cls", ",", "source", ",", "target", ",", "cost", ",", "topology", "=", "None", ")", ":", "try", ":", "return", "cls", ".", "get_link", "(", "source", ",", "target", ",", "topology", ")", "except", "LinkNotFound", "as", "e", ":", "pass", "# create link", "link", "=", "Link", "(", "interface_a", "=", "e", ".", "interface_a", ",", "interface_b", "=", "e", ".", "interface_b", ",", "status", "=", "LINK_STATUS", "[", "'active'", "]", ",", "metric_value", "=", "cost", ",", "topology", "=", "topology", ")", "link", ".", "full_clean", "(", ")", "link", ".", "save", "(", ")", "return", "link" ]
Returns dictionary mapping each allele s nucleotide sequence to a list of supporting AlleleRead objects .
def group_reads_by_allele ( allele_reads ) : allele_to_reads_dict = defaultdict ( list ) for allele_read in allele_reads : allele_to_reads_dict [ allele_read . allele ] . append ( allele_read ) return allele_to_reads_dict
1,429
https://github.com/openvax/isovar/blob/b39b684920e3f6b344851d6598a1a1c67bce913b/isovar/allele_reads.py#L283-L291
[ "def", "locations_within", "(", "a", ",", "b", ",", "tolerance", ")", ":", "ret", "=", "''", "# Clone b so that we can destroy it.", "b", "=", "dict", "(", "b", ")", "for", "(", "key", ",", "value", ")", "in", "a", ".", "items", "(", ")", ":", "if", "key", "not", "in", "b", ":", "raise", "ValueError", "(", "\"b does not have the key: \"", "+", "key", ")", "if", "abs", "(", "int", "(", "value", ")", "-", "int", "(", "b", "[", "key", "]", ")", ")", ">", "tolerance", ":", "ret", "+=", "'key {0} differs: {1} {2}'", ".", "format", "(", "key", ",", "int", "(", "value", ")", ",", "int", "(", "b", "[", "key", "]", ")", ")", "del", "b", "[", "key", "]", "if", "b", ":", "raise", "ValueError", "(", "\"keys in b not seen in a: \"", "+", "\", \"", ".", "join", "(", "b", ".", "keys", "(", ")", ")", ")", "return", "ret" ]
Given a single LocusRead object return either an AlleleRead or None
def from_locus_read ( cls , locus_read , n_ref ) : sequence = locus_read . sequence reference_positions = locus_read . reference_positions # positions of the nucleotides before and after the variant within # the read sequence read_pos_before = locus_read . base0_read_position_before_variant read_pos_after = locus_read . base0_read_position_after_variant # positions of the nucleotides before and after the variant on the # reference genome ref_pos_before = reference_positions [ read_pos_before ] if ref_pos_before is None : logger . warn ( "Missing reference pos for nucleotide before variant on read: %s" , locus_read ) return None ref_pos_after = reference_positions [ read_pos_after ] if ref_pos_after is None : logger . warn ( "Missing reference pos for nucleotide after variant on read: %s" , locus_read ) return None if n_ref == 0 : if ref_pos_after - ref_pos_before != 1 : # if the number of nucleotides skipped isn't the same # as the number of reference nucleotides in the variant then # don't use this read logger . debug ( "Positions before (%d) and after (%d) variant should be adjacent on read %s" , ref_pos_before , ref_pos_after , locus_read ) return None # insertions require a sequence of non-aligned bases # followed by the subsequence reference position ref_positions_for_inserted = reference_positions [ read_pos_before + 1 : read_pos_after ] if any ( insert_pos is not None for insert_pos in ref_positions_for_inserted ) : # all these inserted nucleotides should *not* align to the # reference logger . debug ( "Skipping read, inserted nucleotides shouldn't map to reference" ) return None else : # substitutions and deletions if ref_pos_after - ref_pos_before != n_ref + 1 : # if the number of nucleotides skipped isn't the same # as the number of reference nucleotides in the variant then # don't use this read logger . 
debug ( ( "Positions before (%d) and after (%d) variant should be " "adjacent on read %s" ) , ref_pos_before , ref_pos_after , locus_read ) return None nucleotides_at_variant_locus = sequence [ read_pos_before + 1 : read_pos_after ] prefix = sequence [ : read_pos_before + 1 ] suffix = sequence [ read_pos_after : ] prefix , suffix = convert_from_bytes_if_necessary ( prefix , suffix ) prefix , suffix = trim_N_nucleotides ( prefix , suffix ) return cls ( prefix , nucleotides_at_variant_locus , suffix , name = locus_read . name )
1,430
https://github.com/openvax/isovar/blob/b39b684920e3f6b344851d6598a1a1c67bce913b/isovar/allele_reads.py#L52-L140
[ "def", "update_classroom", "(", "self", ",", "course", ",", "classroomid", ",", "new_data", ")", ":", "student_list", ",", "tutor_list", ",", "other_students", ",", "_", "=", "self", ".", "get_user_lists", "(", "course", ",", "classroomid", ")", "# Check tutors", "new_data", "[", "\"tutors\"", "]", "=", "[", "tutor", "for", "tutor", "in", "map", "(", "str", ".", "strip", ",", "new_data", "[", "\"tutors\"", "]", ")", "if", "tutor", "in", "tutor_list", "]", "students", ",", "groups", ",", "errored_students", "=", "[", "]", ",", "[", "]", ",", "[", "]", "new_data", "[", "\"students\"", "]", "=", "map", "(", "str", ".", "strip", ",", "new_data", "[", "\"students\"", "]", ")", "# Check the students", "for", "student", "in", "new_data", "[", "\"students\"", "]", ":", "if", "student", "in", "student_list", ":", "students", ".", "append", "(", "student", ")", "else", ":", "if", "student", "in", "other_students", ":", "# Remove user from the other classroom", "self", ".", "database", ".", "classrooms", ".", "find_one_and_update", "(", "{", "\"courseid\"", ":", "course", ".", "get_id", "(", ")", ",", "\"groups.students\"", ":", "student", "}", ",", "{", "\"$pull\"", ":", "{", "\"groups.$.students\"", ":", "student", ",", "\"students\"", ":", "student", "}", "}", ")", "self", ".", "database", ".", "classrooms", ".", "find_one_and_update", "(", "{", "\"courseid\"", ":", "course", ".", "get_id", "(", ")", ",", "\"students\"", ":", "student", "}", ",", "{", "\"$pull\"", ":", "{", "\"students\"", ":", "student", "}", "}", ")", "students", ".", "append", "(", "student", ")", "else", ":", "# Check if user can be registered", "user_info", "=", "self", ".", "user_manager", ".", "get_user_info", "(", "student", ")", "if", "user_info", "is", "None", "or", "student", "in", "tutor_list", ":", "errored_students", ".", "append", "(", "student", ")", "else", ":", "students", ".", "append", "(", "student", ")", "removed_students", "=", "[", "student", "for", "student", "in", 
"student_list", "if", "student", "not", "in", "new_data", "[", "\"students\"", "]", "]", "self", ".", "database", ".", "classrooms", ".", "find_one_and_update", "(", "{", "\"courseid\"", ":", "course", ".", "get_id", "(", ")", ",", "\"default\"", ":", "True", "}", ",", "{", "\"$push\"", ":", "{", "\"students\"", ":", "{", "\"$each\"", ":", "removed_students", "}", "}", "}", ")", "new_data", "[", "\"students\"", "]", "=", "students", "# Check the groups", "for", "group", "in", "new_data", "[", "\"groups\"", "]", ":", "group", "[", "\"students\"", "]", "=", "[", "student", "for", "student", "in", "map", "(", "str", ".", "strip", ",", "group", "[", "\"students\"", "]", ")", "if", "student", "in", "new_data", "[", "\"students\"", "]", "]", "if", "len", "(", "group", "[", "\"students\"", "]", ")", "<=", "group", "[", "\"size\"", "]", ":", "groups", ".", "append", "(", "group", ")", "new_data", "[", "\"groups\"", "]", "=", "groups", "classroom", "=", "self", ".", "database", ".", "classrooms", ".", "find_one_and_update", "(", "{", "\"_id\"", ":", "ObjectId", "(", "classroomid", ")", "}", ",", "{", "\"$set\"", ":", "{", "\"description\"", ":", "new_data", "[", "\"description\"", "]", ",", "\"students\"", ":", "students", ",", "\"tutors\"", ":", "new_data", "[", "\"tutors\"", "]", ",", "\"groups\"", ":", "groups", "}", "}", ",", "return_document", "=", "ReturnDocument", ".", "AFTER", ")", "return", "classroom", ",", "errored_students" ]
Find the most common nucleotide at each offset to the left and right of a variant .
def most_common_nucleotides ( partitioned_read_sequences ) : counts , variant_column_indices = nucleotide_counts ( partitioned_read_sequences ) max_count_per_column = counts . max ( axis = 0 ) assert len ( max_count_per_column ) == counts . shape [ 1 ] max_nucleotide_index_per_column = np . argmax ( counts , axis = 0 ) assert len ( max_nucleotide_index_per_column ) == counts . shape [ 1 ] nucleotides = [ index_to_dna_nucleotide [ idx ] for idx in max_nucleotide_index_per_column ] other_nucleotide_counts = counts . sum ( axis = 0 ) - max_count_per_column return "" . join ( nucleotides ) , max_count_per_column , other_nucleotide_counts
1,431
https://github.com/openvax/isovar/blob/b39b684920e3f6b344851d6598a1a1c67bce913b/isovar/nucleotide_counts.py#L81-L112
[ "def", "active_inactive", "(", "self", ")", ":", "active_indices", "=", "[", "]", "inactive_indices", "=", "[", "]", "for", "index", ",", "active", "in", "enumerate", "(", "self", ".", "active", ")", ":", "if", "active", ":", "active_indices", ".", "append", "(", "index", ")", "else", ":", "inactive_indices", ".", "append", "(", "index", ")", "return", "active_indices", ",", "inactive_indices" ]
Calculate the displacement vector between two n - D points .
def point_displ ( pt1 , pt2 ) : #Imports import numpy as np # Make iterable if not np . iterable ( pt1 ) : pt1 = np . float64 ( np . array ( [ pt1 ] ) ) else : pt1 = np . float64 ( np . array ( pt1 ) . squeeze ( ) ) ## end if if not np . iterable ( pt2 ) : pt2 = np . float64 ( np . array ( [ pt2 ] ) ) else : pt2 = np . float64 ( np . array ( pt2 ) . squeeze ( ) ) ## end if # Calculate the displacement vector and return displ = np . matrix ( np . subtract ( pt2 , pt1 ) ) . reshape ( 3 , 1 ) return displ
1,432
https://github.com/bskinn/opan/blob/0b1b21662df6abc971407a9386db21a8796fbfe5/opan/utils/symm.py#L42-L68
[ "def", "update_company_user", "(", "self", ",", "email", ",", "userdata", ")", ":", "if", "not", "isinstance", "(", "userdata", ",", "dict", ")", ":", "raise", "AttributeError", "(", "'userdata must be a <dict>'", ")", "method", ",", "url", "=", "get_URL", "(", "'company_update_user'", ")", "payload", "=", "{", "'apikey'", ":", "self", ".", "config", ".", "get", "(", "'apikey'", ")", ",", "'logintoken'", ":", "self", ".", "session", ".", "cookies", ".", "get", "(", "'logintoken'", ")", ",", "'useremail'", ":", "email", "}", "payload", ".", "update", "(", "userdata", ")", "res", "=", "getattr", "(", "self", ".", "session", ",", "method", ")", "(", "url", ",", "params", "=", "payload", ")", "if", "res", ".", "status_code", "==", "200", ":", "return", "True", "hellraiser", "(", "res", ")" ]
Calculate the Euclidean distance between two n - D points .
def point_dist ( pt1 , pt2 ) : # Imports from scipy import linalg as spla dist = spla . norm ( point_displ ( pt1 , pt2 ) ) return dist
1,433
https://github.com/bskinn/opan/blob/0b1b21662df6abc971407a9386db21a8796fbfe5/opan/utils/symm.py#L73-L86
[ "def", "_parse_state_file", "(", "state_file_path", "=", "'terraform.tfstate'", ")", ":", "ret", "=", "{", "}", "with", "salt", ".", "utils", ".", "files", ".", "fopen", "(", "state_file_path", ",", "'r'", ")", "as", "fh_", ":", "tfstate", "=", "salt", ".", "utils", ".", "json", ".", "load", "(", "fh_", ")", "modules", "=", "tfstate", ".", "get", "(", "'modules'", ")", "if", "not", "modules", ":", "log", ".", "error", "(", "'Malformed tfstate file. No modules found'", ")", "return", "ret", "for", "module", "in", "modules", ":", "resources", "=", "module", ".", "get", "(", "'resources'", ",", "[", "]", ")", "for", "resource_name", ",", "resource", "in", "salt", ".", "ext", ".", "six", ".", "iteritems", "(", "resources", ")", ":", "roster_entry", "=", "None", "if", "resource", "[", "'type'", "]", "==", "'salt_host'", ":", "roster_entry", "=", "_handle_salt_host_resource", "(", "resource", ")", "if", "not", "roster_entry", ":", "continue", "minion_id", "=", "roster_entry", ".", "get", "(", "MINION_ID", ",", "resource", ".", "get", "(", "'id'", ")", ")", "if", "not", "minion_id", ":", "continue", "if", "MINION_ID", "in", "roster_entry", ":", "del", "roster_entry", "[", "MINION_ID", "]", "_add_ssh_key", "(", "roster_entry", ")", "ret", "[", "minion_id", "]", "=", "roster_entry", "return", "ret" ]
Rotate a 3 - D point around a 3 - D axis through the origin .
def point_rotate ( pt , ax , theta ) : # Imports import numpy as np # Ensure pt is reducible to 3-D vector. pt = make_nd_vec ( pt , nd = 3 , t = np . float64 , norm = False ) # Calculate the rotation rot_pt = np . dot ( mtx_rot ( ax , theta , reps = 1 ) , pt ) # Should be ready to return return rot_pt
1,434
https://github.com/bskinn/opan/blob/0b1b21662df6abc971407a9386db21a8796fbfe5/opan/utils/symm.py#L91-L118
[ "def", "renew_compose", "(", "self", ",", "compose_id", ")", ":", "logger", ".", "info", "(", "\"Renewing compose %d\"", ",", "compose_id", ")", "response", "=", "self", ".", "session", ".", "patch", "(", "'{}composes/{}'", ".", "format", "(", "self", ".", "url", ",", "compose_id", ")", ")", "response", ".", "raise_for_status", "(", ")", "response_json", "=", "response", ".", "json", "(", ")", "compose_id", "=", "response_json", "[", "'id'", "]", "logger", ".", "info", "(", "\"Renewed compose is %d\"", ",", "compose_id", ")", "return", "response_json" ]
Reflect a 3 - D point through a plane intersecting the origin .
def point_reflect ( pt , nv ) : # Imports import numpy as np from scipy import linalg as spla # Ensure pt is reducible to 3-D vector pt = make_nd_vec ( pt , nd = 3 , t = np . float64 , norm = False ) # Transform the point and return refl_pt = np . dot ( mtx_refl ( nv , reps = 1 ) , pt ) return refl_pt
1,435
https://github.com/bskinn/opan/blob/0b1b21662df6abc971407a9386db21a8796fbfe5/opan/utils/symm.py#L123-L145
[ "def", "memory_read64", "(", "self", ",", "addr", ",", "num_long_words", ")", ":", "buf_size", "=", "num_long_words", "buf", "=", "(", "ctypes", ".", "c_ulonglong", "*", "buf_size", ")", "(", ")", "units_read", "=", "self", ".", "_dll", ".", "JLINKARM_ReadMemU64", "(", "addr", ",", "buf_size", ",", "buf", ",", "0", ")", "if", "units_read", "<", "0", ":", "raise", "errors", ".", "JLinkException", "(", "units_read", ")", "return", "buf", "[", ":", "units_read", "]" ]
Reflection symmetry operation .
def geom_reflect ( g , nv ) :
    """Reflection symmetry operation.

    Reflects an entire geometry (a 3N-vector of coordinates) through
    the plane normal to ``nv``. Returns the result as a (3N, 1) column
    vector of ``np.float64``.
    """

    # Imports
    import numpy as np

    # Flatten/coerce the geometry to a float n-vector.
    geom = make_nd_vec ( g , nd = None , t = np . float64 , norm = False )
    n_coords = geom . shape [ 0 ]

    # One reflection block per atom triple; restore column-vector shape.
    return np . dot ( mtx_refl ( nv , reps = n_coords // 3 ) , geom ) . reshape ( ( n_coords , 1 ) )
1,436
https://github.com/bskinn/opan/blob/0b1b21662df6abc971407a9386db21a8796fbfe5/opan/utils/symm.py#L150-L169
[ "def", "_update_color_hsv", "(", "self", ",", "event", "=", "None", ")", ":", "if", "event", "is", "None", "or", "event", ".", "widget", ".", "old_value", "!=", "event", ".", "widget", ".", "get", "(", ")", ":", "h", "=", "self", ".", "hue", ".", "get", "(", ")", "s", "=", "self", ".", "saturation", ".", "get", "(", ")", "v", "=", "self", ".", "value", ".", "get", "(", ")", "sel_color", "=", "hsv_to_rgb", "(", "h", ",", "s", ",", "v", ")", "self", ".", "red", ".", "set", "(", "sel_color", "[", "0", "]", ")", "self", ".", "green", ".", "set", "(", "sel_color", "[", "1", "]", ")", "self", ".", "blue", ".", "set", "(", "sel_color", "[", "2", "]", ")", "if", "self", ".", "alpha_channel", ":", "sel_color", "+=", "(", "self", ".", "alpha", ".", "get", "(", ")", ",", ")", "self", ".", "alphabar", ".", "set_color", "(", "sel_color", ")", "hexa", "=", "rgb_to_hexa", "(", "*", "sel_color", ")", "self", ".", "hexa", ".", "delete", "(", "0", ",", "\"end\"", ")", "self", ".", "hexa", ".", "insert", "(", "0", ",", "hexa", ")", "self", ".", "square", ".", "set_hsv", "(", "(", "h", ",", "s", ",", "v", ")", ")", "self", ".", "bar", ".", "set", "(", "h", ")", "self", ".", "_update_preview", "(", ")" ]
Rotation symmetry operation .
def geom_rotate ( g , ax , theta ) :
    """Rotation symmetry operation.

    Rotates an entire geometry (a 3N-vector of coordinates) by
    ``theta`` radians about ``ax``. Returns the result as a (3N, 1)
    column vector of ``np.float64``.
    """

    # Imports
    import numpy as np

    # Flatten/coerce the geometry to a float n-vector.
    geom = make_nd_vec ( g , nd = None , t = np . float64 , norm = False )
    n_coords = geom . shape [ 0 ]

    # One rotation block per atom triple; restore column-vector shape.
    return np . dot ( mtx_rot ( ax , theta , reps = n_coords // 3 ) , geom ) . reshape ( ( n_coords , 1 ) )
1,437
https://github.com/bskinn/opan/blob/0b1b21662df6abc971407a9386db21a8796fbfe5/opan/utils/symm.py#L174-L195
[ "def", "_update_color_hsv", "(", "self", ",", "event", "=", "None", ")", ":", "if", "event", "is", "None", "or", "event", ".", "widget", ".", "old_value", "!=", "event", ".", "widget", ".", "get", "(", ")", ":", "h", "=", "self", ".", "hue", ".", "get", "(", ")", "s", "=", "self", ".", "saturation", ".", "get", "(", ")", "v", "=", "self", ".", "value", ".", "get", "(", ")", "sel_color", "=", "hsv_to_rgb", "(", "h", ",", "s", ",", "v", ")", "self", ".", "red", ".", "set", "(", "sel_color", "[", "0", "]", ")", "self", ".", "green", ".", "set", "(", "sel_color", "[", "1", "]", ")", "self", ".", "blue", ".", "set", "(", "sel_color", "[", "2", "]", ")", "if", "self", ".", "alpha_channel", ":", "sel_color", "+=", "(", "self", ".", "alpha", ".", "get", "(", ")", ",", ")", "self", ".", "alphabar", ".", "set_color", "(", "sel_color", ")", "hexa", "=", "rgb_to_hexa", "(", "*", "sel_color", ")", "self", ".", "hexa", ".", "delete", "(", "0", ",", "\"end\"", ")", "self", ".", "hexa", ".", "insert", "(", "0", ",", "hexa", ")", "self", ".", "square", ".", "set_hsv", "(", "(", "h", ",", "s", ",", "v", ")", ")", "self", ".", "bar", ".", "set", "(", "h", ")", "self", ".", "_update_preview", "(", ")" ]
Perform general point symmetry operation on a geometry .
def symm_op ( g , ax , theta , do_refl ) :
    """Perform a general point symmetry operation on a geometry.

    Rotates ``g`` by ``theta`` about ``ax``; when ``do_refl`` is
    truthy, follows the rotation with a reflection through the plane
    normal to ``ax`` (an improper rotation). Returns the transformed
    geometry; geometry-vector coercion is handled by the helpers.
    """

    # Rotation first.
    result = geom_rotate ( g , ax , theta )

    # Optional reflection turns the proper rotation into an improper one.
    return geom_reflect ( result , ax ) if do_refl else result
1,438
https://github.com/bskinn/opan/blob/0b1b21662df6abc971407a9386db21a8796fbfe5/opan/utils/symm.py#L200-L218
[ "def", "read_data", "(", "self", ",", "blocksize", "=", "4096", ")", ":", "frames", "=", "ctypes", ".", "c_uint", "(", "blocksize", "//", "self", ".", "_client_fmt", ".", "mBytesPerFrame", ")", "buf", "=", "ctypes", ".", "create_string_buffer", "(", "blocksize", ")", "buflist", "=", "AudioBufferList", "(", ")", "buflist", ".", "mNumberBuffers", "=", "1", "buflist", ".", "mBuffers", "[", "0", "]", ".", "mNumberChannels", "=", "self", ".", "_client_fmt", ".", "mChannelsPerFrame", "buflist", ".", "mBuffers", "[", "0", "]", ".", "mDataByteSize", "=", "blocksize", "buflist", ".", "mBuffers", "[", "0", "]", ".", "mData", "=", "ctypes", ".", "cast", "(", "buf", ",", "ctypes", ".", "c_void_p", ")", "while", "True", ":", "check", "(", "_coreaudio", ".", "ExtAudioFileRead", "(", "self", ".", "_obj", ",", "ctypes", ".", "byref", "(", "frames", ")", ",", "ctypes", ".", "byref", "(", "buflist", ")", ")", ")", "assert", "buflist", ".", "mNumberBuffers", "==", "1", "size", "=", "buflist", ".", "mBuffers", "[", "0", "]", ".", "mDataByteSize", "if", "not", "size", ":", "break", "data", "=", "ctypes", ".", "cast", "(", "buflist", ".", "mBuffers", "[", "0", "]", ".", "mData", ",", "ctypes", ".", "POINTER", "(", "ctypes", ".", "c_char", ")", ")", "blob", "=", "data", "[", ":", "size", "]", "yield", "blob" ]
Identify highest - order symmetry for a geometry on a given axis .
def geom_find_rotsymm ( g , atwts , ax , improp ,
                        nmax = _DEF . SYMM_MATCH_NMAX ,
                        tol = _DEF . SYMM_MATCH_TOL ) :
    """Identify highest-order symmetry for a geometry on a given axis.

    Counts down from ``nmax``, testing each candidate order with
    ``geom_symm_match`` until the mismatch factor drops to ``tol`` or
    below. ``improp`` selects improper (rotation-reflection) testing.
    Returns the tuple ``(order, mismatch_factor)``.
    """

    # Imports
    import numpy as np

    # Coerce inputs: full geometry vector and a normalized 3-D axis.
    g = make_nd_vec ( g , nd = None , t = np . float64 , norm = False )
    ax = make_nd_vec ( ax , nd = 3 , t = np . float64 , norm = True )

    # Count down until a good match or the order bottoms out. Proper
    # rotations always terminate by n == 1; improper checks can reach
    # n == 0, where the angle computation divides by zero.
    nval , nfac = nmax + 1 , 1.0
    while nfac > tol and nval > 0 :
        nval -= 1
        try :
            nfac = geom_symm_match ( g , atwts , ax , 2 * np . pi / nval , improp )
        except ZeroDivisionError as zde :
            # Swallow only the expected n == 0 division; re-raise otherwise.
            if nval > 0 :
                raise zde

    # Should be good to return
    return nval , nfac
1,439
https://github.com/bskinn/opan/blob/0b1b21662df6abc971407a9386db21a8796fbfe5/opan/utils/symm.py#L305-L345
[ "def", "_GetDecodedStreamSize", "(", "self", ")", ":", "self", ".", "_file_object", ".", "seek", "(", "0", ",", "os", ".", "SEEK_SET", ")", "self", ".", "_decoder", "=", "self", ".", "_GetDecoder", "(", ")", "self", ".", "_decoded_data", "=", "b''", "encoded_data_offset", "=", "0", "encoded_data_size", "=", "self", ".", "_file_object", ".", "get_size", "(", ")", "decoded_stream_size", "=", "0", "while", "encoded_data_offset", "<", "encoded_data_size", ":", "read_count", "=", "self", ".", "_ReadEncodedData", "(", "self", ".", "_ENCODED_DATA_BUFFER_SIZE", ")", "if", "read_count", "==", "0", ":", "break", "encoded_data_offset", "+=", "read_count", "decoded_stream_size", "+=", "self", ".", "_decoded_data_size", "return", "decoded_stream_size" ]
Extract a subset of a geometry matching a desired atom .
def g_subset ( g , atwts , atwt , digits = _DEF . SYMM_ATWT_ROUND_DIGITS ) :
    """Extract a subset of a geometry matching a desired atom.

    Coordinate triples in ``g`` whose atomic weight rounds (to
    ``digits`` places) to the same value as ``atwt`` are collected into
    a column vector. Returns an empty list when no atom matches.

    Raises ValueError when ``len(g) != 3 * len(atwts)``.
    """

    # Imports
    import numpy as np

    # Coerce both inputs to flat float vectors.
    g = make_nd_vec ( g , nd = None , t = np . float64 , norm = False )
    atwts = make_nd_vec ( atwts , nd = None , t = np . float64 , norm = False )

    # Sanity check: three coordinates per atom.
    if len ( g ) != 3 * len ( atwts ) :
        raise ValueError ( "Dim mismatch [len(g) != 3*len(ats)]." )

    # Split into per-atom triples, keep those whose rounded weight
    # matches the rounded target weight.
    target = np . round ( atwt , digits )
    triples = np . split ( g , g . shape [ 0 ] // 3 )
    matched = [ c for c , a in zip ( triples , atwts )
                if np . round ( a , digits ) == target ]

    # No matches -> empty list (historical return convention here).
    if not matched :
        return [ ]

    stacked = np . concatenate ( matched , axis = 0 )
    return stacked . reshape ( ( stacked . shape [ 0 ] , 1 ) )
1,440
https://github.com/bskinn/opan/blob/0b1b21662df6abc971407a9386db21a8796fbfe5/opan/utils/symm.py#L698-L734
[ "def", "read_data", "(", "self", ",", "blocksize", "=", "4096", ")", ":", "frames", "=", "ctypes", ".", "c_uint", "(", "blocksize", "//", "self", ".", "_client_fmt", ".", "mBytesPerFrame", ")", "buf", "=", "ctypes", ".", "create_string_buffer", "(", "blocksize", ")", "buflist", "=", "AudioBufferList", "(", ")", "buflist", ".", "mNumberBuffers", "=", "1", "buflist", ".", "mBuffers", "[", "0", "]", ".", "mNumberChannels", "=", "self", ".", "_client_fmt", ".", "mChannelsPerFrame", "buflist", ".", "mBuffers", "[", "0", "]", ".", "mDataByteSize", "=", "blocksize", "buflist", ".", "mBuffers", "[", "0", "]", ".", "mData", "=", "ctypes", ".", "cast", "(", "buf", ",", "ctypes", ".", "c_void_p", ")", "while", "True", ":", "check", "(", "_coreaudio", ".", "ExtAudioFileRead", "(", "self", ".", "_obj", ",", "ctypes", ".", "byref", "(", "frames", ")", ",", "ctypes", ".", "byref", "(", "buflist", ")", ")", ")", "assert", "buflist", ".", "mNumberBuffers", "==", "1", "size", "=", "buflist", ".", "mBuffers", "[", "0", "]", ".", "mDataByteSize", "if", "not", "size", ":", "break", "data", "=", "ctypes", ".", "cast", "(", "buflist", ".", "mBuffers", "[", "0", "]", ".", "mData", ",", "ctypes", ".", "POINTER", "(", "ctypes", ".", "c_char", ")", ")", "blob", "=", "data", "[", ":", "size", "]", "yield", "blob" ]
Generate block - diagonal reflection matrix about nv .
def mtx_refl ( nv , reps = 1 ) :
    """Generate a block-diagonal reflection matrix about ``nv``.

    Builds the 3x3 Householder-style reflection through the plane with
    normal vector ``nv`` and replicates it ``reps`` times along the
    diagonal, giving a (3*reps, 3*reps) transform for whole geometries.

    Raises ValueError for a near-zero ``nv`` or an invalid ``reps``.
    """

    # Imports
    import numpy as np
    from scipy import linalg as spla
    from . . const import PRM

    # Reject a normal vector too small to define a direction reliably.
    if spla . norm ( nv ) < PRM . ZERO_VEC_TOL :
        raise ValueError ( "Norm of 'nv' is too small." )

    # Normalize to a unit float64 3-vector.
    nv = make_nd_vec ( nv , nd = 3 , t = np . float64 , norm = True )

    # 'reps' must be a positive scalar integer.
    if not np . isscalar ( reps ) :
        raise ValueError ( "'reps' must be scalar." )
    if not np . issubdtype ( type ( reps ) , int ) :
        raise ValueError ( "'reps' must be an integer." )
    if not reps > 0 :
        raise ValueError ( "'reps' must be a positive integer." )

    # Single-point reflection, filled symmetrically:
    #   R = I - 2 * (nv nv^T)
    base_mtx = np . zeros ( shape = ( 3 , 3 ) , dtype = np . float64 )
    for row in range ( 3 ) :
        for col in range ( row , 3 ) :
            if row == col :
                base_mtx [ row , col ] = 1 - 2 * nv [ row ] ** 2
            else :
                base_mtx [ row , col ] = base_mtx [ col , row ] = - 2 * nv [ row ] * nv [ col ]

    # Replicate along the block diagonal and return.
    return spla . block_diag ( * [ base_mtx for _ in range ( reps ) ] )
1,441
https://github.com/bskinn/opan/blob/0b1b21662df6abc971407a9386db21a8796fbfe5/opan/utils/symm.py#L779-L832
[ "def", "current_portfolio_weights", "(", "self", ")", ":", "position_values", "=", "pd", ".", "Series", "(", "{", "asset", ":", "(", "position", ".", "last_sale_price", "*", "position", ".", "amount", "*", "asset", ".", "price_multiplier", ")", "for", "asset", ",", "position", "in", "self", ".", "positions", ".", "items", "(", ")", "}", ")", "return", "position_values", "/", "self", ".", "portfolio_value" ]
Generate block - diagonal rotation matrix about ax .
def mtx_rot ( ax , theta , reps = 1 ) :
    """Generate a block-diagonal rotation matrix about ``ax``.

    Uses the Rodrigues rotation formula to build the 3x3 matrix for a
    rotation of ``theta`` radians about the unit axis derived from
    ``ax``, then replicates it ``reps`` times along the diagonal.

    Raises ValueError for a near-zero ``ax`` or invalid ``reps`` /
    ``theta`` values.
    """

    # Imports
    import numpy as np
    from scipy import linalg as spla
    from . . const import PRM

    # A nearly-zero axis has no reliable direction; refuse it.
    if spla . norm ( ax ) < PRM . ZERO_VEC_TOL :
        raise ValueError ( "Norm of 'ax' is too small." )

    # Normalize to a unit float64 3-vector.
    ax = make_nd_vec ( ax , nd = 3 , t = np . float64 , norm = True )

    # Validate 'reps' (positive scalar integer) and 'theta' (scalar).
    if not np . isscalar ( reps ) :
        raise ValueError ( "'reps' must be scalar." )
    if not np . issubdtype ( type ( reps ) , int ) :
        raise ValueError ( "'reps' must be an integer." )
    if not reps > 0 :
        raise ValueError ( "'reps' must be a positive integer." )
    if not np . isscalar ( theta ) :
        raise ValueError ( "'theta' must be scalar." )

    # Cross-product (modified Levi-Civita) matrix of the axis.
    mod_lc = np . array (
        [ [ 0 , - ax [ 2 ] , ax [ 1 ] ] ,
          [ ax [ 2 ] , 0 , - ax [ 0 ] ] ,
          [ - ax [ 1 ] , ax [ 0 ] , 0 ] ] , dtype = np . float64 )

    # Outer product ax . ax^T.
    ax_oprod = np . dot ( ax . reshape ( ( 3 , 1 ) ) , ax . reshape ( ( 1 , 3 ) ) )

    # Rodrigues formula:
    #   R = (1 - cos t) (ax ax^T) + cos t * I + sin t * [ax]_x
    base_mtx = np . add ( np . add ( ( 1.0 - np . cos ( theta ) ) * ax_oprod ,
                                     np . cos ( theta ) * np . eye ( 3 ) ) ,
                          np . sin ( theta ) * mod_lc )

    # Replicate along the block diagonal and return.
    return spla . block_diag ( * [ base_mtx for _ in range ( reps ) ] )
1,442
https://github.com/bskinn/opan/blob/0b1b21662df6abc971407a9386db21a8796fbfe5/opan/utils/symm.py#L837-L896
[ "def", "update_watervolume_v1", "(", "self", ")", ":", "der", "=", "self", ".", "parameters", ".", "derived", ".", "fastaccess", "flu", "=", "self", ".", "sequences", ".", "fluxes", ".", "fastaccess", "old", "=", "self", ".", "sequences", ".", "states", ".", "fastaccess_old", "new", "=", "self", ".", "sequences", ".", "states", ".", "fastaccess_new", "new", ".", "watervolume", "=", "(", "old", ".", "watervolume", "+", "der", ".", "seconds", "*", "(", "flu", ".", "inflow", "-", "flu", ".", "outflow", ")", "/", "1e6", ")" ]
First - Fit
def ff ( items , targets ) :
    """First-Fit bin packing.

    Places each item into the first bin (in ``targets`` order) with
    enough remaining capacity. Returns ``(bins, skip)`` where ``bins``
    is a list of ``(capacity, contents)`` pairs and ``skip`` collects
    items that fit nowhere.
    """
    bins = [ ( capacity , [ ] ) for capacity in targets ]
    skipped = [ ]
    for item in items :
        placed = False
        for capacity , contents in bins :
            # Remaining room is the bin capacity minus what's packed.
            if item <= capacity - sum ( contents ) :
                contents . append ( item )
                placed = True
                break
        if not placed :
            skipped . append ( item )
    return bins , skipped
1,443
https://github.com/daskos/mentor/blob/b5fd64e3a3192f5664fa5c03e8517cacb4e0590f/mentor/binpack.py#L22-L40
[ "def", "add_document", "(", "self", ",", "doc_url", ",", "data", ")", ":", "file_path", "=", "self", ".", "__generate_filepath", "(", ")", "with", "open", "(", "file_path", ",", "'wb'", ")", "as", "f", ":", "f", ".", "write", "(", "data", ")", "c", "=", "self", ".", "conn", ".", "cursor", "(", ")", "c", ".", "execute", "(", "\"SELECT * FROM documents WHERE url=?\"", ",", "(", "str", "(", "doc_url", ")", ",", ")", ")", "for", "row", "in", "c", ".", "fetchall", "(", ")", ":", "old_file_path", "=", "row", "[", "1", "]", "if", "os", ".", "path", ".", "isfile", "(", "old_file_path", ")", ":", "os", ".", "unlink", "(", "old_file_path", ")", "c", ".", "execute", "(", "\"DELETE FROM documents WHERE url=?\"", ",", "(", "str", "(", "doc_url", ")", ",", ")", ")", "self", ".", "conn", ".", "commit", "(", ")", "c", ".", "execute", "(", "\"INSERT INTO documents VALUES (?, ?, ?)\"", ",", "(", "str", "(", "doc_url", ")", ",", "file_path", ",", "self", ".", "__now_iso_8601", "(", ")", ")", ")", "self", ".", "conn", ".", "commit", "(", ")", "c", ".", "close", "(", ")" ]
First - Fit Decreasing
def ffd ( items , targets , ** kwargs ) :
    """First-Fit Decreasing bin packing.

    Sorts the items by descending weight (per the project ``weight``
    helper, which receives ``**kwargs``) before delegating to the
    first-fit packer.
    """
    # Pair each item with its weight, order heaviest first, then strip
    # the weights back off before packing.
    weighted = sorted ( zip ( items , weight ( items , ** kwargs ) ) ,
                        key = operator . itemgetter ( 1 ) , reverse = True )
    ordered = map ( operator . itemgetter ( 0 ) , weighted )
    return ff ( ordered , targets )
1,444
https://github.com/daskos/mentor/blob/b5fd64e3a3192f5664fa5c03e8517cacb4e0590f/mentor/binpack.py#L43-L58
[ "def", "generate_http_manifest", "(", "self", ")", ":", "base_path", "=", "os", ".", "path", ".", "dirname", "(", "self", ".", "translate_path", "(", "self", ".", "path", ")", ")", "self", ".", "dataset", "=", "dtoolcore", ".", "DataSet", ".", "from_uri", "(", "base_path", ")", "admin_metadata_fpath", "=", "os", ".", "path", ".", "join", "(", "base_path", ",", "\".dtool\"", ",", "\"dtool\"", ")", "with", "open", "(", "admin_metadata_fpath", ")", "as", "fh", ":", "admin_metadata", "=", "json", ".", "load", "(", "fh", ")", "http_manifest", "=", "{", "\"admin_metadata\"", ":", "admin_metadata", ",", "\"manifest_url\"", ":", "self", ".", "generate_url", "(", "\".dtool/manifest.json\"", ")", ",", "\"readme_url\"", ":", "self", ".", "generate_url", "(", "\"README.yml\"", ")", ",", "\"overlays\"", ":", "self", ".", "generate_overlay_urls", "(", ")", ",", "\"item_urls\"", ":", "self", ".", "generate_item_urls", "(", ")", "}", "return", "bytes", "(", "json", ".", "dumps", "(", "http_manifest", ")", ",", "\"utf-8\"", ")" ]
Max - Rest
def mr ( items , targets , ** kwargs ) :
    """Max-Rest bin packing.

    Each item is offered only to the bin whose remaining capacity
    currently scores highest per the project ``weight`` helper; items
    that do not fit in that single bin are skipped. Returns
    ``(bins, skip)``.
    """
    bins = [ ( capacity , [ ] ) for capacity in targets ]
    skipped = [ ]
    for item in items :
        # Remaining room per bin, re-evaluated after every placement.
        remaining = [ capacity - sum ( contents ) for capacity , contents in bins ]
        scores = weight ( remaining , ** kwargs )
        # Choose the bin whose remaining capacity scores highest.
        ( target , contents ) , room , _ = max (
            zip ( bins , remaining , scores ) , key = operator . itemgetter ( 2 ) )
        if item <= room :
            contents . append ( item )
        else :
            skipped . append ( item )
    return bins , skipped
1,445
https://github.com/daskos/mentor/blob/b5fd64e3a3192f5664fa5c03e8517cacb4e0590f/mentor/binpack.py#L61-L79
[ "def", "add_json_mask", "(", "self", ",", "start", ",", "method_str", ",", "json_producer", ")", ":", "def", "send_json", "(", "drh", ",", "rem_path", ")", ":", "obj", "=", "json_producer", "(", "drh", ",", "rem_path", ")", "if", "not", "isinstance", "(", "obj", ",", "Response", ")", ":", "obj", "=", "Response", "(", "obj", ")", "ctype", "=", "obj", ".", "get_ctype", "(", "\"application/json\"", ")", "code", "=", "obj", ".", "code", "obj", "=", "obj", ".", "response", "if", "obj", "is", "None", ":", "drh", ".", "send_error", "(", "404", ",", "\"File not found\"", ")", "return", "None", "f", "=", "BytesIO", "(", ")", "json_str", "=", "json_dumps", "(", "obj", ")", "if", "isinstance", "(", "json_str", ",", "(", "str", ",", "unicode", ")", ")", ":", "try", ":", "json_str", "=", "json_str", ".", "decode", "(", "'utf8'", ")", "except", "AttributeError", ":", "pass", "json_str", "=", "json_str", ".", "encode", "(", "'utf8'", ")", "f", ".", "write", "(", "json_str", ")", "f", ".", "flush", "(", ")", "size", "=", "f", ".", "tell", "(", ")", "f", ".", "seek", "(", "0", ")", "# handle ETag caching", "if", "drh", ".", "request_version", ">=", "\"HTTP/1.1\"", ":", "e_tag", "=", "\"{0:x}\"", ".", "format", "(", "zlib", ".", "crc32", "(", "f", ".", "read", "(", ")", ")", "&", "0xFFFFFFFF", ")", "f", ".", "seek", "(", "0", ")", "match", "=", "_getheader", "(", "drh", ".", "headers", ",", "'if-none-match'", ")", "if", "match", "is", "not", "None", ":", "if", "drh", ".", "check_cache", "(", "e_tag", ",", "match", ")", ":", "f", ".", "close", "(", ")", "return", "None", "drh", ".", "send_header", "(", "\"ETag\"", ",", "e_tag", ",", "end_header", "=", "True", ")", "drh", ".", "send_header", "(", "\"Cache-Control\"", ",", "\"max-age={0}\"", ".", "format", "(", "self", ".", "max_age", ")", ",", "end_header", "=", "True", ")", "drh", ".", "send_response", "(", "code", ")", "drh", ".", "send_header", "(", "\"Content-Type\"", ",", "ctype", ")", "drh", ".", "send_header", "(", 
"\"Content-Length\"", ",", "size", ")", "drh", ".", "end_headers", "(", ")", "return", "f", "self", ".", "_add_file_mask", "(", "start", ",", "method_str", ",", "send_json", ")" ]
Best - Fit
def bf ( items , targets , ** kwargs ) :
    """Best-Fit bin packing.

    For each item, considers every bin with room and places the item in
    the one whose leftover capacity (after placement) has the minimal
    weight per the project ``weight`` helper. Items fitting nowhere are
    skipped. Returns ``(bins, skip)``.
    """
    bins = [ ( capacity , [ ] ) for capacity in targets ]
    skipped = [ ]
    for item in items :
        candidates = [ ]  # contents lists of bins with room for the item
        leftovers = [ ]   # would-be remaining capacity after placement
        for capacity , contents in bins :
            room = capacity - sum ( contents )
            if item <= room :
                candidates . append ( contents )
                leftovers . append ( room - item )
        if leftovers :
            # Pick the candidate bin whose leftover weighs the least.
            best , _ = min ( zip ( candidates , weight ( leftovers , ** kwargs ) ) ,
                             key = operator . itemgetter ( 1 ) )
            best . append ( item )
        else :
            skipped . append ( item )
    return bins , skipped
1,446
https://github.com/daskos/mentor/blob/b5fd64e3a3192f5664fa5c03e8517cacb4e0590f/mentor/binpack.py#L90-L113
[ "def", "add_document", "(", "self", ",", "doc_url", ",", "data", ")", ":", "file_path", "=", "self", ".", "__generate_filepath", "(", ")", "with", "open", "(", "file_path", ",", "'wb'", ")", "as", "f", ":", "f", ".", "write", "(", "data", ")", "c", "=", "self", ".", "conn", ".", "cursor", "(", ")", "c", ".", "execute", "(", "\"SELECT * FROM documents WHERE url=?\"", ",", "(", "str", "(", "doc_url", ")", ",", ")", ")", "for", "row", "in", "c", ".", "fetchall", "(", ")", ":", "old_file_path", "=", "row", "[", "1", "]", "if", "os", ".", "path", ".", "isfile", "(", "old_file_path", ")", ":", "os", ".", "unlink", "(", "old_file_path", ")", "c", ".", "execute", "(", "\"DELETE FROM documents WHERE url=?\"", ",", "(", "str", "(", "doc_url", ")", ",", ")", ")", "self", ".", "conn", ".", "commit", "(", ")", "c", ".", "execute", "(", "\"INSERT INTO documents VALUES (?, ?, ?)\"", ",", "(", "str", "(", "doc_url", ")", ",", "file_path", ",", "self", ".", "__now_iso_8601", "(", ")", ")", ")", "self", ".", "conn", ".", "commit", "(", ")", "c", ".", "close", "(", ")" ]
Best - Fit Decreasing
def bfd ( items , targets , ** kwargs ) :
    """Best-Fit Decreasing bin packing.

    Sorts the items by descending weight (per the project ``weight``
    helper) before delegating to the best-fit packer, forwarding
    ``**kwargs`` for the leftover-weighting step.
    """
    weighted = sorted ( zip ( items , weight ( items , ** kwargs ) ) ,
                        key = operator . itemgetter ( 1 ) , reverse = True )
    ordered = map ( operator . itemgetter ( 0 ) , weighted )
    return bf ( ordered , targets , ** kwargs )
1,447
https://github.com/daskos/mentor/blob/b5fd64e3a3192f5664fa5c03e8517cacb4e0590f/mentor/binpack.py#L116-L124
[ "def", "generate_http_manifest", "(", "self", ")", ":", "base_path", "=", "os", ".", "path", ".", "dirname", "(", "self", ".", "translate_path", "(", "self", ".", "path", ")", ")", "self", ".", "dataset", "=", "dtoolcore", ".", "DataSet", ".", "from_uri", "(", "base_path", ")", "admin_metadata_fpath", "=", "os", ".", "path", ".", "join", "(", "base_path", ",", "\".dtool\"", ",", "\"dtool\"", ")", "with", "open", "(", "admin_metadata_fpath", ")", "as", "fh", ":", "admin_metadata", "=", "json", ".", "load", "(", "fh", ")", "http_manifest", "=", "{", "\"admin_metadata\"", ":", "admin_metadata", ",", "\"manifest_url\"", ":", "self", ".", "generate_url", "(", "\".dtool/manifest.json\"", ")", ",", "\"readme_url\"", ":", "self", ".", "generate_url", "(", "\"README.yml\"", ")", ",", "\"overlays\"", ":", "self", ".", "generate_overlay_urls", "(", ")", ",", "\"item_urls\"", ":", "self", ".", "generate_item_urls", "(", ")", "}", "return", "bytes", "(", "json", ".", "dumps", "(", "http_manifest", ")", ",", "\"utf-8\"", ")" ]
A VariantSequence and ReferenceContext may contain a different number of nucleotides before the variant locus . Furthermore the VariantSequence is always expressed in terms of the positive strand against which it aligned but reference transcripts may have sequences from the negative strand of the genome . Take the reverse complement of the VariantSequence if the ReferenceContext is from negative strand transcripts and trim either sequence to ensure that the prefixes are of the same length .
def trim_sequences ( variant_sequence , reference_context ) :
    """Align a VariantSequence with a ReferenceContext for comparison.

    The VariantSequence is always expressed relative to the positive
    strand, so when the reference context comes from negative-strand
    transcripts the prefix/alt/suffix are reverse-complemented (with
    prefix and suffix swapping roles). The longer of the two prefixes
    is then trimmed from the left so both have the same length.

    Returns the 6-tuple::

        (cdna_prefix, cdna_alt, cdna_suffix,
         reference_sequence_before_variant,
         reference_sequence_after_variant,
         n_trimmed_from_reference)
    """
    cdna_prefix = variant_sequence . prefix
    cdna_alt = variant_sequence . alt
    cdna_suffix = variant_sequence . suffix

    if reference_context . strand == "-" :
        # PREFIX|VARIANT|SUFFIX on the genome reads as the reverse
        # complement XIFFUS|TNAIRAV|XIFERP on the transcript strand, so
        # the reverse-complemented suffix becomes the new prefix and
        # vice versa.
        cdna_prefix , cdna_alt , cdna_suffix = (
            reverse_complement_dna ( cdna_suffix ) ,
            reverse_complement_dna ( cdna_alt ) ,
            reverse_complement_dna ( cdna_prefix ) )

    ref_before = reference_context . sequence_before_variant_locus
    ref_after = reference_context . sequence_after_variant_locus

    # Trim whichever prefix is longer so the two match in length.
    diff = len ( ref_before ) - len ( cdna_prefix )
    if diff > 0 :
        n_trimmed_from_reference , n_trimmed_from_variant = diff , 0
    elif diff < 0 :
        n_trimmed_from_reference , n_trimmed_from_variant = 0 , - diff
    else :
        n_trimmed_from_reference = n_trimmed_from_variant = 0

    ref_before = ref_before [ n_trimmed_from_reference : ]
    cdna_prefix = cdna_prefix [ n_trimmed_from_variant : ]

    return (
        cdna_prefix ,
        cdna_alt ,
        cdna_suffix ,
        ref_before ,
        ref_after ,
        n_trimmed_from_reference )
1,448
https://github.com/openvax/isovar/blob/b39b684920e3f6b344851d6598a1a1c67bce913b/isovar/variant_sequence_in_reading_frame.py#L140-L213
[ "def", "fetch", "(", "self", ",", "raise_exc", "=", "True", ")", ":", "self", ".", "_request", "(", "GET", ",", "raise_exc", "=", "raise_exc", ")", "# ingests response", "self", ".", "fetched", "=", "True", "return", "self", ".", "state", ".", "copy", "(", ")" ]
Computes the number of mismatching nucleotides between two cDNA sequences before a variant locus .
def count_mismatches_before_variant ( reference_prefix , cdna_prefix ) :
    """Count mismatching nucleotides between two equal-length cDNA
    prefixes preceding a variant locus.

    Raises ValueError when the two sequences differ in length.
    """
    if len ( reference_prefix ) != len ( cdna_prefix ) :
        raise ValueError (
            "Expected reference prefix '%s' to be same length as %s" % (
                reference_prefix , cdna_prefix ) )
    # Positional comparison; each differing pair is one mismatch.
    mismatches = 0
    for ref_base , cdna_base in zip ( reference_prefix , cdna_prefix ) :
        if ref_base != cdna_base :
            mismatches += 1
    return mismatches
1,449
https://github.com/openvax/isovar/blob/b39b684920e3f6b344851d6598a1a1c67bce913b/isovar/variant_sequence_in_reading_frame.py#L216-L233
[ "def", "wait_all_futures", "(", "self", ",", "futures", ",", "timeout", "=", "None", ",", "event_timeout", "=", "None", ")", ":", "# type: (Union[List[Future], Future, None], float, float) -> None", "if", "timeout", "is", "None", ":", "end", "=", "None", "else", ":", "end", "=", "time", ".", "time", "(", ")", "+", "timeout", "if", "not", "isinstance", "(", "futures", ",", "list", ")", ":", "if", "futures", ":", "futures", "=", "[", "futures", "]", "else", ":", "futures", "=", "[", "]", "filtered_futures", "=", "[", "]", "for", "f", "in", "futures", ":", "if", "f", ".", "done", "(", ")", ":", "if", "f", ".", "exception", "(", ")", "is", "not", "None", ":", "raise", "f", ".", "exception", "(", ")", "else", ":", "filtered_futures", ".", "append", "(", "f", ")", "while", "filtered_futures", ":", "if", "event_timeout", "is", "not", "None", ":", "until", "=", "time", ".", "time", "(", ")", "+", "event_timeout", "if", "end", "is", "not", "None", ":", "until", "=", "min", "(", "until", ",", "end", ")", "else", ":", "until", "=", "end", "self", ".", "_service_futures", "(", "filtered_futures", ",", "until", ")" ]
Computes the number of mismatching nucleotides between two cDNA sequences after a variant locus .
def count_mismatches_after_variant ( reference_suffix , cdna_suffix ) :
    """Count mismatching nucleotides between two cDNA sequences after a
    variant locus.

    Compares the overlapping portion position-by-position; any extra
    bases in ``cdna_suffix`` beyond the reference length (e.g. the read
    running into an intron) each count as one additional mismatch.
    """
    overhang = len ( cdna_suffix ) - len ( reference_suffix )
    overlapping_mismatches = sum (
        ref_base != cdna_base
        for ref_base , cdna_base in zip ( reference_suffix , cdna_suffix ) )
    return overlapping_mismatches + ( overhang if overhang > 0 else 0 )
1,450
https://github.com/openvax/isovar/blob/b39b684920e3f6b344851d6598a1a1c67bce913b/isovar/variant_sequence_in_reading_frame.py#L236-L253
[ "def", "setOptimizedForIPTV", "(", "self", ",", "status", ",", "wifiInterfaceId", "=", "1", ",", "timeout", "=", "1", ")", ":", "namespace", "=", "Fritz", ".", "getServiceType", "(", "\"setOptimizedForIPTV\"", ")", "+", "str", "(", "wifiInterfaceId", ")", "uri", "=", "self", ".", "getControlURL", "(", "namespace", ")", "if", "status", ":", "setStatus", "=", "1", "else", ":", "setStatus", "=", "0", "arguments", "=", "{", "\"timeout\"", ":", "timeout", ",", "\"NewX_AVM-DE_IPTVoptimize\"", ":", "setStatus", "}", "self", ".", "execute", "(", "uri", ",", "namespace", ",", "\"X_AVM-DE_SetIPTVOptimized\"", ",", "*", "*", "arguments", ")" ]
Once we ve aligned the variant sequence to the ReferenceContext we need to transfer reading frame from the reference transcripts to the variant sequences .
def compute_offset_to_first_complete_codon (
        offset_to_first_complete_reference_codon ,
        n_trimmed_from_reference_sequence ) :
    """Transfer a reference reading frame onto a left-trimmed sequence.

    When fewer bases were trimmed than the offset to the first complete
    reference codon, the remaining offset is simply the difference.
    Otherwise the trim cut into (or past) the first codon, and the new
    offset is however many bases are needed to reach the next codon
    boundary.
    """
    remaining = ( offset_to_first_complete_reference_codon
                  - n_trimmed_from_reference_sequence )
    if remaining >= 0 :
        return remaining
    # Trimming passed the first complete codon; (-remaining) bases were
    # removed after it. Advance to the next codon boundary.
    frame = ( - remaining ) % 3
    return ( 3 - frame ) % 3
1,451
https://github.com/openvax/isovar/blob/b39b684920e3f6b344851d6598a1a1c67bce913b/isovar/variant_sequence_in_reading_frame.py#L256-L282
[ "def", "GetEntries", "(", "self", ",", "parser_mediator", ",", "match", "=", "None", ",", "*", "*", "unused_kwargs", ")", ":", "devices", "=", "match", ".", "get", "(", "'Devices'", ",", "{", "}", ")", "for", "device_identifier", ",", "device_information", "in", "iter", "(", "devices", ".", "items", "(", ")", ")", ":", "datetime_value", "=", "device_information", ".", "get", "(", "'Connected'", ",", "None", ")", "if", "not", "datetime_value", ":", "continue", "event_data", "=", "IPodPlistEventData", "(", ")", "event_data", ".", "device_id", "=", "device_identifier", "# TODO: refactor.", "for", "key", ",", "value", "in", "iter", "(", "device_information", ".", "items", "(", ")", ")", ":", "if", "key", "==", "'Connected'", ":", "continue", "attribute_name", "=", "key", ".", "lower", "(", ")", ".", "replace", "(", "' '", ",", "'_'", ")", "setattr", "(", "event_data", ",", "attribute_name", ",", "value", ")", "event", "=", "time_events", ".", "PythonDatetimeEvent", "(", "datetime_value", ",", "definitions", ".", "TIME_DESCRIPTION_LAST_CONNECTED", ")", "parser_mediator", ".", "ProduceEventWithEventData", "(", "event", ",", "event_data", ")" ]
Iteratively trim low - coverage subsequences of a variant sequence until it either matches the given reference context or there are too few nucleotides left in the variant sequence .
def match_variant_sequence_to_reference_context ( variant_sequence , reference_context , min_transcript_prefix_length , max_transcript_mismatches , include_mismatches_after_variant = False , max_trimming_attempts = 2 ) : variant_sequence_in_reading_frame = None # if we can't get the variant sequence to match this reference # context then keep trimming it by coverage until either for i in range ( max_trimming_attempts + 1 ) : # check the reverse-complemented prefix if the reference context is # on the negative strand since variant sequence is aligned to # genomic DNA (positive strand) variant_sequence_too_short = ( ( reference_context . strand == "+" and len ( variant_sequence . prefix ) < min_transcript_prefix_length ) or ( reference_context . strand == "-" and len ( variant_sequence . suffix ) < min_transcript_prefix_length ) ) if variant_sequence_too_short : logger . info ( "Variant sequence %s shorter than min allowed %d (iter=%d)" , variant_sequence , min_transcript_prefix_length , i + 1 ) return None variant_sequence_in_reading_frame = VariantSequenceInReadingFrame . from_variant_sequence_and_reference_context ( variant_sequence = variant_sequence , reference_context = reference_context ) if variant_sequence_in_reading_frame is None : return None n_mismatch_before_variant = ( variant_sequence_in_reading_frame . number_mismatches_before_variant ) n_mismatch_after_variant = ( variant_sequence_in_reading_frame . number_mismatches_after_variant ) logger . info ( "Iter #%d/%d: %s" % ( i + 1 , max_trimming_attempts + 1 , variant_sequence_in_reading_frame ) ) total_mismatches = n_mismatch_before_variant if include_mismatches_after_variant : total_mismatches += n_mismatch_after_variant if total_mismatches <= max_transcript_mismatches : # if we got a variant sequence + reading frame with sufficiently # few mismatches then call it a day return variant_sequence_in_reading_frame logger . 
info ( ( "Too many mismatches (%d) between variant sequence %s and " "reference context %s (attempt=%d/%d)" ) , n_mismatch_before_variant , variant_sequence , reference_context , i + 1 , max_trimming_attempts + 1 ) # if portions of the sequence are supported by only 1 read # then try trimming to 2 to see if the better supported # subsequence can be better matched against the reference current_min_coverage = variant_sequence . min_coverage ( ) logger . info ( "Trimming to subsequence covered by at least %d reads" , current_min_coverage + 1 ) variant_sequence = variant_sequence . trim_by_coverage ( current_min_coverage + 1 ) return None
1,452
https://github.com/openvax/isovar/blob/b39b684920e3f6b344851d6598a1a1c67bce913b/isovar/variant_sequence_in_reading_frame.py#L285-L392
[ "def", "setup", "(", "self", ")", ":", "[", "c", ".", "start", "(", ")", "for", "c", "in", "self", ".", "controllers", "]", "[", "c", ".", "wait_to_start", "(", ")", "for", "c", "in", "self", ".", "controllers", "]" ]
If codon table is missing stop codons then add them .
def _check_codons ( self ) : for stop_codon in self . stop_codons : if stop_codon in self . codon_table : if self . codon_table [ stop_codon ] != "*" : raise ValueError ( ( "Codon '%s' not found in stop_codons, but codon table " "indicates that it should be" ) % ( stop_codon , ) ) else : self . codon_table [ stop_codon ] = "*" for start_codon in self . start_codons : if start_codon not in self . codon_table : raise ValueError ( "Start codon '%s' missing from codon table" % ( start_codon , ) ) for codon , amino_acid in self . codon_table . items ( ) : if amino_acid == "*" and codon not in self . stop_codons : raise ValueError ( "Non-stop codon '%s' can't translate to '*'" % ( codon , ) ) if len ( self . codon_table ) != 64 : raise ValueError ( "Expected 64 codons but found %d in codon table" % ( len ( self . codon_table , ) ) )
1,453
https://github.com/openvax/isovar/blob/b39b684920e3f6b344851d6598a1a1c67bce913b/isovar/genetic_code.py#L26-L54
[ "def", "write_backup_state_to_json_file", "(", "self", ")", ":", "start_time", "=", "time", ".", "time", "(", ")", "state_file_path", "=", "self", ".", "config", "[", "\"json_state_file_path\"", "]", "self", ".", "state", "[", "\"walreceivers\"", "]", "=", "{", "key", ":", "{", "\"latest_activity\"", ":", "value", ".", "latest_activity", ",", "\"running\"", ":", "value", ".", "running", ",", "\"last_flushed_lsn\"", ":", "value", ".", "last_flushed_lsn", "}", "for", "key", ",", "value", "in", "self", ".", "walreceivers", ".", "items", "(", ")", "}", "self", ".", "state", "[", "\"pg_receivexlogs\"", "]", "=", "{", "key", ":", "{", "\"latest_activity\"", ":", "value", ".", "latest_activity", ",", "\"running\"", ":", "value", ".", "running", "}", "for", "key", ",", "value", "in", "self", ".", "receivexlogs", ".", "items", "(", ")", "}", "self", ".", "state", "[", "\"pg_basebackups\"", "]", "=", "{", "key", ":", "{", "\"latest_activity\"", ":", "value", ".", "latest_activity", ",", "\"running\"", ":", "value", ".", "running", "}", "for", "key", ",", "value", "in", "self", ".", "basebackups", ".", "items", "(", ")", "}", "self", ".", "state", "[", "\"compressors\"", "]", "=", "[", "compressor", ".", "state", "for", "compressor", "in", "self", ".", "compressors", "]", "self", ".", "state", "[", "\"transfer_agents\"", "]", "=", "[", "ta", ".", "state", "for", "ta", "in", "self", ".", "transfer_agents", "]", "self", ".", "state", "[", "\"queues\"", "]", "=", "{", "\"compression_queue\"", ":", "self", ".", "compression_queue", ".", "qsize", "(", ")", ",", "\"transfer_queue\"", ":", "self", ".", "transfer_queue", ".", "qsize", "(", ")", ",", "}", "self", ".", "log", ".", "debug", "(", "\"Writing JSON state file to %r\"", ",", "state_file_path", ")", "write_json_file", "(", "state_file_path", ",", "self", ".", "state", ")", "self", ".", "log", ".", "debug", "(", "\"Wrote JSON state file to disk, took %.4fs\"", ",", "time", ".", "time", "(", ")", "-", "start_time", ")" ]
Make copy of this GeneticCode object with optional replacement values for all fields .
def copy ( self , name , start_codons = None , stop_codons = None , codon_table = None , codon_table_changes = None ) : new_start_codons = ( self . start_codons . copy ( ) if start_codons is None else start_codons ) new_stop_codons = ( self . stop_codons . copy ( ) if stop_codons is None else stop_codons ) new_codon_table = ( self . codon_table . copy ( ) if codon_table is None else codon_table ) if codon_table_changes is not None : new_codon_table . update ( codon_table_changes ) return GeneticCode ( name = name , start_codons = new_start_codons , stop_codons = new_stop_codons , codon_table = new_codon_table )
1,454
https://github.com/openvax/isovar/blob/b39b684920e3f6b344851d6598a1a1c67bce913b/isovar/genetic_code.py#L100-L133
[ "def", "use_comparative_book_view", "(", "self", ")", ":", "self", ".", "_book_view", "=", "COMPARATIVE", "# self._get_provider_session('comment_book_session') # To make sure the session is tracked", "for", "session", "in", "self", ".", "_get_provider_sessions", "(", ")", ":", "try", ":", "session", ".", "use_comparative_book_view", "(", ")", "except", "AttributeError", ":", "pass" ]
Start listening to changes
def start ( self ) : self . running = True self . thread = threading . Thread ( target = self . _main_loop ) self . thread . start ( )
1,455
https://github.com/centralniak/py-raildriver/blob/c7f5f551e0436451b9507fc63a62e49a229282b9/raildriver/events.py#L86-L92
[ "def", "add_command_line_options_optparse", "(", "cls", ",", "optparser", ")", ":", "def", "set_parameter", "(", "option", ",", "opt", ",", "value", ",", "parser", ")", ":", "\"\"\"callback function for OptionParser\"\"\"", "cls", ".", "config", "[", "opt", "]", "=", "value", "if", "opt", "==", "\"--stomp-conf\"", ":", "cls", ".", "load_configuration_file", "(", "value", ")", "optparser", ".", "add_option", "(", "\"--stomp-host\"", ",", "metavar", "=", "\"HOST\"", ",", "default", "=", "cls", ".", "defaults", ".", "get", "(", "\"--stomp-host\"", ")", ",", "help", "=", "\"Stomp broker address, default '%default'\"", ",", "type", "=", "\"string\"", ",", "nargs", "=", "1", ",", "action", "=", "\"callback\"", ",", "callback", "=", "set_parameter", ",", ")", "optparser", ".", "add_option", "(", "\"--stomp-port\"", ",", "metavar", "=", "\"PORT\"", ",", "default", "=", "cls", ".", "defaults", ".", "get", "(", "\"--stomp-port\"", ")", ",", "help", "=", "\"Stomp broker port, default '%default'\"", ",", "type", "=", "\"int\"", ",", "nargs", "=", "1", ",", "action", "=", "\"callback\"", ",", "callback", "=", "set_parameter", ",", ")", "optparser", ".", "add_option", "(", "\"--stomp-user\"", ",", "metavar", "=", "\"USER\"", ",", "default", "=", "cls", ".", "defaults", ".", "get", "(", "\"--stomp-user\"", ")", ",", "help", "=", "\"Stomp user, default '%default'\"", ",", "type", "=", "\"string\"", ",", "nargs", "=", "1", ",", "action", "=", "\"callback\"", ",", "callback", "=", "set_parameter", ",", ")", "optparser", ".", "add_option", "(", "\"--stomp-pass\"", ",", "metavar", "=", "\"PASS\"", ",", "default", "=", "cls", ".", "defaults", ".", "get", "(", "\"--stomp-pass\"", ")", ",", "help", "=", "\"Stomp password\"", ",", "type", "=", "\"string\"", ",", "nargs", "=", "1", ",", "action", "=", "\"callback\"", ",", "callback", "=", "set_parameter", ",", ")", "optparser", ".", "add_option", "(", "\"--stomp-prfx\"", ",", "metavar", "=", "\"PRE\"", ",", "default", "=", "cls", ".", 
"defaults", ".", "get", "(", "\"--stomp-prfx\"", ")", ",", "help", "=", "\"Stomp namespace prefix, default '%default'\"", ",", "type", "=", "\"string\"", ",", "nargs", "=", "1", ",", "action", "=", "\"callback\"", ",", "callback", "=", "set_parameter", ",", ")", "optparser", ".", "add_option", "(", "\"--stomp-conf\"", ",", "metavar", "=", "\"CNF\"", ",", "default", "=", "cls", ".", "defaults", ".", "get", "(", "\"--stomp-conf\"", ")", ",", "help", "=", "\"Stomp configuration file containing connection information, disables default values\"", ",", "type", "=", "\"string\"", ",", "nargs", "=", "1", ",", "action", "=", "\"callback\"", ",", "callback", "=", "set_parameter", ",", ")" ]
Subscribe to given fields .
def subscribe ( self , field_names ) : available_controls = dict ( self . raildriver . get_controller_list ( ) ) . values ( ) for field in field_names : if field not in available_controls : raise ValueError ( 'Cannot subscribe to a missing controller {}' . format ( field ) ) self . subscribed_fields = field_names
1,456
https://github.com/centralniak/py-raildriver/blob/c7f5f551e0436451b9507fc63a62e49a229282b9/raildriver/events.py#L101-L127
[ "def", "close", "(", "self", ")", ":", "if", "not", "(", "yield", "from", "super", "(", ")", ".", "close", "(", ")", ")", ":", "return", "False", "for", "adapter", "in", "self", ".", "_ethernet_adapters", ".", "values", "(", ")", ":", "if", "adapter", "is", "not", "None", ":", "for", "nio", "in", "adapter", ".", "ports", ".", "values", "(", ")", ":", "if", "nio", "and", "isinstance", "(", "nio", ",", "NIOUDP", ")", ":", "self", ".", "manager", ".", "port_manager", ".", "release_udp_port", "(", "nio", ".", "lport", ",", "self", ".", "_project", ")", "try", ":", "self", ".", "acpi_shutdown", "=", "False", "yield", "from", "self", ".", "stop", "(", ")", "except", "VMwareError", ":", "pass", "if", "self", ".", "linked_clone", ":", "yield", "from", "self", ".", "manager", ".", "remove_from_vmware_inventory", "(", "self", ".", "_vmx_path", ")" ]
Change the dimensions of the matrix into which the pattern will be drawn . Users of this class should call this method rather than changing the bounds xdensity and ydensity parameters directly . Subclasses can override this method to update any internal data structures that may depend on the matrix dimensions .
def set_matrix_dimensions ( self , bounds , xdensity , ydensity ) : self . bounds = bounds self . xdensity = xdensity self . ydensity = ydensity scs = SheetCoordinateSystem ( bounds , xdensity , ydensity ) for of in self . output_fns : if isinstance ( of , TransferFn ) : of . initialize ( SCS = scs , shape = scs . shape )
1,457
https://github.com/pyviz/imagen/blob/53c5685c880f54b42795964d8db50b02e8590e88/imagen/patterngenerator.py#L273-L288
[ "def", "set_max_order_count", "(", "self", ",", "max_count", ",", "on_error", "=", "'fail'", ")", ":", "control", "=", "MaxOrderCount", "(", "on_error", ",", "max_count", ")", "self", ".", "register_trading_control", "(", "control", ")" ]
Save the state of the output functions to be restored with state_pop .
def state_push ( self ) : for of in self . output_fns : if hasattr ( of , 'state_push' ) : of . state_push ( ) super ( PatternGenerator , self ) . state_push ( )
1,458
https://github.com/pyviz/imagen/blob/53c5685c880f54b42795964d8db50b02e8590e88/imagen/patterngenerator.py#L290-L295
[ "def", "wcs_add_energy_axis", "(", "wcs", ",", "energies", ")", ":", "if", "wcs", ".", "naxis", "!=", "2", ":", "raise", "Exception", "(", "'wcs_add_energy_axis, input WCS naxis != 2 %i'", "%", "wcs", ".", "naxis", ")", "w", "=", "WCS", "(", "naxis", "=", "3", ")", "w", ".", "wcs", ".", "crpix", "[", "0", "]", "=", "wcs", ".", "wcs", ".", "crpix", "[", "0", "]", "w", ".", "wcs", ".", "crpix", "[", "1", "]", "=", "wcs", ".", "wcs", ".", "crpix", "[", "1", "]", "w", ".", "wcs", ".", "ctype", "[", "0", "]", "=", "wcs", ".", "wcs", ".", "ctype", "[", "0", "]", "w", ".", "wcs", ".", "ctype", "[", "1", "]", "=", "wcs", ".", "wcs", ".", "ctype", "[", "1", "]", "w", ".", "wcs", ".", "crval", "[", "0", "]", "=", "wcs", ".", "wcs", ".", "crval", "[", "0", "]", "w", ".", "wcs", ".", "crval", "[", "1", "]", "=", "wcs", ".", "wcs", ".", "crval", "[", "1", "]", "w", ".", "wcs", ".", "cdelt", "[", "0", "]", "=", "wcs", ".", "wcs", ".", "cdelt", "[", "0", "]", "w", ".", "wcs", ".", "cdelt", "[", "1", "]", "=", "wcs", ".", "wcs", ".", "cdelt", "[", "1", "]", "w", "=", "WCS", "(", "w", ".", "to_header", "(", ")", ")", "w", ".", "wcs", ".", "crpix", "[", "2", "]", "=", "1", "w", ".", "wcs", ".", "crval", "[", "2", "]", "=", "energies", "[", "0", "]", "w", ".", "wcs", ".", "cdelt", "[", "2", "]", "=", "energies", "[", "1", "]", "-", "energies", "[", "0", "]", "w", ".", "wcs", ".", "ctype", "[", "2", "]", "=", "'Energy'", "return", "w" ]
Restore the state of the output functions saved by state_push .
def state_pop ( self ) : for of in self . output_fns : if hasattr ( of , 'state_pop' ) : of . state_pop ( ) super ( PatternGenerator , self ) . state_pop ( )
1,459
https://github.com/pyviz/imagen/blob/53c5685c880f54b42795964d8db50b02e8590e88/imagen/patterngenerator.py#L298-L303
[ "def", "getTripInfo", "(", "triple", ")", ":", "col_names", "=", "[", "'mjdate'", ",", "'filter'", ",", "'elongation'", ",", "'discovery'", ",", "'checkup'", ",", "'recovery'", ",", "'iq'", ",", "'block'", "]", "sql", "=", "\"SELECT mjdate md,\"", "sql", "=", "sql", "+", "\" filter, avg(elongation), d.id, checkup.checkup, recovery.recovery , avg(obs_iq_refccd), b.qname \"", "sql", "=", "sql", "+", "\"FROM triple_members t JOIN bucket.exposure e ON t.expnum=e.expnum \"", "sql", "=", "sql", "+", "\"JOIN bucket.blocks b ON b.expnum=e.expnum \"", "sql", "=", "sql", "+", "\"JOIN bucket.circumstance c on e.expnum=c.expnum \"", "sql", "=", "sql", "+", "\"LEFT JOIN discovery d ON t.triple=d.triple \"", "sql", "=", "sql", "+", "\"LEFT JOIN checkup ON t.triple=checkup.triple \"", "sql", "=", "sql", "+", "\"LEFT JOIN recovery ON t.triple=recovery.triple \"", "sql", "=", "sql", "+", "\"WHERE t.triple=%s \"", "sql", "=", "sql", "+", "\"GROUP BY t.triple ORDER BY t.triple \"", "cfeps", ".", "execute", "(", "sql", ",", "(", "triple", ",", ")", ")", "rows", "=", "cfeps", ".", "fetchall", "(", ")", "result", "=", "{", "}", "#import datetime", "for", "idx", "in", "range", "(", "len", "(", "rows", "[", "0", "]", ")", ")", ":", "result", "[", "col_names", "[", "idx", "]", "]", "=", "rows", "[", "0", "]", "[", "idx", "]", "return", "result" ]
Returns a PIL image for this pattern overriding parameters if provided .
def pil ( self , * * params_to_override ) : from PIL . Image import fromarray nchans = self . num_channels ( ) if nchans in [ 0 , 1 ] : mode , arr = None , self ( * * params_to_override ) arr = ( 255.0 / arr . max ( ) * ( arr - arr . min ( ) ) ) . astype ( np . uint8 ) elif nchans in [ 3 , 4 ] : mode = 'RGB' if nchans == 3 else 'RGBA' arr = np . dstack ( self . channels ( * * params_to_override ) . values ( ) [ 1 : ] ) arr = ( 255.0 * arr ) . astype ( np . uint8 ) else : raise ValueError ( "Unsupported number of channels" ) return fromarray ( arr , mode )
1,460
https://github.com/pyviz/imagen/blob/53c5685c880f54b42795964d8db50b02e8590e88/imagen/patterngenerator.py#L394-L411
[ "def", "publish_server_opened", "(", "self", ",", "server_address", ",", "topology_id", ")", ":", "event", "=", "ServerOpeningEvent", "(", "server_address", ",", "topology_id", ")", "for", "subscriber", "in", "self", ".", "__server_listeners", ":", "try", ":", "subscriber", ".", "opened", "(", "event", ")", "except", "Exception", ":", "_handle_exception", "(", ")" ]
Push the state of all generators
def state_push ( self ) : super ( Composite , self ) . state_push ( ) for gen in self . generators : gen . state_push ( )
1,461
https://github.com/pyviz/imagen/blob/53c5685c880f54b42795964d8db50b02e8590e88/imagen/patterngenerator.py#L516-L522
[ "def", "ReadFD", "(", "self", ",", "Channel", ")", ":", "try", ":", "if", "platform", ".", "system", "(", ")", "==", "'Darwin'", ":", "msg", "=", "TPCANMsgFDMac", "(", ")", "else", ":", "msg", "=", "TPCANMsgFD", "(", ")", "timestamp", "=", "TPCANTimestampFD", "(", ")", "res", "=", "self", ".", "__m_dllBasic", ".", "CAN_ReadFD", "(", "Channel", ",", "byref", "(", "msg", ")", ",", "byref", "(", "timestamp", ")", ")", "return", "TPCANStatus", "(", "res", ")", ",", "msg", ",", "timestamp", "except", ":", "logger", ".", "error", "(", "\"Exception on PCANBasic.ReadFD\"", ")", "raise" ]
Pop the state of all generators
def state_pop ( self ) : super ( Composite , self ) . state_pop ( ) for gen in self . generators : gen . state_pop ( )
1,462
https://github.com/pyviz/imagen/blob/53c5685c880f54b42795964d8db50b02e8590e88/imagen/patterngenerator.py#L524-L530
[ "def", "ReadFD", "(", "self", ",", "Channel", ")", ":", "try", ":", "if", "platform", ".", "system", "(", ")", "==", "'Darwin'", ":", "msg", "=", "TPCANMsgFDMac", "(", ")", "else", ":", "msg", "=", "TPCANMsgFD", "(", ")", "timestamp", "=", "TPCANTimestampFD", "(", ")", "res", "=", "self", ".", "__m_dllBasic", ".", "CAN_ReadFD", "(", "Channel", ",", "byref", "(", "msg", ")", ",", "byref", "(", "timestamp", ")", ")", "return", "TPCANStatus", "(", "res", ")", ",", "msg", ",", "timestamp", "except", ":", "logger", ".", "error", "(", "\"Exception on PCANBasic.ReadFD\"", ")", "raise" ]
Constructs combined pattern out of the individual ones .
def function ( self , p ) : generators = self . _advance_pattern_generators ( p ) assert hasattr ( p . operator , 'reduce' ) , repr ( p . operator ) + " does not support 'reduce'." # CEBALERT: mask gets applied by all PGs including the Composite itself # (leads to redundant calculations in current lissom_oo_or usage, but # will lead to problems/limitations in the future). patterns = [ pg ( xdensity = p . xdensity , ydensity = p . ydensity , bounds = p . bounds , mask = p . mask , x = p . x + p . size * ( pg . x * np . cos ( p . orientation ) - pg . y * np . sin ( p . orientation ) ) , y = p . y + p . size * ( pg . x * np . sin ( p . orientation ) + pg . y * np . cos ( p . orientation ) ) , orientation = pg . orientation + p . orientation , size = pg . size * p . size ) for pg in generators ] image_array = p . operator . reduce ( patterns ) return image_array
1,463
https://github.com/pyviz/imagen/blob/53c5685c880f54b42795964d8db50b02e8590e88/imagen/patterngenerator.py#L535-L552
[ "def", "_start_console", "(", "self", ")", ":", "self", ".", "_remote_pipe", "=", "yield", "from", "asyncio_open_serial", "(", "self", ".", "_get_pipe_name", "(", ")", ")", "server", "=", "AsyncioTelnetServer", "(", "reader", "=", "self", ".", "_remote_pipe", ",", "writer", "=", "self", ".", "_remote_pipe", ",", "binary", "=", "True", ",", "echo", "=", "True", ")", "self", ".", "_telnet_server", "=", "yield", "from", "asyncio", ".", "start_server", "(", "server", ".", "run", ",", "self", ".", "_manager", ".", "port_manager", ".", "console_host", ",", "self", ".", "console", ")" ]
Create column definition statement .
def compile_column ( name : str , data_type : str , nullable : bool ) -> str : null_str = 'NULL' if nullable else 'NOT NULL' return '{name} {data_type} {null},' . format ( name = name , data_type = data_type , null = null_str )
1,464
https://github.com/portfoliome/postpy/blob/fe26199131b15295fc5f669a0ad2a7f47bf490ee/postpy/ddl.py#L39-L46
[ "def", "get_directory_properties", "(", "self", ",", "share_name", ",", "directory_name", ",", "timeout", "=", "None", ",", "snapshot", "=", "None", ")", ":", "_validate_not_none", "(", "'share_name'", ",", "share_name", ")", "_validate_not_none", "(", "'directory_name'", ",", "directory_name", ")", "request", "=", "HTTPRequest", "(", ")", "request", ".", "method", "=", "'GET'", "request", ".", "host_locations", "=", "self", ".", "_get_host_locations", "(", ")", "request", ".", "path", "=", "_get_path", "(", "share_name", ",", "directory_name", ")", "request", ".", "query", "=", "{", "'restype'", ":", "'directory'", ",", "'timeout'", ":", "_int_to_str", "(", "timeout", ")", ",", "'sharesnapshot'", ":", "_to_str", "(", "snapshot", ")", "}", "return", "self", ".", "_perform_request", "(", "request", ",", "_parse_directory", ",", "[", "directory_name", "]", ")" ]
Declare materalized view .
def create ( self , no_data = False ) : if self . query : ddl_statement = self . compile_create_as ( ) else : ddl_statement = self . compile_create ( ) if no_data : ddl_statement += '\nWITH NO DATA' return ddl_statement , self . query_values
1,465
https://github.com/portfoliome/postpy/blob/fe26199131b15295fc5f669a0ad2a7f47bf490ee/postpy/ddl.py#L97-L108
[ "async", "def", "open_session", "(", "self", ",", "request", ":", "BaseRequestWebsocket", ")", "->", "Session", ":", "return", "await", "ensure_coroutine", "(", "self", ".", "session_interface", ".", "open_session", ")", "(", "self", ",", "request", ")" ]
For a given variant return its set of predicted effects . Optionally filter to transcripts where this variant results in a non - synonymous change to the protein sequence .
def predicted_effects_for_variant ( variant , transcript_id_whitelist = None , only_coding_changes = True ) : effects = [ ] for transcript in variant . transcripts : if only_coding_changes and not transcript . complete : logger . info ( "Skipping transcript %s for variant %s because it's incomplete" , transcript . name , variant ) continue if transcript_id_whitelist and transcript . id not in transcript_id_whitelist : logger . info ( "Skipping transcript %s for variant %s because it's not one of %d allowed" , transcript . name , variant , len ( transcript_id_whitelist ) ) continue effects . append ( variant . effect_on_transcript ( transcript ) ) effects = EffectCollection ( effects ) n_total_effects = len ( effects ) logger . info ( "Predicted total %d effects for variant %s" % ( n_total_effects , variant ) ) if not only_coding_changes : return effects else : nonsynonymous_coding_effects = effects . drop_silent_and_noncoding ( ) logger . info ( "Keeping %d/%d effects which affect protein coding sequence for %s: %s" , len ( nonsynonymous_coding_effects ) , n_total_effects , variant , nonsynonymous_coding_effects ) usable_effects = [ effect for effect in nonsynonymous_coding_effects if effect . mutant_protein_sequence is not None ] logger . info ( "Keeping %d effects with predictable AA sequences for %s: %s" , len ( usable_effects ) , variant , usable_effects ) return usable_effects
1,466
https://github.com/openvax/isovar/blob/b39b684920e3f6b344851d6598a1a1c67bce913b/isovar/effect_prediction.py#L24-L88
[ "def", "prettyPrintPacket", "(", "ctrl_table", ")", ":", "print", "(", "'---------------------------------------'", ")", "print", "(", "\"{:.<29} {}\"", ".", "format", "(", "'id'", ",", "ctrl_table", "[", "'id'", "]", ")", ")", "ctrl_table", ".", "pop", "(", "'id'", ")", "for", "key", ",", "value", "in", "ctrl_table", ".", "items", "(", ")", ":", "print", "(", "\"{:.<29} {}\"", ".", "format", "(", "key", ",", "value", ")", ")" ]
For a given variant find all the transcripts which overlap the variant and for which it has a predictable effect on the amino acid sequence of the protein .
def reference_transcripts_for_variant ( variant , transcript_id_whitelist = None , only_coding_changes = True ) : predicted_effects = predicted_effects_for_variant ( variant = variant , transcript_id_whitelist = transcript_id_whitelist , only_coding_changes = only_coding_changes ) return [ effect . transcript for effect in predicted_effects ]
1,467
https://github.com/openvax/isovar/blob/b39b684920e3f6b344851d6598a1a1c67bce913b/isovar/effect_prediction.py#L91-L104
[ "def", "win32_refresh_window", "(", "cls", ")", ":", "# Get console handle", "handle", "=", "windll", ".", "kernel32", ".", "GetConsoleWindow", "(", ")", "RDW_INVALIDATE", "=", "0x0001", "windll", ".", "user32", ".", "RedrawWindow", "(", "handle", ",", "None", ",", "None", ",", "c_uint", "(", "RDW_INVALIDATE", ")", ")" ]
Returns a pileup column at the specified position . Unclear if a function like this is hiding somewhere in pysam API .
def pileup_reads_at_position ( samfile , chromosome , base0_position ) : # TODO: I want to pass truncate=True, stepper="all" # but for some reason I get this error: # pileup() got an unexpected keyword argument 'truncate' # ...even though these options are listed in the docs for pysam 0.9.0 # for column in samfile . pileup ( chromosome , start = base0_position , end = base0_position + 1 ) : if column . pos != base0_position : # if this column isn't centered on the base before the # variant then keep going continue return column . pileups # if we get to this point then we never saw a pileup at the # desired position return [ ]
1,468
https://github.com/openvax/isovar/blob/b39b684920e3f6b344851d6598a1a1c67bce913b/isovar/locus_reads.py#L215-L240
[ "def", "getCiphertextLen", "(", "self", ",", "ciphertext", ")", ":", "plaintext_length", "=", "self", ".", "getPlaintextLen", "(", "ciphertext", ")", "ciphertext_length", "=", "plaintext_length", "+", "Encrypter", ".", "_CTXT_EXPANSION", "return", "ciphertext_length" ]
Generator that yields a sequence of ReadAtLocus records for reads which contain the positions before and after a variant . The actual work to figure out if what s between those positions matches a variant happens later in the variant_reads module .
def locus_read_generator ( samfile , chromosome , base1_position_before_variant , base1_position_after_variant , use_duplicate_reads = USE_DUPLICATE_READS , use_secondary_alignments = USE_SECONDARY_ALIGNMENTS , min_mapping_quality = MIN_READ_MAPPING_QUALITY ) : logger . debug ( "Gathering reads at locus %s: %d-%d" , chromosome , base1_position_before_variant , base1_position_after_variant ) base0_position_before_variant = base1_position_before_variant - 1 base0_position_after_variant = base1_position_after_variant - 1 count = 0 # We get a pileup at the base before the variant and then check to make sure # that reads also overlap the reference position after the variant. # # TODO: scan over a wider interval of pileups and collect reads that don't # overlap the bases before/after a variant due to splicing for pileup_element in pileup_reads_at_position ( samfile = samfile , chromosome = chromosome , base0_position = base0_position_before_variant ) : read = LocusRead . from_pysam_pileup_element ( pileup_element , base0_position_before_variant = base0_position_before_variant , base0_position_after_variant = base0_position_after_variant , use_secondary_alignments = use_secondary_alignments , use_duplicate_reads = use_duplicate_reads , min_mapping_quality = min_mapping_quality ) if read is not None : count += 1 yield read logger . info ( "Found %d reads overlapping locus %s: %d-%d" , count , chromosome , base1_position_before_variant , base1_position_after_variant )
1,469
https://github.com/openvax/isovar/blob/b39b684920e3f6b344851d6598a1a1c67bce913b/isovar/locus_reads.py#L243-L317
[ "def", "data", "(", "cls", ",", "cube", ",", "weighted", ",", "prune", ")", ":", "return", "cls", "(", ")", ".", "_data", "(", "cube", ",", "weighted", ",", "prune", ")" ]
Traverse a BAM file to find all the reads overlapping a specified locus .
def locus_reads_dataframe ( * args , * * kwargs ) : df_builder = DataFrameBuilder ( LocusRead , variant_columns = False , converters = { "reference_positions" : list_to_string , "quality_scores" : list_to_string , } ) for locus_read in locus_read_generator ( * args , * * kwargs ) : df_builder . add ( variant = None , element = locus_read ) return df_builder . to_dataframe ( )
1,470
https://github.com/openvax/isovar/blob/b39b684920e3f6b344851d6598a1a1c67bce913b/isovar/locus_reads.py#L320-L335
[ "def", "modify_user_power_levels", "(", "self", ",", "users", "=", "None", ",", "users_default", "=", "None", ")", ":", "try", ":", "content", "=", "self", ".", "client", ".", "api", ".", "get_power_levels", "(", "self", ".", "room_id", ")", "if", "users_default", ":", "content", "[", "\"users_default\"", "]", "=", "users_default", "if", "users", ":", "if", "\"users\"", "in", "content", ":", "content", "[", "\"users\"", "]", ".", "update", "(", "users", ")", "else", ":", "content", "[", "\"users\"", "]", "=", "users", "# Remove any keys with value None", "for", "user", ",", "power_level", "in", "list", "(", "content", "[", "\"users\"", "]", ".", "items", "(", ")", ")", ":", "if", "power_level", "is", "None", ":", "del", "content", "[", "\"users\"", "]", "[", "user", "]", "self", ".", "client", ".", "api", ".", "set_power_levels", "(", "self", ".", "room_id", ",", "content", ")", "return", "True", "except", "MatrixRequestError", ":", "return", "False" ]
Generate copy from csv statement .
def copy_from_csv_sql ( qualified_name : str , delimiter = ',' , encoding = 'utf8' , null_str = '' , header = True , escape_str = '\\' , quote_char = '"' , force_not_null = None , force_null = None ) : options = [ ] options . append ( "DELIMITER '%s'" % delimiter ) options . append ( "NULL '%s'" % null_str ) if header : options . append ( 'HEADER' ) options . append ( "QUOTE '%s'" % quote_char ) options . append ( "ESCAPE '%s'" % escape_str ) if force_not_null : options . append ( _format_force_not_null ( column_names = force_not_null ) ) if force_null : options . append ( _format_force_null ( column_names = force_null ) ) postgres_encoding = get_postgres_encoding ( encoding ) options . append ( "ENCODING '%s'" % postgres_encoding ) copy_sql = _format_copy_csv_sql ( qualified_name , copy_options = options ) return copy_sql
1,471
https://github.com/portfoliome/postpy/blob/fe26199131b15295fc5f669a0ad2a7f47bf490ee/postpy/dml_copy.py#L83-L109
[ "def", "avg_inner_product", "(", "data1", ",", "data2", ",", "bin_size", ")", ":", "assert", "data1", ".", "duration", "==", "data2", ".", "duration", "assert", "data1", ".", "sample_rate", "==", "data2", ".", "sample_rate", "seglen", "=", "int", "(", "bin_size", "*", "data1", ".", "sample_rate", ")", "inner_prod", "=", "[", "]", "for", "idx", "in", "range", "(", "int", "(", "data1", ".", "duration", "/", "bin_size", ")", ")", ":", "start", ",", "end", "=", "idx", "*", "seglen", ",", "(", "idx", "+", "1", ")", "*", "seglen", "norm", "=", "len", "(", "data1", "[", "start", ":", "end", "]", ")", "bin_prod", "=", "2", "*", "sum", "(", "data1", ".", "data", "[", "start", ":", "end", "]", ".", "real", "*", "numpy", ".", "conjugate", "(", "data2", ".", "data", "[", "start", ":", "end", "]", ")", ")", "/", "norm", "inner_prod", ".", "append", "(", "bin_prod", ")", "# Get the median over all bins to avoid outliers due to the presence", "# of a signal in a particular bin.", "inner_median", "=", "complex_median", "(", "inner_prod", ")", "return", "inner_prod", ",", "numpy", ".", "abs", "(", "inner_median", ")", ",", "numpy", ".", "angle", "(", "inner_median", ")" ]
Sort protein sequences in decreasing order of priority
def sort_protein_sequences ( protein_sequences ) : return list ( sorted ( protein_sequences , key = ProteinSequence . ascending_sort_key , reverse = True ) )
1,472
https://github.com/openvax/isovar/blob/b39b684920e3f6b344851d6598a1a1c67bce913b/isovar/protein_sequences.py#L182-L190
[ "def", "GetAdGroups", "(", "self", ",", "client_customer_id", ",", "campaign_id", ")", ":", "self", ".", "client", ".", "SetClientCustomerId", "(", "client_customer_id", ")", "selector", "=", "{", "'fields'", ":", "[", "'Id'", ",", "'Name'", ",", "'Status'", "]", ",", "'predicates'", ":", "[", "{", "'field'", ":", "'CampaignId'", ",", "'operator'", ":", "'EQUALS'", ",", "'values'", ":", "[", "campaign_id", "]", "}", ",", "{", "'field'", ":", "'Status'", ",", "'operator'", ":", "'NOT_EQUALS'", ",", "'values'", ":", "[", "'REMOVED'", "]", "}", "]", "}", "adgroups", "=", "self", ".", "client", ".", "GetService", "(", "'AdGroupService'", ")", ".", "get", "(", "selector", ")", "if", "int", "(", "adgroups", "[", "'totalNumEntries'", "]", ")", ">", "0", ":", "return", "adgroups", "[", "'entries'", "]", "else", ":", "return", "None" ]
Translates each coding variant in a collection to one or more Translation objects which are then aggregated into equivalent ProteinSequence objects .
def reads_generator_to_protein_sequences_generator ( variant_and_overlapping_reads_generator , transcript_id_whitelist = None , protein_sequence_length = PROTEIN_SEQUENCE_LENGTH , min_alt_rna_reads = MIN_ALT_RNA_READS , min_variant_sequence_coverage = MIN_VARIANT_SEQUENCE_COVERAGE , min_transcript_prefix_length = MIN_TRANSCRIPT_PREFIX_LENGTH , max_transcript_mismatches = MAX_REFERENCE_TRANSCRIPT_MISMATCHES , include_mismatches_after_variant = INCLUDE_MISMATCHES_AFTER_VARIANT , max_protein_sequences_per_variant = MAX_PROTEIN_SEQUENCES_PER_VARIANT , variant_sequence_assembly = VARIANT_SEQUENCE_ASSEMBLY ) : for ( variant , overlapping_reads ) in variant_and_overlapping_reads_generator : overlapping_transcript_ids = [ t . id for t in variant . transcripts if t . is_protein_coding ] _ , ref , alt = trim_variant ( variant ) overlapping_reads = list ( overlapping_reads ) reads_grouped_by_allele = group_reads_by_allele ( overlapping_reads ) ref_reads = reads_grouped_by_allele . get ( ref , [ ] ) alt_reads = reads_grouped_by_allele . get ( alt , [ ] ) translations = translate_variant_reads ( variant = variant , variant_reads = alt_reads , transcript_id_whitelist = transcript_id_whitelist , protein_sequence_length = protein_sequence_length , min_alt_rna_reads = min_alt_rna_reads , min_variant_sequence_coverage = min_variant_sequence_coverage , min_transcript_prefix_length = min_transcript_prefix_length , max_transcript_mismatches = max_transcript_mismatches , include_mismatches_after_variant = include_mismatches_after_variant , variant_sequence_assembly = variant_sequence_assembly ) protein_sequences = [ ] for ( key , equivalent_translations ) in groupby ( translations , key_fn = Translation . as_translation_key ) . items ( ) : # get the variant read names, transcript IDs and gene names for # protein sequence we're about to construct alt_reads_supporting_protein_sequence , group_transcript_ids , group_gene_names = ProteinSequence . 
_summarize_translations ( equivalent_translations ) logger . info ( "%s: %s alt reads supporting protein sequence (gene names = %s)" , key , len ( alt_reads_supporting_protein_sequence ) , group_gene_names ) protein_sequence = ProteinSequence . from_translation_key ( translation_key = key , translations = equivalent_translations , overlapping_reads = overlapping_reads , alt_reads = alt_reads , ref_reads = ref_reads , alt_reads_supporting_protein_sequence = alt_reads_supporting_protein_sequence , transcripts_supporting_protein_sequence = group_transcript_ids , transcripts_overlapping_variant = overlapping_transcript_ids , gene = list ( group_gene_names ) ) logger . info ( "%s: protein sequence = %s" % ( key , protein_sequence . amino_acids ) ) protein_sequences . append ( protein_sequence ) # sort protein sequences before returning the top results protein_sequences = sort_protein_sequences ( protein_sequences ) yield variant , protein_sequences [ : max_protein_sequences_per_variant ]
1,473
https://github.com/openvax/isovar/blob/b39b684920e3f6b344851d6598a1a1c67bce913b/isovar/protein_sequences.py#L192-L311
[ "def", "checkpoint", "(", "global_model", ",", "local_model", "=", "None", ")", ":", "sglobal", "=", "pickle", ".", "dumps", "(", "global_model", ")", "if", "local_model", "is", "None", ":", "_LIB", ".", "RabitCheckPoint", "(", "sglobal", ",", "len", "(", "sglobal", ")", ",", "None", ",", "0", ")", "del", "sglobal", "else", ":", "slocal", "=", "pickle", ".", "dumps", "(", "local_model", ")", "_LIB", ".", "RabitCheckPoint", "(", "sglobal", ",", "len", "(", "sglobal", ")", ",", "slocal", ",", "len", "(", "slocal", ")", ")", "del", "slocal", "del", "sglobal" ]
Create a ProteinSequence object from a TranslationKey along with all the extra fields a ProteinSequence requires .
def from_translation_key ( cls , translation_key , translations , overlapping_reads , ref_reads , alt_reads , alt_reads_supporting_protein_sequence , transcripts_overlapping_variant , transcripts_supporting_protein_sequence , gene ) : return cls ( amino_acids = translation_key . amino_acids , variant_aa_interval_start = translation_key . variant_aa_interval_start , variant_aa_interval_end = translation_key . variant_aa_interval_end , ends_with_stop_codon = translation_key . ends_with_stop_codon , frameshift = translation_key . frameshift , translations = translations , overlapping_reads = overlapping_reads , ref_reads = ref_reads , alt_reads = alt_reads , alt_reads_supporting_protein_sequence = ( alt_reads_supporting_protein_sequence ) , transcripts_overlapping_variant = transcripts_overlapping_variant , transcripts_supporting_protein_sequence = ( transcripts_supporting_protein_sequence ) , gene = gene )
1,474
https://github.com/openvax/isovar/blob/b39b684920e3f6b344851d6598a1a1c67bce913b/isovar/protein_sequences.py#L131-L161
[ "def", "remove_server", "(", "self", ",", "server_id", ")", ":", "# Validate server_id", "server", "=", "self", ".", "_get_server", "(", "server_id", ")", "# Delete any instances we recorded to be cleaned up", "if", "server_id", "in", "self", ".", "_owned_subscriptions", ":", "inst_list", "=", "self", ".", "_owned_subscriptions", "[", "server_id", "]", "# We iterate backwards because we change the list", "for", "i", "in", "six", ".", "moves", ".", "range", "(", "len", "(", "inst_list", ")", "-", "1", ",", "-", "1", ",", "-", "1", ")", ":", "inst", "=", "inst_list", "[", "i", "]", "server", ".", "conn", ".", "DeleteInstance", "(", "inst", ".", "path", ")", "del", "inst_list", "[", "i", "]", "del", "self", ".", "_owned_subscriptions", "[", "server_id", "]", "if", "server_id", "in", "self", ".", "_owned_filters", ":", "inst_list", "=", "self", ".", "_owned_filters", "[", "server_id", "]", "# We iterate backwards because we change the list", "for", "i", "in", "six", ".", "moves", ".", "range", "(", "len", "(", "inst_list", ")", "-", "1", ",", "-", "1", ",", "-", "1", ")", ":", "inst", "=", "inst_list", "[", "i", "]", "server", ".", "conn", ".", "DeleteInstance", "(", "inst", ".", "path", ")", "del", "inst_list", "[", "i", "]", "del", "self", ".", "_owned_filters", "[", "server_id", "]", "if", "server_id", "in", "self", ".", "_owned_destinations", ":", "inst_list", "=", "self", ".", "_owned_destinations", "[", "server_id", "]", "# We iterate backwards because we change the list", "for", "i", "in", "six", ".", "moves", ".", "range", "(", "len", "(", "inst_list", ")", "-", "1", ",", "-", "1", ",", "-", "1", ")", ":", "inst", "=", "inst_list", "[", "i", "]", "server", ".", "conn", ".", "DeleteInstance", "(", "inst", ".", "path", ")", "del", "inst_list", "[", "i", "]", "del", "self", ".", "_owned_destinations", "[", "server_id", "]", "# Remove server from this listener", "del", "self", ".", "_servers", "[", "server_id", "]" ]
Table referencing a delete from using primary key join .
def make_delete_table ( table : Table , delete_prefix = 'delete_from__' ) -> Table : name = delete_prefix + table . name primary_key = table . primary_key key_names = set ( primary_key . column_names ) columns = [ column for column in table . columns if column . name in key_names ] table = Table ( name , columns , primary_key ) return table
1,475
https://github.com/portfoliome/postpy/blob/fe26199131b15295fc5f669a0ad2a7f47bf490ee/postpy/base.py#L136-L145
[ "def", "makeSolution", "(", "self", ",", "cNrm", ",", "mNrm", ")", ":", "solution", "=", "ConsumerSolution", "(", ")", "# An empty solution to which we'll add state-conditional solutions", "# Calculate the MPC at each market resource gridpoint in each state (if desired)", "if", "self", ".", "CubicBool", ":", "dcda", "=", "self", ".", "EndOfPrdvPP", "/", "self", ".", "uPP", "(", "np", ".", "array", "(", "self", ".", "cNrmNow", ")", ")", "MPC", "=", "dcda", "/", "(", "dcda", "+", "1.0", ")", "self", ".", "MPC_temp", "=", "np", ".", "hstack", "(", "(", "np", ".", "reshape", "(", "self", ".", "MPCmaxNow", ",", "(", "self", ".", "StateCount", ",", "1", ")", ")", ",", "MPC", ")", ")", "interpfunc", "=", "self", ".", "makeCubiccFunc", "else", ":", "interpfunc", "=", "self", ".", "makeLinearcFunc", "# Loop through each current period state and add its solution to the overall solution", "for", "i", "in", "range", "(", "self", ".", "StateCount", ")", ":", "# Set current-period-conditional human wealth and MPC bounds", "self", ".", "hNrmNow_j", "=", "self", ".", "hNrmNow", "[", "i", "]", "self", ".", "MPCminNow_j", "=", "self", ".", "MPCminNow", "[", "i", "]", "if", "self", ".", "CubicBool", ":", "self", ".", "MPC_temp_j", "=", "self", ".", "MPC_temp", "[", "i", ",", ":", "]", "# Construct the consumption function by combining the constrained and unconstrained portions", "self", ".", "cFuncNowCnst", "=", "LinearInterp", "(", "[", "self", ".", "mNrmMin_list", "[", "i", "]", ",", "self", ".", "mNrmMin_list", "[", "i", "]", "+", "1.0", "]", ",", "[", "0.0", ",", "1.0", "]", ")", "cFuncNowUnc", "=", "interpfunc", "(", "mNrm", "[", "i", ",", ":", "]", ",", "cNrm", "[", "i", ",", ":", "]", ")", "cFuncNow", "=", "LowerEnvelope", "(", "cFuncNowUnc", ",", "self", ".", "cFuncNowCnst", ")", "# Make the marginal value function and pack up the current-state-conditional solution", "vPfuncNow", "=", "MargValueFunc", "(", "cFuncNow", ",", "self", ".", "CRRA", ")", "solution_cond", "=", 
"ConsumerSolution", "(", "cFunc", "=", "cFuncNow", ",", "vPfunc", "=", "vPfuncNow", ",", "mNrmMin", "=", "self", ".", "mNrmMinNow", ")", "if", "self", ".", "CubicBool", ":", "# Add the state-conditional marginal marginal value function (if desired)", "solution_cond", "=", "self", ".", "addvPPfunc", "(", "solution_cond", ")", "# Add the current-state-conditional solution to the overall period solution", "solution", ".", "appendSolution", "(", "solution_cond", ")", "# Add the lower bounds of market resources, MPC limits, human resources,", "# and the value functions to the overall solution", "solution", ".", "mNrmMin", "=", "self", ".", "mNrmMin_list", "solution", "=", "self", ".", "addMPCandHumanWealth", "(", "solution", ")", "if", "self", ".", "vFuncBool", ":", "vFuncNow", "=", "self", ".", "makevFunc", "(", "solution", ")", "solution", ".", "vFunc", "=", "vFuncNow", "# Return the overall solution to this period", "return", "solution" ]
Trims common prefixes from the ref and alt sequences
def trim_variant_fields ( location , ref , alt ) : if len ( alt ) > 0 and ref . startswith ( alt ) : # if alt is a prefix of the ref sequence then we actually have a # deletion like: # g.10 GTT > GT # which can be trimmed to # g.12 'T'>'' ref = ref [ len ( alt ) : ] location += len ( alt ) alt = "" if len ( ref ) > 0 and alt . startswith ( ref ) : # if ref sequence is a prefix of the alt sequence then we actually have # an insertion like: # g.10 GT>GTT # which can be trimmed to # g.11 ''>'T' # Note that we are selecting the position *before* the insertion # (as an arbitrary convention) alt = alt [ len ( ref ) : ] location += len ( ref ) - 1 ref = "" return location , ref , alt
1,476
https://github.com/openvax/isovar/blob/b39b684920e3f6b344851d6598a1a1c67bce913b/isovar/variant_helpers.py#L27-L64
[ "def", "_update_criteria_with_filters", "(", "self", ",", "query", ",", "section_name", ")", ":", "if", "self", ".", "dashboard_cookie", "is", "None", ":", "return", "query", "cookie_criteria", "=", "self", ".", "dashboard_cookie", ".", "get", "(", "section_name", ")", "if", "cookie_criteria", "==", "'mine'", ":", "query", "[", "'Creator'", "]", "=", "self", ".", "member", ".", "getId", "(", ")", "return", "query" ]
Inteval of interbase offsets of the affected reference positions for a particular variant .
def base0_interval_for_variant ( variant ) : base1_location , ref , alt = trim_variant ( variant ) return base0_interval_for_variant_fields ( base1_location = base1_location , ref = ref , alt = alt )
1,477
https://github.com/openvax/isovar/blob/b39b684920e3f6b344851d6598a1a1c67bce913b/isovar/variant_helpers.py#L110-L125
[ "async", "def", "commission", "(", "self", ",", "*", ",", "enable_ssh", ":", "bool", "=", "None", ",", "skip_networking", ":", "bool", "=", "None", ",", "skip_storage", ":", "bool", "=", "None", ",", "commissioning_scripts", ":", "typing", ".", "Sequence", "[", "str", "]", "=", "None", ",", "testing_scripts", ":", "typing", ".", "Sequence", "[", "str", "]", "=", "None", ",", "wait", ":", "bool", "=", "False", ",", "wait_interval", ":", "int", "=", "5", ")", ":", "params", "=", "{", "\"system_id\"", ":", "self", ".", "system_id", "}", "if", "enable_ssh", "is", "not", "None", ":", "params", "[", "\"enable_ssh\"", "]", "=", "enable_ssh", "if", "skip_networking", "is", "not", "None", ":", "params", "[", "\"skip_networking\"", "]", "=", "skip_networking", "if", "skip_storage", "is", "not", "None", ":", "params", "[", "\"skip_storage\"", "]", "=", "skip_storage", "if", "(", "commissioning_scripts", "is", "not", "None", "and", "len", "(", "commissioning_scripts", ")", ">", "0", ")", ":", "params", "[", "\"commissioning_scripts\"", "]", "=", "\",\"", ".", "join", "(", "commissioning_scripts", ")", "if", "testing_scripts", "is", "not", "None", ":", "if", "len", "(", "testing_scripts", ")", "==", "0", "or", "testing_scripts", "==", "\"none\"", ":", "params", "[", "\"testing_scripts\"", "]", "=", "[", "\"none\"", "]", "else", ":", "params", "[", "\"testing_scripts\"", "]", "=", "\",\"", ".", "join", "(", "testing_scripts", ")", "self", ".", "_data", "=", "await", "self", ".", "_handler", ".", "commission", "(", "*", "*", "params", ")", "if", "not", "wait", ":", "return", "self", "else", ":", "# Wait for the machine to be fully commissioned.", "while", "self", ".", "status", "in", "[", "NodeStatus", ".", "COMMISSIONING", ",", "NodeStatus", ".", "TESTING", "]", ":", "await", "asyncio", ".", "sleep", "(", "wait_interval", ")", "self", ".", "_data", "=", "await", "self", ".", "_handler", ".", "read", "(", "system_id", "=", "self", ".", "system_id", ")", "if", "self", ".", 
"status", "==", "NodeStatus", ".", "FAILED_COMMISSIONING", ":", "msg", "=", "\"{hostname} failed to commission.\"", ".", "format", "(", "hostname", "=", "self", ".", "hostname", ")", "raise", "FailedCommissioning", "(", "msg", ",", "self", ")", "if", "self", ".", "status", "==", "NodeStatus", ".", "FAILED_TESTING", ":", "msg", "=", "\"{hostname} failed testing.\"", ".", "format", "(", "hostname", "=", "self", ".", "hostname", ")", "raise", "FailedTesting", "(", "msg", ",", "self", ")", "return", "self" ]
Convert from a variant s position in global genomic coordinates on the forward strand to an interval of interbase offsets on a particular transcript s mRNA .
def interbase_range_affected_by_variant_on_transcript ( variant , transcript ) : if variant . is_insertion : if transcript . strand == "+" : # base-1 position of an insertion is the genomic nucleotide # before any inserted mutant nucleotides, so the start offset # of the actual inserted nucleotides is one past that reference # position start_offset = transcript . spliced_offset ( variant . start ) + 1 else : # on the negative strand the genomic base-1 position actually # refers to the transcript base *after* the insertion, so we can # use that as the interbase coordinate for where the insertion # occurs start_offset = transcript . spliced_offset ( variant . start ) # an insertion happens *between* two reference bases # so the start:end offsets coincide end_offset = start_offset else : # reference bases affected by substitution or deletion defined by # range starting at first affected base offsets = [ ] assert len ( variant . ref ) > 0 for dna_pos in range ( variant . start , variant . start + len ( variant . ref ) ) : try : offsets . append ( transcript . spliced_offset ( dna_pos ) ) except ValueError : logger . info ( "Couldn't find position %d from %s on exons of %s" , dna_pos , variant , transcript ) if len ( offsets ) == 0 : raise ValueError ( "Couldn't find any exonic reference bases affected by %s on %s" , variant , transcript ) start_offset = min ( offsets ) end_offset = max ( offsets ) + 1 return ( start_offset , end_offset )
1,478
https://github.com/openvax/isovar/blob/b39b684920e3f6b344851d6598a1a1c67bce913b/isovar/variant_helpers.py#L127-L190
[ "def", "update_aggregation", "(", "self", ",", "course", ",", "aggregationid", ",", "new_data", ")", ":", "student_list", "=", "self", ".", "user_manager", ".", "get_course_registered_users", "(", "course", ",", "False", ")", "# If aggregation is new", "if", "aggregationid", "==", "'None'", ":", "# Remove _id for correct insertion", "del", "new_data", "[", "'_id'", "]", "new_data", "[", "\"courseid\"", "]", "=", "course", ".", "get_id", "(", ")", "# Insert the new aggregation", "result", "=", "self", ".", "database", ".", "aggregations", ".", "insert_one", "(", "new_data", ")", "# Retrieve new aggregation id", "aggregationid", "=", "result", ".", "inserted_id", "new_data", "[", "'_id'", "]", "=", "result", ".", "inserted_id", "aggregation", "=", "new_data", "else", ":", "aggregation", "=", "self", ".", "database", ".", "aggregations", ".", "find_one", "(", "{", "\"_id\"", ":", "ObjectId", "(", "aggregationid", ")", ",", "\"courseid\"", ":", "course", ".", "get_id", "(", ")", "}", ")", "# Check tutors", "new_data", "[", "\"tutors\"", "]", "=", "[", "tutor", "for", "tutor", "in", "new_data", "[", "\"tutors\"", "]", "if", "tutor", "in", "course", ".", "get_staff", "(", ")", "]", "students", ",", "groups", ",", "errored_students", "=", "[", "]", ",", "[", "]", ",", "[", "]", "# Check the students", "for", "student", "in", "new_data", "[", "\"students\"", "]", ":", "if", "student", "in", "student_list", ":", "# Remove user from the other aggregation", "self", ".", "database", ".", "aggregations", ".", "find_one_and_update", "(", "{", "\"courseid\"", ":", "course", ".", "get_id", "(", ")", ",", "\"groups.students\"", ":", "student", "}", ",", "{", "\"$pull\"", ":", "{", "\"groups.$.students\"", ":", "student", ",", "\"students\"", ":", "student", "}", "}", ")", "self", ".", "database", ".", "aggregations", ".", "find_one_and_update", "(", "{", "\"courseid\"", ":", "course", ".", "get_id", "(", ")", ",", "\"students\"", ":", "student", "}", ",", "{", "\"$pull\"", ":", 
"{", "\"students\"", ":", "student", "}", "}", ")", "students", ".", "append", "(", "student", ")", "else", ":", "# Check if user can be registered", "user_info", "=", "self", ".", "user_manager", ".", "get_user_info", "(", "student", ")", "if", "user_info", "is", "None", "or", "student", "in", "aggregation", "[", "\"tutors\"", "]", ":", "errored_students", ".", "append", "(", "student", ")", "else", ":", "students", ".", "append", "(", "student", ")", "removed_students", "=", "[", "student", "for", "student", "in", "aggregation", "[", "\"students\"", "]", "if", "student", "not", "in", "new_data", "[", "\"students\"", "]", "]", "self", ".", "database", ".", "aggregations", ".", "find_one_and_update", "(", "{", "\"courseid\"", ":", "course", ".", "get_id", "(", ")", ",", "\"default\"", ":", "True", "}", ",", "{", "\"$push\"", ":", "{", "\"students\"", ":", "{", "\"$each\"", ":", "removed_students", "}", "}", "}", ")", "new_data", "[", "\"students\"", "]", "=", "students", "# Check the groups", "for", "group", "in", "new_data", "[", "\"groups\"", "]", ":", "group", "[", "\"students\"", "]", "=", "[", "student", "for", "student", "in", "group", "[", "\"students\"", "]", "if", "student", "in", "new_data", "[", "\"students\"", "]", "]", "if", "len", "(", "group", "[", "\"students\"", "]", ")", "<=", "group", "[", "\"size\"", "]", ":", "groups", ".", "append", "(", "group", ")", "new_data", "[", "\"groups\"", "]", "=", "groups", "# Check for default aggregation", "if", "new_data", "[", "'default'", "]", ":", "self", ".", "database", ".", "aggregations", ".", "find_one_and_update", "(", "{", "\"courseid\"", ":", "course", ".", "get_id", "(", ")", ",", "\"default\"", ":", "True", "}", ",", "{", "\"$set\"", ":", "{", "\"default\"", ":", "False", "}", "}", ")", "aggregation", "=", "self", ".", "database", ".", "aggregations", ".", "find_one_and_update", "(", "{", "\"_id\"", ":", "ObjectId", "(", "aggregationid", ")", "}", ",", "{", "\"$set\"", ":", "{", "\"description\"", 
":", "new_data", "[", "\"description\"", "]", ",", "\"students\"", ":", "students", ",", "\"tutors\"", ":", "new_data", "[", "\"tutors\"", "]", ",", "\"groups\"", ":", "groups", ",", "\"default\"", ":", "new_data", "[", "'default'", "]", "}", "}", ",", "return_document", "=", "ReturnDocument", ".", "AFTER", ")", "return", "aggregation", ",", "errored_students" ]
Insert a collection of namedtuple records .
def insert ( conn , qualified_name : str , column_names , records ) : query = create_insert_statement ( qualified_name , column_names ) with conn : with conn . cursor ( cursor_factory = NamedTupleCursor ) as cursor : for record in records : cursor . execute ( query , record )
1,479
https://github.com/portfoliome/postpy/blob/fe26199131b15295fc5f669a0ad2a7f47bf490ee/postpy/dml.py#L30-L38
[ "def", "update_sandbox_ride", "(", "self", ",", "ride_id", ",", "new_status", ")", ":", "if", "new_status", "not", "in", "VALID_PRODUCT_STATUS", ":", "message", "=", "'{} is not a valid product status.'", "raise", "UberIllegalState", "(", "message", ".", "format", "(", "new_status", ")", ")", "args", "=", "{", "'status'", ":", "new_status", "}", "endpoint", "=", "'v1.2/sandbox/requests/{}'", ".", "format", "(", "ride_id", ")", "return", "self", ".", "_api_call", "(", "'PUT'", ",", "endpoint", ",", "args", "=", "args", ")" ]
Insert many records by chunking data into insert statements .
def insert_many ( conn , tablename , column_names , records , chunksize = 2500 ) : groups = chunks ( records , chunksize ) column_str = ',' . join ( column_names ) insert_template = 'INSERT INTO {table} ({columns}) VALUES {values}' . format ( table = tablename , columns = column_str , values = '{0}' ) with conn : with conn . cursor ( ) as cursor : for recs in groups : record_group = list ( recs ) records_template_str = ',' . join ( [ '%s' ] * len ( record_group ) ) insert_query = insert_template . format ( records_template_str ) cursor . execute ( insert_query , record_group )
1,480
https://github.com/portfoliome/postpy/blob/fe26199131b15295fc5f669a0ad2a7f47bf490ee/postpy/dml.py#L41-L60
[ "def", "_api_arguments", "(", "self", ")", ":", "# TC main >= 4.4 token will be passed to jobs.", "self", ".", "add_argument", "(", "'--tc_token'", ",", "default", "=", "None", ",", "help", "=", "'ThreatConnect API Token'", ")", "self", ".", "add_argument", "(", "'--tc_token_expires'", ",", "default", "=", "None", ",", "help", "=", "'ThreatConnect API Token Expiration Time'", ",", "type", "=", "int", ",", ")", "# TC Integrations Server or TC main < 4.4", "self", ".", "add_argument", "(", "'--api_access_id'", ",", "default", "=", "None", ",", "help", "=", "'ThreatConnect API Access ID'", ",", "required", "=", "False", ")", "self", ".", "add_argument", "(", "'--api_secret_key'", ",", "default", "=", "None", ",", "help", "=", "'ThreatConnect API Secret Key'", ",", "required", "=", "False", ")", "# Validate ThreatConnect SSL certificate", "self", ".", "add_argument", "(", "'--tc_verify'", ",", "action", "=", "'store_true'", ",", "help", "=", "'Validate the ThreatConnect SSL Cert'", ")" ]
Upsert records .
def upsert_records ( conn , records , upsert_statement ) : with conn : with conn . cursor ( ) as cursor : for record in records : cursor . execute ( upsert_statement , record )
1,481
https://github.com/portfoliome/postpy/blob/fe26199131b15295fc5f669a0ad2a7f47bf490ee/postpy/dml.py#L63-L69
[ "def", "_add_dependency", "(", "self", ",", "dependency", ",", "var_name", "=", "None", ")", ":", "if", "var_name", "is", "None", ":", "var_name", "=", "next", "(", "self", ".", "temp_var_names", ")", "# Don't add duplicate dependencies", "if", "(", "dependency", ",", "var_name", ")", "not", "in", "self", ".", "dependencies", ":", "self", ".", "dependencies", ".", "append", "(", "(", "dependency", ",", "var_name", ")", ")", "return", "var_name" ]
SQL statement for a joined delete from . Generate SQL statement for deleting the intersection of rows between both tables from table referenced by tablename .
def delete_joined_table_sql ( qualified_name , removing_qualified_name , primary_key ) : condition_template = 't.{}=d.{}' where_clause = ' AND ' . join ( condition_template . format ( pkey , pkey ) for pkey in primary_key ) delete_statement = ( 'DELETE FROM {table} t' ' USING {delete_table} d' ' WHERE {where_clause}' ) . format ( table = qualified_name , delete_table = removing_qualified_name , where_clause = where_clause ) return delete_statement
1,482
https://github.com/portfoliome/postpy/blob/fe26199131b15295fc5f669a0ad2a7f47bf490ee/postpy/dml.py#L165-L180
[ "def", "_maximization", "(", "self", ",", "X", ")", ":", "# Iterate through clusters and recalculate mean and covariance", "for", "i", "in", "range", "(", "self", ".", "k", ")", ":", "resp", "=", "np", ".", "expand_dims", "(", "self", ".", "responsibility", "[", ":", ",", "i", "]", ",", "axis", "=", "1", ")", "mean", "=", "(", "resp", "*", "X", ")", ".", "sum", "(", "axis", "=", "0", ")", "/", "resp", ".", "sum", "(", ")", "covariance", "=", "(", "X", "-", "mean", ")", ".", "T", ".", "dot", "(", "(", "X", "-", "mean", ")", "*", "resp", ")", "/", "resp", ".", "sum", "(", ")", "self", ".", "parameters", "[", "i", "]", "[", "\"mean\"", "]", ",", "self", ".", "parameters", "[", "i", "]", "[", "\"cov\"", "]", "=", "mean", ",", "covariance", "# Update weights", "n_samples", "=", "np", ".", "shape", "(", "X", ")", "[", "0", "]", "self", ".", "priors", "=", "self", ".", "responsibility", ".", "sum", "(", "axis", "=", "0", ")", "/", "n_samples" ]
Copy file - like object to database table .
def copy_from_csv ( conn , file , qualified_name : str , delimiter = ',' , encoding = 'utf8' , null_str = '' , header = True , escape_str = '\\' , quote_char = '"' , force_not_null = None , force_null = None ) : copy_sql = copy_from_csv_sql ( qualified_name , delimiter , encoding , null_str = null_str , header = header , escape_str = escape_str , quote_char = quote_char , force_not_null = force_not_null , force_null = force_null ) with conn : with conn . cursor ( ) as cursor : cursor . copy_expert ( copy_sql , file )
1,483
https://github.com/portfoliome/postpy/blob/fe26199131b15295fc5f669a0ad2a7f47bf490ee/postpy/dml.py#L237-L261
[ "def", "run_tensorboard", "(", "logdir", ",", "listen_on", "=", "\"0.0.0.0\"", ",", "port", "=", "0", ",", "tensorboard_args", "=", "None", ",", "timeout", "=", "10", ")", ":", "if", "tensorboard_args", "is", "None", ":", "tensorboard_args", "=", "[", "]", "tensorboard_instance", "=", "Process", ".", "create_process", "(", "TENSORBOARD_BINARY", ".", "split", "(", "\" \"", ")", "+", "[", "\"--logdir\"", ",", "logdir", ",", "\"--host\"", ",", "listen_on", ",", "\"--port\"", ",", "str", "(", "port", ")", "]", "+", "tensorboard_args", ")", "try", ":", "tensorboard_instance", ".", "run", "(", ")", "except", "FileNotFoundError", "as", "ex", ":", "raise", "TensorboardNotFoundError", "(", "ex", ")", "# Wait for a message that signaliezes start of Tensorboard", "start", "=", "time", ".", "time", "(", ")", "data", "=", "\"\"", "while", "time", ".", "time", "(", ")", "-", "start", "<", "timeout", ":", "line", "=", "tensorboard_instance", ".", "read_line_stderr", "(", "time_limit", "=", "timeout", ")", "data", "+=", "line", "if", "\"at http://\"", "in", "line", ":", "port", "=", "parse_port_from_tensorboard_output", "(", "line", ")", "# Good case", "return", "port", "elif", "\"TensorBoard attempted to bind to port\"", "in", "line", ":", "break", "tensorboard_instance", ".", "terminate", "(", ")", "raise", "UnexpectedOutputError", "(", "data", ",", "expected", "=", "\"Confirmation that Tensorboard has started\"", ")" ]
Retrieve all user tables .
def get_user_tables ( conn ) : query_string = "select schemaname, relname from pg_stat_user_tables;" with conn . cursor ( ) as cursor : cursor . execute ( query_string ) tables = cursor . fetchall ( ) return tables
1,484
https://github.com/portfoliome/postpy/blob/fe26199131b15295fc5f669a0ad2a7f47bf490ee/postpy/admin.py#L13-L21
[ "def", "get_job_list", "(", "logger", ",", "match", ",", "library_mapping", ",", "token", ",", "host", ")", ":", "res", "=", "requests", ".", "get", "(", "host", "+", "'/api/2.0/jobs/list'", ",", "auth", "=", "(", "'token'", ",", "token", ")", ",", ")", "if", "res", ".", "status_code", "==", "200", ":", "job_list", "=", "[", "]", "if", "len", "(", "res", ".", "json", "(", ")", "[", "'jobs'", "]", ")", "==", "0", ":", "return", "[", "]", "for", "job", "in", "res", ".", "json", "(", ")", "[", "'jobs'", "]", ":", "logger", ".", "debug", "(", "'job: {}'", ".", "format", "(", "job", "[", "'settings'", "]", "[", "'name'", "]", ")", ")", "if", "'libraries'", "in", "job", "[", "'settings'", "]", ".", "keys", "(", ")", ":", "for", "library", "in", "job", "[", "'settings'", "]", "[", "'libraries'", "]", ":", "if", "match", ".", "suffix", "in", "library", ".", "keys", "(", ")", ":", "try", ":", "# if in prod_folder, mapping turns uri into name", "job_library_uri", "=", "basename", "(", "library", "[", "match", ".", "suffix", "]", ")", "job_match", "=", "library_mapping", "[", "job_library_uri", "]", "except", "KeyError", ":", "logger", ".", "debug", "(", "'not in library map: {}'", ".", "format", "(", "job_library_uri", ")", ")", "pass", "else", ":", "if", "match", ".", "replace_version", "(", "job_match", ",", "logger", ")", ":", "job_list", ".", "append", "(", "{", "'job_id'", ":", "job", "[", "'job_id'", "]", ",", "'job_name'", ":", "job", "[", "'settings'", "]", "[", "'name'", "]", ",", "'library_path'", ":", "library", "[", "match", ".", "suffix", "]", ",", "}", ")", "else", ":", "logger", ".", "debug", "(", "'not replacable: {}'", ".", "format", "(", "job_match", ".", "filename", ")", ")", "else", ":", "logger", ".", "debug", "(", "'no matching suffix: looking for {}, found {}'", ".", "format", "(", "match", ".", "suffix", ",", "str", "(", "library", ".", "keys", "(", ")", ")", ")", ")", "return", "job_list", "else", ":", "raise", "APIError", "(", "res", ")" ]
Returns column data following db . Column parameter specification .
def get_column_metadata ( conn , table : str , schema = 'public' ) : query = """\ SELECT attname as name, format_type(atttypid, atttypmod) AS data_type, NOT attnotnull AS nullable FROM pg_catalog.pg_attribute WHERE attrelid=%s::regclass AND attnum > 0 AND NOT attisdropped ORDER BY attnum;""" qualified_name = compile_qualified_name ( table , schema = schema ) for record in select_dict ( conn , query , params = ( qualified_name , ) ) : yield record
1,485
https://github.com/portfoliome/postpy/blob/fe26199131b15295fc5f669a0ad2a7f47bf490ee/postpy/admin.py#L47-L62
[ "def", "weld_invert", "(", "array", ")", ":", "obj_id", ",", "weld_obj", "=", "create_weld_object", "(", "array", ")", "weld_template", "=", "\"\"\"result(\n for({array},\n appender[bool],\n |b: appender[bool], i: i64, e: bool|\n if(e, merge(b, false), merge(b, true))\n )\n)\"\"\"", "weld_obj", ".", "weld_code", "=", "weld_template", ".", "format", "(", "array", "=", "obj_id", ")", "return", "weld_obj" ]
Reflect basic table attributes .
def reflect_table ( conn , table_name , schema = 'public' ) : column_meta = list ( get_column_metadata ( conn , table_name , schema = schema ) ) primary_key_columns = list ( get_primary_keys ( conn , table_name , schema = schema ) ) columns = [ Column ( * * column_data ) for column_data in column_meta ] primary_key = PrimaryKey ( primary_key_columns ) return Table ( table_name , columns , primary_key , schema = schema )
1,486
https://github.com/portfoliome/postpy/blob/fe26199131b15295fc5f669a0ad2a7f47bf490ee/postpy/admin.py#L65-L74
[ "def", "get_job_list", "(", "logger", ",", "match", ",", "library_mapping", ",", "token", ",", "host", ")", ":", "res", "=", "requests", ".", "get", "(", "host", "+", "'/api/2.0/jobs/list'", ",", "auth", "=", "(", "'token'", ",", "token", ")", ",", ")", "if", "res", ".", "status_code", "==", "200", ":", "job_list", "=", "[", "]", "if", "len", "(", "res", ".", "json", "(", ")", "[", "'jobs'", "]", ")", "==", "0", ":", "return", "[", "]", "for", "job", "in", "res", ".", "json", "(", ")", "[", "'jobs'", "]", ":", "logger", ".", "debug", "(", "'job: {}'", ".", "format", "(", "job", "[", "'settings'", "]", "[", "'name'", "]", ")", ")", "if", "'libraries'", "in", "job", "[", "'settings'", "]", ".", "keys", "(", ")", ":", "for", "library", "in", "job", "[", "'settings'", "]", "[", "'libraries'", "]", ":", "if", "match", ".", "suffix", "in", "library", ".", "keys", "(", ")", ":", "try", ":", "# if in prod_folder, mapping turns uri into name", "job_library_uri", "=", "basename", "(", "library", "[", "match", ".", "suffix", "]", ")", "job_match", "=", "library_mapping", "[", "job_library_uri", "]", "except", "KeyError", ":", "logger", ".", "debug", "(", "'not in library map: {}'", ".", "format", "(", "job_library_uri", ")", ")", "pass", "else", ":", "if", "match", ".", "replace_version", "(", "job_match", ",", "logger", ")", ":", "job_list", ".", "append", "(", "{", "'job_id'", ":", "job", "[", "'job_id'", "]", ",", "'job_name'", ":", "job", "[", "'settings'", "]", "[", "'name'", "]", ",", "'library_path'", ":", "library", "[", "match", ".", "suffix", "]", ",", "}", ")", "else", ":", "logger", ".", "debug", "(", "'not replacable: {}'", ".", "format", "(", "job_match", ".", "filename", ")", ")", "else", ":", "logger", ".", "debug", "(", "'no matching suffix: looking for {}, found {}'", ".", "format", "(", "match", ".", "suffix", ",", "str", "(", "library", ".", "keys", "(", ")", ")", ")", ")", "return", "job_list", "else", ":", "raise", "APIError", "(", "res", ")" ]
Reset database .
def reset ( db_name ) : conn = psycopg2 . connect ( database = 'postgres' ) db = Database ( db_name ) conn . autocommit = True with conn . cursor ( ) as cursor : cursor . execute ( db . drop_statement ( ) ) cursor . execute ( db . create_statement ( ) ) conn . close ( )
1,487
https://github.com/portfoliome/postpy/blob/fe26199131b15295fc5f669a0ad2a7f47bf490ee/postpy/admin.py#L77-L87
[ "def", "_add_dependency", "(", "self", ",", "dependency", ",", "var_name", "=", "None", ")", ":", "if", "var_name", "is", "None", ":", "var_name", "=", "next", "(", "self", ".", "temp_var_names", ")", "# Don't add duplicate dependencies", "if", "(", "dependency", ",", "var_name", ")", "not", "in", "self", ".", "dependencies", ":", "self", ".", "dependencies", ".", "append", "(", "(", "dependency", ",", "var_name", ")", ")", "return", "var_name" ]
Install Postgres extension if available .
def install_extensions ( extensions , * * connection_parameters ) : from postpy . connections import connect conn = connect ( * * connection_parameters ) conn . autocommit = True for extension in extensions : install_extension ( conn , extension )
1,488
https://github.com/portfoliome/postpy/blob/fe26199131b15295fc5f669a0ad2a7f47bf490ee/postpy/admin.py#L90-L105
[ "def", "rebind_invalidated_ars", "(", "portal", ")", ":", "logger", ".", "info", "(", "\"Rebinding retracted/invalidated ARs\"", ")", "# Walk through the Analysis Requests that were generated because of an", "# invalidation, get the source AR and rebind the fields", "relationship", "=", "\"AnalysisRequestChildAnalysisRequest\"", "ref_catalog", "=", "api", ".", "get_tool", "(", "REFERENCE_CATALOG", ")", "retests", "=", "ref_catalog", "(", "relationship", "=", "relationship", ")", "total", "=", "len", "(", "retests", ")", "to_remove", "=", "list", "(", ")", "num", "=", "0", "for", "num", ",", "relation", "in", "enumerate", "(", "retests", ",", "start", "=", "1", ")", ":", "relation", "=", "relation", ".", "getObject", "(", ")", "if", "not", "relation", ":", "continue", "retest", "=", "relation", ".", "getTargetObject", "(", ")", "invalidated", "=", "relation", ".", "getSourceObject", "(", ")", "retest", ".", "setInvalidated", "(", "invalidated", ")", "# Set ParentAnalysisRequest field to None, cause we will use this field", "# for storing Primary-Partitions relationship.", "retest", ".", "setParentAnalysisRequest", "(", "None", ")", "# Remove the relationship!", "to_remove", ".", "append", "(", "(", "relation", ".", "aq_parent", ",", "relation", ".", "id", ")", ")", "if", "num", "%", "100", "==", "0", ":", "logger", ".", "info", "(", "\"Rebinding invalidated ARs: {0}/{1}\"", ".", "format", "(", "num", ",", "total", ")", ")", "# Remove relationships", "for", "relation_to_remove", "in", "to_remove", ":", "folder", "=", "relation_to_remove", "[", "0", "]", "rel_id", "=", "relation_to_remove", "[", "1", "]", "folder", ".", "manage_delObjects", "(", "[", "rel_id", "]", ")", "logger", ".", "info", "(", "\"Rebound {} invalidated ARs\"", ".", "format", "(", "num", ")", ")" ]
Sends a status update to the framework scheduler .
def update ( self , status ) : logging . info ( 'Executor sends status update {} for task {}' . format ( status . state , status . task_id ) ) return self . driver . sendStatusUpdate ( encode ( status ) )
1,489
https://github.com/daskos/mentor/blob/b5fd64e3a3192f5664fa5c03e8517cacb4e0590f/mentor/proxies/executor.py#L108-L119
[ "def", "lchop", "(", "string", ",", "prefix", ")", ":", "if", "string", ".", "startswith", "(", "prefix", ")", ":", "return", "string", "[", "len", "(", "prefix", ")", ":", "]", "return", "string" ]
Sends a message to the framework scheduler .
def message ( self , data ) : logging . info ( 'Driver sends framework message {}' . format ( data ) ) return self . driver . sendFrameworkMessage ( data )
1,490
https://github.com/daskos/mentor/blob/b5fd64e3a3192f5664fa5c03e8517cacb4e0590f/mentor/proxies/executor.py#L121-L128
[ "def", "activate_api_deployment", "(", "restApiId", ",", "stageName", ",", "deploymentId", ",", "region", "=", "None", ",", "key", "=", "None", ",", "keyid", "=", "None", ",", "profile", "=", "None", ")", ":", "try", ":", "conn", "=", "_get_conn", "(", "region", "=", "region", ",", "key", "=", "key", ",", "keyid", "=", "keyid", ",", "profile", "=", "profile", ")", "response", "=", "conn", ".", "update_stage", "(", "restApiId", "=", "restApiId", ",", "stageName", "=", "stageName", ",", "patchOperations", "=", "[", "{", "'op'", ":", "'replace'", ",", "'path'", ":", "'/deploymentId'", ",", "'value'", ":", "deploymentId", "}", "]", ")", "return", "{", "'set'", ":", "True", ",", "'response'", ":", "_convert_datetime_str", "(", "response", ")", "}", "except", "ClientError", "as", "e", ":", "return", "{", "'set'", ":", "False", ",", "'error'", ":", "__utils__", "[", "'boto3.get_error'", "]", "(", "e", ")", "}" ]
Get current time
def get_current_time ( self ) : hms = [ int ( self . get_current_controller_value ( i ) ) for i in range ( 406 , 409 ) ] return datetime . time ( * hms )
1,491
https://github.com/centralniak/py-raildriver/blob/c7f5f551e0436451b9507fc63a62e49a229282b9/raildriver/library.py#L134-L141
[ "def", "start", "(", "st_reg_number", ")", ":", "#st_reg_number = str(st_reg_number)", "weights", "=", "[", "4", ",", "3", ",", "2", ",", "9", ",", "8", ",", "7", ",", "6", ",", "5", ",", "4", ",", "3", ",", "2", "]", "digits", "=", "st_reg_number", "[", ":", "len", "(", "st_reg_number", ")", "-", "2", "]", "check_digits", "=", "st_reg_number", "[", "-", "2", ":", "]", "divisor", "=", "11", "if", "len", "(", "st_reg_number", ")", ">", "13", ":", "return", "False", "sum_total", "=", "0", "for", "i", "in", "range", "(", "len", "(", "digits", ")", ")", ":", "sum_total", "=", "sum_total", "+", "int", "(", "digits", "[", "i", "]", ")", "*", "weights", "[", "i", "]", "rest_division", "=", "sum_total", "%", "divisor", "first_digit", "=", "divisor", "-", "rest_division", "if", "first_digit", "==", "10", "or", "first_digit", "==", "11", ":", "first_digit", "=", "0", "if", "str", "(", "first_digit", ")", "!=", "check_digits", "[", "0", "]", ":", "return", "False", "digits", "=", "digits", "+", "str", "(", "first_digit", ")", "weights", "=", "[", "5", "]", "+", "weights", "sum_total", "=", "0", "for", "i", "in", "range", "(", "len", "(", "digits", ")", ")", ":", "sum_total", "=", "sum_total", "+", "int", "(", "digits", "[", "i", "]", ")", "*", "weights", "[", "i", "]", "rest_division", "=", "sum_total", "%", "divisor", "second_digit", "=", "divisor", "-", "rest_division", "if", "second_digit", "==", "10", "or", "second_digit", "==", "11", ":", "second_digit", "=", "0", "return", "str", "(", "first_digit", ")", "+", "str", "(", "second_digit", ")", "==", "check_digits" ]
Returns the Provider Product and Engine name .
def get_loco_name ( self ) : ret_str = self . dll . GetLocoName ( ) . decode ( ) if not ret_str : return return ret_str . split ( '.:.' )
1,492
https://github.com/centralniak/py-raildriver/blob/c7f5f551e0436451b9507fc63a62e49a229282b9/raildriver/library.py#L143-L152
[ "def", "awd_lstm_lm_1150", "(", "dataset_name", "=", "None", ",", "vocab", "=", "None", ",", "pretrained", "=", "False", ",", "ctx", "=", "cpu", "(", ")", ",", "root", "=", "os", ".", "path", ".", "join", "(", "get_home_dir", "(", ")", ",", "'models'", ")", ",", "*", "*", "kwargs", ")", ":", "predefined_args", "=", "{", "'embed_size'", ":", "400", ",", "'hidden_size'", ":", "1150", ",", "'mode'", ":", "'lstm'", ",", "'num_layers'", ":", "3", ",", "'tie_weights'", ":", "True", ",", "'dropout'", ":", "0.4", ",", "'weight_drop'", ":", "0.5", ",", "'drop_h'", ":", "0.2", ",", "'drop_i'", ":", "0.65", ",", "'drop_e'", ":", "0.1", "}", "mutable_args", "=", "frozenset", "(", "[", "'dropout'", ",", "'weight_drop'", ",", "'drop_h'", ",", "'drop_i'", ",", "'drop_e'", "]", ")", "assert", "all", "(", "(", "k", "not", "in", "kwargs", "or", "k", "in", "mutable_args", ")", "for", "k", "in", "predefined_args", ")", ",", "'Cannot override predefined model settings.'", "predefined_args", ".", "update", "(", "kwargs", ")", "return", "_get_rnn_model", "(", "AWDRNN", ",", "'awd_lstm_lm_1150'", ",", "dataset_name", ",", "vocab", ",", "pretrained", ",", "ctx", ",", "root", ",", "*", "*", "predefined_args", ")" ]
Sets controller value
def set_controller_value ( self , index_or_name , value ) : if not isinstance ( index_or_name , int ) : index = self . get_controller_index ( index_or_name ) else : index = index_or_name self . dll . SetControllerValue ( index , ctypes . c_float ( value ) )
1,493
https://github.com/centralniak/py-raildriver/blob/c7f5f551e0436451b9507fc63a62e49a229282b9/raildriver/library.py#L172-L183
[ "def", "console", "(", "self", ",", "console", ")", ":", "if", "console", "==", "self", ".", "_console", ":", "return", "if", "self", ".", "_console_type", "==", "\"vnc\"", "and", "console", "is", "not", "None", "and", "console", "<", "5900", ":", "raise", "NodeError", "(", "\"VNC console require a port superior or equal to 5900 currently it's {}\"", ".", "format", "(", "console", ")", ")", "if", "self", ".", "_console", ":", "self", ".", "_manager", ".", "port_manager", ".", "release_tcp_port", "(", "self", ".", "_console", ",", "self", ".", "_project", ")", "self", ".", "_console", "=", "None", "if", "console", "is", "not", "None", ":", "if", "self", ".", "console_type", "==", "\"vnc\"", ":", "self", ".", "_console", "=", "self", ".", "_manager", ".", "port_manager", ".", "reserve_tcp_port", "(", "console", ",", "self", ".", "_project", ",", "port_range_start", "=", "5900", ",", "port_range_end", "=", "6000", ")", "else", ":", "self", ".", "_console", "=", "self", ".", "_manager", ".", "port_manager", ".", "reserve_tcp_port", "(", "console", ",", "self", ".", "_project", ")", "log", ".", "info", "(", "\"{module}: '{name}' [{id}]: console port set to {port}\"", ".", "format", "(", "module", "=", "self", ".", "manager", ".", "module_name", ",", "name", "=", "self", ".", "name", ",", "id", "=", "self", ".", "id", ",", "port", "=", "console", ")", ")" ]
Stops the scheduler driver .
def stop ( self , failover = False ) : logging . info ( 'Stops Scheduler Driver' ) return self . driver . stop ( failover )
1,494
https://github.com/daskos/mentor/blob/b5fd64e3a3192f5664fa5c03e8517cacb4e0590f/mentor/proxies/scheduler.py#L86-L97
[ "def", "upload_member_from_dir", "(", "member_data", ",", "target_member_dir", ",", "metadata", ",", "access_token", ",", "mode", "=", "'default'", ",", "max_size", "=", "MAX_SIZE_DEFAULT", ")", ":", "if", "not", "validate_metadata", "(", "target_member_dir", ",", "metadata", ")", ":", "raise", "ValueError", "(", "'Metadata should match directory contents!'", ")", "project_data", "=", "{", "f", "[", "'basename'", "]", ":", "f", "for", "f", "in", "member_data", "[", "'data'", "]", "if", "f", "[", "'source'", "]", "not", "in", "member_data", "[", "'sources_shared'", "]", "}", "for", "filename", "in", "metadata", ":", "if", "filename", "in", "project_data", "and", "mode", "==", "'safe'", ":", "logging", ".", "info", "(", "'Skipping {}, remote exists with matching'", "' name'", ".", "format", "(", "filename", ")", ")", "continue", "filepath", "=", "os", ".", "path", ".", "join", "(", "target_member_dir", ",", "filename", ")", "remote_file_info", "=", "(", "project_data", "[", "filename", "]", "if", "filename", "in", "project_data", "else", "None", ")", "upload_aws", "(", "target_filepath", "=", "filepath", ",", "metadata", "=", "metadata", "[", "filename", "]", ",", "access_token", "=", "access_token", ",", "project_member_id", "=", "member_data", "[", "'project_member_id'", "]", ",", "remote_file_info", "=", "remote_file_info", ")", "if", "mode", "==", "'sync'", ":", "for", "filename", "in", "project_data", ":", "if", "filename", "not", "in", "metadata", ":", "logging", ".", "debug", "(", "\"Deleting {}\"", ".", "format", "(", "filename", ")", ")", "delete_file", "(", "file_basename", "=", "filename", ",", "access_token", "=", "access_token", ",", "project_member_id", "=", "member_data", "[", "'project_member_id'", "]", ")" ]
Requests resources from Mesos .
def request ( self , requests ) : logging . info ( 'Request resources from Mesos' ) return self . driver . requestResources ( map ( encode , requests ) )
1,495
https://github.com/daskos/mentor/blob/b5fd64e3a3192f5664fa5c03e8517cacb4e0590f/mentor/proxies/scheduler.py#L121-L131
[ "def", "undefine", "(", "self", ")", ":", "if", "lib", ".", "EnvUndefglobal", "(", "self", ".", "_env", ",", "self", ".", "_glb", ")", "!=", "1", ":", "raise", "CLIPSError", "(", "self", ".", "_env", ")", "self", ".", "_env", "=", "None" ]
Launches the given set of tasks .
def launch ( self , offer_id , tasks , filters = Filters ( ) ) : logging . info ( 'Launches tasks {}' . format ( tasks ) ) return self . driver . launchTasks ( encode ( offer_id ) , map ( encode , tasks ) , encode ( filters ) )
1,496
https://github.com/daskos/mentor/blob/b5fd64e3a3192f5664fa5c03e8517cacb4e0590f/mentor/proxies/scheduler.py#L133-L150
[ "def", "get_or_generate_vocabulary", "(", "data_dir", ",", "tmp_dir", ",", "data_prefix", ",", "max_page_size_exp", ",", "approx_vocab_size", "=", "32768", ",", "strip", "=", "True", ")", ":", "num_pages_for_vocab_generation", "=", "approx_vocab_size", "//", "3", "vocab_file", "=", "vocab_filename", "(", "approx_vocab_size", ",", "strip", ")", "def", "my_generator", "(", "data_prefix", ")", ":", "\"\"\"Line generator for vocab.\"\"\"", "count", "=", "0", "for", "page", "in", "corpus_page_generator", "(", "all_corpus_files", "(", "data_prefix", ")", "[", ":", ":", "-", "1", "]", ",", "tmp_dir", ",", "max_page_size_exp", ")", ":", "revisions", "=", "page", "[", "\"revisions\"", "]", "if", "revisions", ":", "text", "=", "get_text", "(", "revisions", "[", "-", "1", "]", ",", "strip", "=", "strip", ")", "yield", "text", "count", "+=", "1", "if", "count", "%", "100", "==", "0", ":", "tf", ".", "logging", ".", "info", "(", "\"reading pages for vocab %d\"", "%", "count", ")", "if", "count", ">", "num_pages_for_vocab_generation", ":", "break", "return", "generator_utils", ".", "get_or_generate_vocab_inner", "(", "data_dir", ",", "vocab_file", ",", "approx_vocab_size", ",", "my_generator", "(", "data_prefix", ")", ")" ]
Kills the specified task .
def kill ( self , task_id ) : logging . info ( 'Kills task {}' . format ( task_id ) ) return self . driver . killTask ( encode ( task_id ) )
1,497
https://github.com/daskos/mentor/blob/b5fd64e3a3192f5664fa5c03e8517cacb4e0590f/mentor/proxies/scheduler.py#L152-L162
[ "def", "urlstate", "(", "self", ",", "encryption_key", ")", ":", "lzma", "=", "LZMACompressor", "(", ")", "urlstate_data", "=", "json", ".", "dumps", "(", "self", ".", "_state_dict", ")", "urlstate_data", "=", "lzma", ".", "compress", "(", "urlstate_data", ".", "encode", "(", "\"UTF-8\"", ")", ")", "urlstate_data", "+=", "lzma", ".", "flush", "(", ")", "urlstate_data", "=", "_AESCipher", "(", "encryption_key", ")", ".", "encrypt", "(", "urlstate_data", ")", "lzma", "=", "LZMACompressor", "(", ")", "urlstate_data", "=", "lzma", ".", "compress", "(", "urlstate_data", ")", "urlstate_data", "+=", "lzma", ".", "flush", "(", ")", "urlstate_data", "=", "base64", ".", "urlsafe_b64encode", "(", "urlstate_data", ")", "return", "urlstate_data", ".", "decode", "(", "\"utf-8\"", ")" ]
Allows the framework to query the status for non - terminal tasks .
def reconcile ( self , statuses ) : logging . info ( 'Reconciles task statuses {}' . format ( statuses ) ) return self . driver . reconcileTasks ( map ( encode , statuses ) )
1,498
https://github.com/daskos/mentor/blob/b5fd64e3a3192f5664fa5c03e8517cacb4e0590f/mentor/proxies/scheduler.py#L164-L173
[ "def", "save_reg", "(", "data", ")", ":", "reg_dir", "=", "_reg_dir", "(", ")", "regfile", "=", "os", ".", "path", ".", "join", "(", "reg_dir", ",", "'register'", ")", "try", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "reg_dir", ")", ":", "os", ".", "makedirs", "(", "reg_dir", ")", "except", "OSError", "as", "exc", ":", "if", "exc", ".", "errno", "==", "errno", ".", "EEXIST", ":", "pass", "else", ":", "raise", "try", ":", "with", "salt", ".", "utils", ".", "files", ".", "fopen", "(", "regfile", ",", "'a'", ")", "as", "fh_", ":", "salt", ".", "utils", ".", "msgpack", ".", "dump", "(", "data", ",", "fh_", ")", "except", "Exception", ":", "log", ".", "error", "(", "'Could not write to msgpack file %s'", ",", "__opts__", "[", "'outdir'", "]", ")", "raise" ]
Accepts the given offers and performs a sequence of operations on those accepted offers .
def accept ( self , offer_ids , operations , filters = Filters ( ) ) : logging . info ( 'Accepts offers {}' . format ( offer_ids ) ) return self . driver . acceptOffers ( map ( encode , offer_ids ) , map ( encode , operations ) , encode ( filters ) )
1,499
https://github.com/daskos/mentor/blob/b5fd64e3a3192f5664fa5c03e8517cacb4e0590f/mentor/proxies/scheduler.py#L187-L201
[ "def", "run_example", "(", ")", ":", "weather", "=", "get_weather_data", "(", "'weather.csv'", ")", "my_turbine", ",", "e126", ",", "dummy_turbine", "=", "initialize_wind_turbines", "(", ")", "calculate_power_output", "(", "weather", ",", "my_turbine", ",", "e126", ",", "dummy_turbine", ")", "plot_or_print", "(", "my_turbine", ",", "e126", ",", "dummy_turbine", ")" ]