query
stringlengths
5
1.23k
positive
stringlengths
53
15.2k
id_
int64
0
252k
task_name
stringlengths
87
242
negative
listlengths
20
553
Validate that the provided configuration is well formed.
def _validate_config ( config ) : if not isinstance ( config , list ) : raise TypeError ( 'Config must be a list' ) for config_dict in config : if not isinstance ( config_dict , dict ) : raise TypeError ( 'Config must be a list of dictionaries' ) label = config_dict . keys ( ) [ 0 ] cfg = config_dict [ label ] if not isinstance ( cfg , dict ) : raise TypeError ( 'Config structure is broken' ) if 'host' not in cfg : raise TypeError ( 'Config entries must have a value for host' ) if not isinstance ( cfg [ 'host' ] , str ) and not isinstance ( cfg [ 'host' ] , list ) : raise TypeError ( 'Host must be a string or a list.' ) if 'port' not in cfg : raise TypeError ( 'Config entries must have a value for port' ) if not isinstance ( cfg [ 'port' ] , int ) : raise TypeError ( 'Port must be an int' ) if 'dbpath' not in cfg : raise TypeError ( 'Config entries must have a value for dbpath' ) if not isinstance ( cfg [ 'dbpath' ] , str ) : if not isinstance ( cfg [ 'dbpath' ] , list ) : raise TypeError ( 'Dbpath must either a string or a list of ' 'strings' ) for dbpath in cfg [ 'dbpath' ] : if not isinstance ( dbpath , str ) : raise TypeError ( 'Dbpath must either a string or a list ' 'of strings' ) if ( 'read_preference' in cfg and not isinstance ( cfg [ 'read_preference' ] , str ) ) : raise TypeError ( 'Read_preference must be a string' ) if ( 'replicaSet' in cfg and not isinstance ( cfg [ 'replicaSet' ] , str ) ) : raise TypeError ( 'replicaSet must be a string' )
6,200
https://github.com/uberVU/mongo-pool/blob/286d1d8e0b3c17d5d7d4860487fe69358941067d/mongo_pool/mongo_pool.py#L45-L98
[ "def", "write_bits", "(", "self", ",", "*", "args", ")", ":", "# Would be nice to make this a bit smarter", "if", "len", "(", "args", ")", ">", "8", ":", "raise", "ValueError", "(", "\"Can only write 8 bits at a time\"", ")", "self", ".", "_output_buffer", ".", "append", "(", "chr", "(", "reduce", "(", "lambda", "x", ",", "y", ":", "xor", "(", "x", ",", "args", "[", "y", "]", "<<", "y", ")", ",", "xrange", "(", "len", "(", "args", ")", ")", ",", "0", ")", ")", ")", "return", "self" ]
Builds a dict with information to connect to Clusters .
def _parse_configs ( self , config ) : for config_dict in config : label = config_dict . keys ( ) [ 0 ] cfg = config_dict [ label ] # Transform dbpath to something digestable by regexp. dbpath = cfg [ 'dbpath' ] pattern = self . _parse_dbpath ( dbpath ) read_preference = cfg . get ( 'read_preference' , 'primary' ) . upper ( ) read_preference = self . _get_read_preference ( read_preference ) # Put all parameters that could be passed to pymongo.MongoClient # in a separate dict, to ease MongoClient creation. cluster_config = { 'params' : { 'host' : cfg [ 'host' ] , 'port' : cfg [ 'port' ] , 'read_preference' : read_preference , 'replicaSet' : cfg . get ( 'replicaSet' ) } , 'pattern' : pattern , 'label' : label } self . _clusters . append ( cluster_config )
6,201
https://github.com/uberVU/mongo-pool/blob/286d1d8e0b3c17d5d7d4860487fe69358941067d/mongo_pool/mongo_pool.py#L100-L139
[ "def", "setOverlayTextureColorSpace", "(", "self", ",", "ulOverlayHandle", ",", "eTextureColorSpace", ")", ":", "fn", "=", "self", ".", "function_table", ".", "setOverlayTextureColorSpace", "result", "=", "fn", "(", "ulOverlayHandle", ",", "eTextureColorSpace", ")", "return", "result" ]
Converts the dbpath to a regexp pattern .
def _parse_dbpath ( dbpath ) : if isinstance ( dbpath , list ) : # Support dbpath param as an array. dbpath = '|' . join ( dbpath ) # Append $ (end of string) so that twit will not match twitter! if not dbpath . endswith ( '$' ) : dbpath = '(%s)$' % dbpath return dbpath
6,202
https://github.com/uberVU/mongo-pool/blob/286d1d8e0b3c17d5d7d4860487fe69358941067d/mongo_pool/mongo_pool.py#L142-L164
[ "def", "_check_channel_state_for_update", "(", "self", ",", "channel_identifier", ":", "ChannelID", ",", "closer", ":", "Address", ",", "update_nonce", ":", "Nonce", ",", "block_identifier", ":", "BlockSpecification", ",", ")", "->", "Optional", "[", "str", "]", ":", "msg", "=", "None", "closer_details", "=", "self", ".", "_detail_participant", "(", "channel_identifier", "=", "channel_identifier", ",", "participant", "=", "closer", ",", "partner", "=", "self", ".", "node_address", ",", "block_identifier", "=", "block_identifier", ",", ")", "if", "closer_details", ".", "nonce", "==", "update_nonce", ":", "msg", "=", "(", "'updateNonClosingBalanceProof transaction has already '", "'been mined and updated the channel succesfully.'", ")", "return", "msg" ]
Converts read_preference from string to pymongo . ReadPreference value .
def _get_read_preference(read_preference):
    """Convert read_preference from string to a pymongo.ReadPreference value.

    Args:
        read_preference: attribute name on pymongo.ReadPreference
            (e.g. 'PRIMARY').

    Raises:
        ValueError: if the name is not a valid read preference.
    """
    value = getattr(pymongo.ReadPreference, read_preference, None)
    if value is None:
        # Report the name that was passed in; the original overwrote the
        # variable first, so the message always said 'None'.
        raise ValueError('Invalid read preference: %s' % read_preference)
    return value
6,203
https://github.com/uberVU/mongo-pool/blob/286d1d8e0b3c17d5d7d4860487fe69358941067d/mongo_pool/mongo_pool.py#L167-L181
[ "def", "delete_volume", "(", "target", ",", "stop", "=", "True", ")", ":", "volinfo", "=", "info", "(", ")", "if", "target", "not", "in", "volinfo", ":", "log", ".", "error", "(", "'Cannot delete non-existing volume %s'", ",", "target", ")", "return", "False", "# Stop volume if requested to and it is running", "running", "=", "(", "volinfo", "[", "target", "]", "[", "'status'", "]", "==", "'1'", ")", "if", "not", "stop", "and", "running", ":", "# Fail if volume is running if stop is not requested", "log", ".", "error", "(", "'Volume %s must be stopped before deletion'", ",", "target", ")", "return", "False", "if", "running", ":", "if", "not", "stop_volume", "(", "target", ",", "force", "=", "True", ")", ":", "return", "False", "cmd", "=", "'volume delete {0}'", ".", "format", "(", "target", ")", "return", "_gluster", "(", "cmd", ")" ]
Set the timeout for existing and future Clients .
def set_timeout(self, network_timeout):
    """Set the timeout for existing and future Clients.

    A no-op when the timeout is unchanged; otherwise records the new value
    and disconnects so Clients are recreated with it on next access.
    """
    if network_timeout != self._network_timeout:
        self._network_timeout = network_timeout
        self._disconnect()
6,204
https://github.com/uberVU/mongo-pool/blob/286d1d8e0b3c17d5d7d4860487fe69358941067d/mongo_pool/mongo_pool.py#L183-L197
[ "def", "weld_mean", "(", "array", ",", "weld_type", ")", ":", "weld_obj_sum", "=", "weld_aggregate", "(", "array", ",", "weld_type", ",", "'+'", ")", "obj_id", ",", "weld_obj", "=", "create_weld_object", "(", "array", ")", "weld_obj_sum_id", "=", "get_weld_obj_id", "(", "weld_obj", ",", "weld_obj_sum", ")", "weld_template", "=", "_weld_mean_code", "weld_obj", ".", "weld_code", "=", "weld_template", ".", "format", "(", "sum", "=", "weld_obj_sum_id", ",", "array", "=", "obj_id", ")", "return", "weld_obj" ]
Disconnect from all MongoDB Clients .
def _disconnect ( self ) : for cluster in self . _clusters : if 'connection' in cluster : connection = cluster . pop ( 'connection' ) connection . close ( ) # Remove all attributes that are database names so that next time # when they are accessed, __getattr__ will be called and will create # new Clients for dbname in self . _mapped_databases : self . __delattr__ ( dbname ) self . _mapped_databases = [ ]
6,205
https://github.com/uberVU/mongo-pool/blob/286d1d8e0b3c17d5d7d4860487fe69358941067d/mongo_pool/mongo_pool.py#L199-L210
[ "def", "_read_htm_ids", "(", "self", ",", "mount_point", ")", ":", "result", "=", "{", "}", "target_id", "=", "None", "for", "line", "in", "self", ".", "_htm_lines", "(", "mount_point", ")", ":", "target_id", "=", "target_id", "or", "self", ".", "_target_id_from_htm", "(", "line", ")", "return", "target_id", ",", "result" ]
Return a connection to a Cluster .
def _get_connection ( self , cluster ) : # w=1 because: # http://stackoverflow.com/questions/14798552/is-mongodb-2-x-write-concern-w-1-truly-equals-to-safe-true if 'connection' not in cluster : cluster [ 'connection' ] = self . _connection_class ( socketTimeoutMS = self . _network_timeout , w = 1 , j = self . j , * * cluster [ 'params' ] ) return cluster [ 'connection' ]
6,206
https://github.com/uberVU/mongo-pool/blob/286d1d8e0b3c17d5d7d4860487fe69358941067d/mongo_pool/mongo_pool.py#L212-L235
[ "def", "__texUpdate", "(", "self", ",", "frame", ")", ":", "# Retrieve buffer from videosink", "if", "self", ".", "texture_locked", ":", "return", "self", ".", "buffer", "=", "frame", "self", ".", "texUpdated", "=", "True" ]
Map a database name to the Cluster that holds the database .
def _match_dbname ( self , dbname ) : for config in self . _clusters : if re . match ( config [ 'pattern' ] , dbname ) : return config raise Exception ( 'No such database %s.' % dbname )
6,207
https://github.com/uberVU/mongo-pool/blob/286d1d8e0b3c17d5d7d4860487fe69358941067d/mongo_pool/mongo_pool.py#L237-L250
[ "def", "format_row_filter", "(", "self", ",", "next_filter", ")", ":", "next", "(", "next_filter", ")", "while", "True", ":", "items", "=", "(", "yield", ")", "assert", "all", "(", "isinstance", "(", "x", ",", "VTMLBuffer", ")", "for", "x", "in", "items", ")", "raw", "=", "(", "fn", "(", "x", ")", "for", "x", ",", "fn", "in", "zip", "(", "items", ",", "self", ".", "formatters", ")", ")", "for", "x", "in", "itertools", ".", "zip_longest", "(", "*", "raw", ")", ":", "next_filter", ".", "send", "(", "x", ")" ]
Try to execute a function repeatedly, returning as soon as a call succeeds without raising an exception, or giving up after max_try attempts.
def try_ntime(max_try, func, *args, **kwargs):
    """Call ``func(*args, **kwargs)`` up to ``max_try`` times.

    Returns the first successful result. If every attempt raises, the
    exception from the last attempt is re-raised.

    Args:
        max_try: maximum number of attempts, at least 1.
        func: callable to invoke.

    Raises:
        ValueError: if max_try is less than 1.
    """
    if max_try < 1:
        raise ValueError('max_try must be at least 1')
    last_exception = None
    for _ in range(max_try):
        try:
            return func(*args, **kwargs)
        except Exception as e:
            last_exception = e
    raise last_exception
6,208
https://github.com/MacHu-GWU/single_file_module-project/blob/01f7a6b250853bebfd73de275895bf274325cfc1/sfm/flow.py#L9-L29
[ "def", "wave_infochunk", "(", "path", ")", ":", "with", "open", "(", "path", ",", "'rb'", ")", "as", "file", ":", "if", "file", ".", "read", "(", "4", ")", "!=", "b\"RIFF\"", ":", "return", "None", "data_size", "=", "file", ".", "read", "(", "4", ")", "# container size", "if", "file", ".", "read", "(", "4", ")", "!=", "b\"WAVE\"", ":", "return", "None", "while", "True", ":", "chunkid", "=", "file", ".", "read", "(", "4", ")", "sizebuf", "=", "file", ".", "read", "(", "4", ")", "if", "len", "(", "sizebuf", ")", "<", "4", "or", "len", "(", "chunkid", ")", "<", "4", ":", "return", "None", "size", "=", "struct", ".", "unpack", "(", "b'<L'", ",", "sizebuf", ")", "[", "0", "]", "if", "chunkid", "[", "0", ":", "3", "]", "!=", "b\"fmt\"", ":", "if", "size", "%", "2", "==", "1", ":", "seek", "=", "size", "+", "1", "else", ":", "seek", "=", "size", "file", ".", "seek", "(", "size", ",", "1", ")", "else", ":", "return", "bytearray", "(", "b\"RIFF\"", "+", "data_size", "+", "b\"WAVE\"", "+", "chunkid", "+", "sizebuf", "+", "file", ".", "read", "(", "size", ")", ")" ]
Return HTML for highlightjs JavaScript .
def highlightjs_javascript(jquery=None):
    """Return HTML for highlightjs JavaScript.

    When ``jquery`` is None, the 'include_jquery' setting decides whether
    a jQuery script tag is emitted before the highlight.js tags.
    """
    # See if we have to include jQuery
    if jquery is None:
        jquery = get_highlightjs_setting('include_jquery', False)
    tags = []
    if jquery:
        jquery_url = highlightjs_jquery_url()
        if jquery_url:
            tags.append('<script src="{url}"></script>'.format(url=jquery_url))
    hljs_url = highlightjs_url()
    if hljs_url:
        tags.append('<script src="{url}"></script>'.format(url=hljs_url))
        tags.append('<script>hljs.initHighlightingOnLoad();</script>')
    return ''.join(tags)
6,209
https://github.com/MounirMesselmeni/django-highlightjs/blob/3758cae67ed15f38641fb51a71ca9ed85af78345/highlightjs/templatetags/highlightjs.py#L39-L79
[ "def", "remove_users_from_organization", "(", "self", ",", "organization_id", ",", "users_list", ")", ":", "log", ".", "warning", "(", "'Removing users...'", ")", "url", "=", "'rest/servicedeskapi/organization/{}/user'", ".", "format", "(", "organization_id", ")", "data", "=", "{", "'usernames'", ":", "users_list", "}", "return", "self", ".", "delete", "(", "url", ",", "headers", "=", "self", ".", "experimental_headers", ",", "data", "=", "data", ")" ]
a repository lookup by owner and name
def repo(name: str, owner: str) -> snug.Query[dict]:
    """A repository lookup by owner and name."""
    response = yield f'/repos/{owner}/{name}'
    return json.loads(response.content)
6,210
https://github.com/ariebovenberg/snug/blob/4f5cd30e6b7b2c3f0ad3cc10be865bd8900b38ef/tutorial/composed0.py#L13-L15
[ "def", "start_transmit", "(", "self", ",", "blocking", "=", "False", ",", "start_packet_groups", "=", "True", ",", "*", "ports", ")", ":", "port_list", "=", "self", ".", "set_ports_list", "(", "*", "ports", ")", "if", "start_packet_groups", ":", "port_list_for_packet_groups", "=", "self", ".", "ports", ".", "values", "(", ")", "port_list_for_packet_groups", "=", "self", ".", "set_ports_list", "(", "*", "port_list_for_packet_groups", ")", "self", ".", "api", ".", "call_rc", "(", "'ixClearTimeStamp {}'", ".", "format", "(", "port_list_for_packet_groups", ")", ")", "self", ".", "api", ".", "call_rc", "(", "'ixStartPacketGroups {}'", ".", "format", "(", "port_list_for_packet_groups", ")", ")", "self", ".", "api", ".", "call_rc", "(", "'ixStartTransmit {}'", ".", "format", "(", "port_list", ")", ")", "time", ".", "sleep", "(", "0.2", ")", "if", "blocking", ":", "self", ".", "wait_transmit", "(", "*", "ports", ")" ]
a repo lookup by owner and name
def repo(name: str, owner: str) -> snug.Query[dict]:
    """A repo lookup by owner and name."""
    response = yield snug.GET(f'https://api.github.com/repos/{owner}/{name}')
    return json.loads(response.content)
6,211
https://github.com/ariebovenberg/snug/blob/4f5cd30e6b7b2c3f0ad3cc10be865bd8900b38ef/tutorial/executing_queries.py#L6-L10
[ "def", "start_transmit", "(", "self", ",", "blocking", "=", "False", ",", "start_packet_groups", "=", "True", ",", "*", "ports", ")", ":", "port_list", "=", "self", ".", "set_ports_list", "(", "*", "ports", ")", "if", "start_packet_groups", ":", "port_list_for_packet_groups", "=", "self", ".", "ports", ".", "values", "(", ")", "port_list_for_packet_groups", "=", "self", ".", "set_ports_list", "(", "*", "port_list_for_packet_groups", ")", "self", ".", "api", ".", "call_rc", "(", "'ixClearTimeStamp {}'", ".", "format", "(", "port_list_for_packet_groups", ")", ")", "self", ".", "api", ".", "call_rc", "(", "'ixStartPacketGroups {}'", ".", "format", "(", "port_list_for_packet_groups", ")", ")", "self", ".", "api", ".", "call_rc", "(", "'ixStartTransmit {}'", ".", "format", "(", "port_list", ")", ")", "time", ".", "sleep", "(", "0.2", ")", "if", "blocking", ":", "self", ".", "wait_transmit", "(", "*", "ports", ")" ]
follow another user
def follow(name: str) -> snug.Query[bool]:
    """Follow another user; True when the API confirms (HTTP 204)."""
    response = yield snug.PUT(f'https://api.github.com/user/following/{name}')
    return response.status_code == 204
6,212
https://github.com/ariebovenberg/snug/blob/4f5cd30e6b7b2c3f0ad3cc10be865bd8900b38ef/tutorial/executing_queries.py#L13-L17
[ "def", "GetAttachmentIdFromMediaId", "(", "media_id", ")", ":", "altchars", "=", "'+-'", "if", "not", "six", ".", "PY2", ":", "altchars", "=", "altchars", ".", "encode", "(", "'utf-8'", ")", "# altchars for '+' and '/'. We keep '+' but replace '/' with '-'", "buffer", "=", "base64", ".", "b64decode", "(", "str", "(", "media_id", ")", ",", "altchars", ")", "resoure_id_length", "=", "20", "attachment_id", "=", "''", "if", "len", "(", "buffer", ")", ">", "resoure_id_length", ":", "# We are cutting off the storage index.", "attachment_id", "=", "base64", ".", "b64encode", "(", "buffer", "[", "0", ":", "resoure_id_length", "]", ",", "altchars", ")", "if", "not", "six", ".", "PY2", ":", "attachment_id", "=", "attachment_id", ".", "decode", "(", "'utf-8'", ")", "else", ":", "attachment_id", "=", "media_id", "return", "attachment_id" ]
Retrieve the Task Information
def taskinfo(self):
    """Retrieve the Task Information.

    Queries the task engine ('QueryTask') for this task's definition and
    normalizes the returned dict in place: upper-case engine keys are
    renamed to their lower/camel-case equivalents and the PARAMETERS
    mapping is flattened into a list of parameter dicts.

    Returns:
        dict: the normalized task definition.
    """
    task_input = {'taskName': 'QueryTask',
                  'inputParameters': {"Task_Name": self._name}}
    info = taskengine.execute(task_input, self._engine, cwd=self._cwd)
    task_def = info['outputParameters']['DEFINITION']
    # Mandatory top-level keys are always renamed.
    task_def['name'] = str(task_def.pop('NAME'))
    task_def['description'] = str(task_def.pop('DESCRIPTION'))
    task_def['displayName'] = str(task_def.pop('DISPLAY_NAME'))
    # Optional top-level keys are renamed only when present.
    if 'COMMUTE_ON_SUBSET' in task_def:
        task_def['commute_on_subset'] = task_def.pop('COMMUTE_ON_SUBSET')
    if 'COMMUTE_ON_DOWNSAMPLE' in task_def:
        task_def['commute_on_downsample'] = task_def.pop('COMMUTE_ON_DOWNSAMPLE')
    # Convert PARAMETERS into a list instead of a dictionary
    # which matches the gsf side of things
    task_def['parameters'] = [v for v in task_def['PARAMETERS'].values()]
    task_def.pop('PARAMETERS')
    parameters = task_def['parameters']
    for parameter in parameters:
        parameter['name'] = str(parameter.pop('NAME'))
        parameter['description'] = str(parameter.pop('DESCRIPTION'))
        parameter['display_name'] = str(parameter.pop('DISPLAY_NAME'))
        parameter['required'] = bool(parameter.pop('REQUIRED'))
        if 'MIN' in parameter:
            parameter['min'] = parameter.pop('MIN')
        if 'MAX' in parameter:
            parameter['max'] = parameter.pop('MAX')
        # TYPE values containing '[' are split into a base type and the
        # bracketed dimensions; otherwise any 'ARRAY' suffix is stripped.
        if parameter['TYPE'].count('['):
            parameter['type'], parameter['dimensions'] = parameter.pop('TYPE').split('[')
            parameter['dimensions'] = '[' + parameter['dimensions']
            parameter['type'] = str(parameter['type'])
        else:
            parameter['type'] = str(parameter.pop('TYPE').split('ARRAY')[0])
        if 'DIMENSIONS' in parameter:
            parameter['dimensions'] = parameter.pop('DIMENSIONS')
        if 'DIRECTION' in parameter:
            parameter['direction'] = parameter.pop('DIRECTION').lower()
        # DEFAULT and CHOICE_LIST are kept only when non-None; a None value
        # is dropped entirely.
        if 'DEFAULT' in parameter:
            if parameter['DEFAULT'] is not None:
                parameter['default_value'] = parameter.pop('DEFAULT')
            else:
                parameter.pop('DEFAULT')
        if 'CHOICE_LIST' in parameter:
            if parameter['CHOICE_LIST'] is not None:
                parameter['choice_list'] = parameter.pop('CHOICE_LIST')
            else:
                parameter.pop('CHOICE_LIST')
        if 'FOLD_CASE' in parameter:
            parameter['fold_case'] = parameter.pop('FOLD_CASE')
        if 'AUTO_EXTENSION' in parameter:
            parameter['auto_extension'] = parameter.pop('AUTO_EXTENSION')
        if 'IS_TEMPORARY' in parameter:
            parameter['is_temporary'] = parameter.pop('IS_TEMPORARY')
        if 'IS_DIRECTORY' in parameter:
            parameter['is_directory'] = parameter.pop('IS_DIRECTORY')
    return task_def
6,213
https://github.com/envi-idl/envipyengine/blob/567b639d6592deec3289f6122a9e3d18f2f98432/envipyengine/taskengine/task.py#L57-L133
[ "def", "_mod_repo_in_file", "(", "repo", ",", "repostr", ",", "filepath", ")", ":", "with", "salt", ".", "utils", ".", "files", ".", "fopen", "(", "filepath", ")", "as", "fhandle", ":", "output", "=", "[", "]", "for", "line", "in", "fhandle", ":", "cols", "=", "salt", ".", "utils", ".", "args", ".", "shlex_split", "(", "salt", ".", "utils", ".", "stringutils", ".", "to_unicode", "(", "line", ")", ".", "strip", "(", ")", ")", "if", "repo", "not", "in", "cols", ":", "output", ".", "append", "(", "line", ")", "else", ":", "output", ".", "append", "(", "salt", ".", "utils", ".", "stringutils", ".", "to_str", "(", "repostr", "+", "'\\n'", ")", ")", "with", "salt", ".", "utils", ".", "files", ".", "fopen", "(", "filepath", ",", "'w'", ")", "as", "fhandle", ":", "fhandle", ".", "writelines", "(", "output", ")" ]
Single - chromosome despeckling
def despeckle_simple(B, th2=2):
    """Single-chromosome despeckling.

    Entries lying more than th2 standard deviations above the median of
    their diagonal are replaced by that diagonal's median. The input is
    left untouched; a despeckled copy is returned.
    """
    A = np.copy(B)
    n1 = A.shape[0]
    # Precompute per-diagonal medians and standard deviations (offsets >= 0)
    # before any value is modified.
    medians = {}
    stds = {}
    for offset in range(n1):
        diagonal = np.diag(A, offset)
        medians[offset] = np.median(diagonal)
        stds[offset] = np.std(diagonal)
    for nw, j in itertools.product(range(n1), range(n1)):
        threshold = medians[nw] + th2 * stds[nw]
        upper = j + nw
        if upper < n1 and A[j, upper] > threshold:
            A[j, upper] = medians[nw]
        lower = j - nw
        if lower >= 0 and A[j, lower] > threshold:
            A[j, lower] = medians[nw]
    return A
6,214
https://github.com/koszullab/metaTOR/blob/0c1203d1dffedfa5ea380c0335b4baa9cfb7e89a/metator/scripts/hicstuff.py#L32-L70
[ "def", "_ParseWtmp", "(", ")", ":", "users", "=", "{", "}", "wtmp_struct_size", "=", "UtmpStruct", ".", "GetSize", "(", ")", "filenames", "=", "glob", ".", "glob", "(", "\"/var/log/wtmp*\"", ")", "+", "[", "\"/var/run/utmp\"", "]", "for", "filename", "in", "filenames", ":", "try", ":", "wtmp", "=", "open", "(", "filename", ",", "\"rb\"", ")", ".", "read", "(", ")", "except", "IOError", ":", "continue", "for", "offset", "in", "range", "(", "0", ",", "len", "(", "wtmp", ")", ",", "wtmp_struct_size", ")", ":", "try", ":", "record", "=", "UtmpStruct", "(", "wtmp", "[", "offset", ":", "offset", "+", "wtmp_struct_size", "]", ")", "except", "utils", ".", "ParsingError", ":", "break", "# Users only appear for USER_PROCESS events, others are system.", "if", "record", ".", "ut_type", "!=", "7", ":", "continue", "try", ":", "if", "users", "[", "record", ".", "ut_user", "]", "<", "record", ".", "tv_sec", ":", "users", "[", "record", ".", "ut_user", "]", "=", "record", ".", "tv_sec", "except", "KeyError", ":", "users", "[", "record", ".", "ut_user", "]", "=", "record", ".", "tv_sec", "return", "users" ]
Perform the bin_dense procedure for sparse matrices . Remaining rows and cols are lumped with the rest at the end .
def bin_sparse(M, subsampling_factor=3):
    """Perform the bin_dense procedure for sparse matrices.

    Remaining rows and cols that do not fill a complete bin are lumped
    with the last bin. Falls back to dense binning when scipy is missing.
    """
    try:
        from scipy.sparse import coo_matrix
    except ImportError as e:
        print(str(e))
        print("I am peforming dense binning by default.")
        return bin_dense(M.todense())

    coo = M.tocoo()
    n, m = coo.shape
    factor = subsampling_factor
    # Integer-divide the coordinates; duplicate (row, col) pairs are summed
    # implicitly by the COO constructor.
    binned_row = coo.row // factor
    binned_col = coo.col // factor
    binned_n = n // factor
    binned_m = m // factor
    # Fold any leftover rows/columns into the last bin.
    binned_row[binned_row >= binned_n] -= n % factor
    binned_col[binned_col >= binned_m] -= m % factor
    return coo_matrix((coo.data, (binned_row, binned_col)),
                      shape=(binned_n, binned_m))
6,215
https://github.com/koszullab/metaTOR/blob/0c1203d1dffedfa5ea380c0335b4baa9cfb7e89a/metator/scripts/hicstuff.py#L174-L205
[ "def", "optional", "(", "e", ",", "default", "=", "Ignore", ")", ":", "def", "match_optional", "(", "s", ",", "grm", "=", "None", ",", "pos", "=", "0", ")", ":", "try", ":", "return", "e", "(", "s", ",", "grm", ",", "pos", ")", "except", "PegreError", ":", "return", "PegreResult", "(", "s", ",", "default", ",", "(", "pos", ",", "pos", ")", ")", "return", "match_optional" ]
Bin either sparse or dense matrices .
def bin_matrix(M, subsampling_factor=3):
    """Bin either sparse or dense matrices.

    Dispatches to bin_sparse for scipy sparse inputs and to bin_dense
    otherwise (including when scipy itself is not installed).
    """
    try:
        from scipy.sparse import issparse
    except ImportError:
        return bin_dense(M, subsampling_factor=subsampling_factor)
    if issparse(M):
        return bin_sparse(M, subsampling_factor=subsampling_factor)
    return bin_dense(M, subsampling_factor=subsampling_factor)
6,216
https://github.com/koszullab/metaTOR/blob/0c1203d1dffedfa5ea380c0335b4baa9cfb7e89a/metator/scripts/hicstuff.py#L208-L219
[ "def", "logs", "(", "self", ",", "container", ",", "stdout", "=", "True", ",", "stderr", "=", "True", ",", "stream", "=", "False", ",", "timestamps", "=", "False", ",", "tail", "=", "'all'", ",", "since", "=", "None", ",", "follow", "=", "None", ",", "until", "=", "None", ")", ":", "if", "follow", "is", "None", ":", "follow", "=", "stream", "params", "=", "{", "'stderr'", ":", "stderr", "and", "1", "or", "0", ",", "'stdout'", ":", "stdout", "and", "1", "or", "0", ",", "'timestamps'", ":", "timestamps", "and", "1", "or", "0", ",", "'follow'", ":", "follow", "and", "1", "or", "0", ",", "}", "if", "tail", "!=", "'all'", "and", "(", "not", "isinstance", "(", "tail", ",", "int", ")", "or", "tail", "<", "0", ")", ":", "tail", "=", "'all'", "params", "[", "'tail'", "]", "=", "tail", "if", "since", "is", "not", "None", ":", "if", "isinstance", "(", "since", ",", "datetime", ")", ":", "params", "[", "'since'", "]", "=", "utils", ".", "datetime_to_timestamp", "(", "since", ")", "elif", "(", "isinstance", "(", "since", ",", "int", ")", "and", "since", ">", "0", ")", ":", "params", "[", "'since'", "]", "=", "since", "else", ":", "raise", "errors", ".", "InvalidArgument", "(", "'since value should be datetime or positive int, '", "'not {}'", ".", "format", "(", "type", "(", "since", ")", ")", ")", "if", "until", "is", "not", "None", ":", "if", "utils", ".", "version_lt", "(", "self", ".", "_version", ",", "'1.35'", ")", ":", "raise", "errors", ".", "InvalidVersion", "(", "'until is not supported for API version < 1.35'", ")", "if", "isinstance", "(", "until", ",", "datetime", ")", ":", "params", "[", "'until'", "]", "=", "utils", ".", "datetime_to_timestamp", "(", "until", ")", "elif", "(", "isinstance", "(", "until", ",", "int", ")", "and", "until", ">", "0", ")", ":", "params", "[", "'until'", "]", "=", "until", "else", ":", "raise", "errors", ".", "InvalidArgument", "(", "'until value should be datetime or positive int, '", "'not {}'", ".", "format", "(", "type", "(", 
"until", ")", ")", ")", "url", "=", "self", ".", "_url", "(", "\"/containers/{0}/logs\"", ",", "container", ")", "res", "=", "self", ".", "_get", "(", "url", ",", "params", "=", "params", ",", "stream", "=", "stream", ")", "output", "=", "self", ".", "_get_result", "(", "container", ",", "stream", ",", "res", ")", "if", "stream", ":", "return", "CancellableStream", "(", "output", ",", "res", ")", "else", ":", "return", "output" ]
Perform binning on genome annotations such as contig information or bin positions .
def bin_annotation(annotation=None, subsampling_factor=3):
    """Perform binning on genome annotations such as contig information
    or bin positions.

    Keeps every subsampling_factor-th entry; an empty or missing input
    yields array([0]).
    """
    if annotation is None:
        annotation = np.array([])
    binned = list(annotation[::subsampling_factor])
    if not binned:
        binned.append(0)
    return np.array(binned)
6,217
https://github.com/koszullab/metaTOR/blob/0c1203d1dffedfa5ea380c0335b4baa9cfb7e89a/metator/scripts/hicstuff.py#L222-L234
[ "def", "forbidden", "(", "cls", ",", "errors", "=", "None", ")", ":", "if", "cls", ".", "expose_status", ":", "# pragma: no cover", "cls", ".", "response", ".", "content_type", "=", "'application/json'", "cls", ".", "response", ".", "_status_line", "=", "'403 Forbidden'", "return", "cls", "(", "403", ",", "errors", "=", "errors", ")", ".", "to_json" ]
Iteratively bin matrix M with bin_dense, computing smaller and smaller matrices until a single bin remains.
def build_pyramid(M, subsampling_factor=3):
    """Iterate a given number of times on matrix M so as to compute
    smaller and smaller matrices with bin_matrix.

    Returns the list of successive binning levels, starting with M itself,
    stopping once a dimension shrinks to a single bin.

    Raises:
        ValueError: if subsampling_factor is below 2. (A factor of 1 never
            shrinks the matrix, so the original `subs < 1` guard allowed an
            infinite loop; the message already demanded a value > 1.)
    """
    subs = int(subsampling_factor)
    if subs < 2:
        raise ValueError(
            "Subsampling factor needs to be an integer greater than 1.")
    N = [M]
    while min(N[-1].shape) > 1:
        N.append(bin_matrix(N[-1], subsampling_factor=subs))
    return N
6,218
https://github.com/koszullab/metaTOR/blob/0c1203d1dffedfa5ea380c0335b4baa9cfb7e89a/metator/scripts/hicstuff.py#L251-L263
[ "def", "blank_object", "(", "obj", ":", "T", ",", "fieldlist", ":", "Sequence", "[", "str", "]", ")", "->", "None", ":", "for", "f", "in", "fieldlist", ":", "setattr", "(", "obj", ",", "f", ",", "None", ")" ]
Perform the kb - binning procedure with total bin lengths being exactly set to that of the specified input . Fragments overlapping two potential bins will be split and related contact counts will be divided according to overlap proportions in each bin .
def bin_exact_kb_dense(M, positions, length=10):
    """Perform the kb-binning procedure with total bin lengths being
    exactly set to that of the specified input.

    Fragments overlapping two potential bins are split and the related
    contact counts divided according to overlap proportions in each bin.

    Args:
        M: dense contact matrix, indexed by fragment.
        positions: genomic start positions of the fragments, in bp.
        length: bin length in kb.

    Returns:
        np.ndarray: the binned (m x m) matrix.
    """
    unit = 10 ** 3
    ul = unit * length  # bin length in bp
    units = positions / ul  # fragment positions expressed in bin units
    n = len(positions)
    # Fragment indices at which a bin boundary is crossed.
    idx = [i for i in range(n - 1) if np.ceil(units[i]) < np.ceil(units[i + 1])]
    m = len(idx) - 1
    N = np.zeros((m, m))
    # Overlap proportion of each boundary fragment with the adjacent bin.
    remainders = [0] + [np.abs(units[i] - units[i + 1]) for i in range(m)]
    for i in range(m):
        # NOTE(review): the slice sums use fragment indices (idx[...]) while
        # the remainder corrections index M[i][j] / M[i + 1][j] directly by
        # bin index — assumed intentional, confirm against callers.
        N[i] = np.array([(M[idx[j]:idx[j + 1], idx[i]:idx[i + 1]].sum() -
                          remainders[j] * M[i][j] +
                          remainders[j + 1] * M[i + 1][j])
                         for j in range(m)])
    return N
6,219
https://github.com/koszullab/metaTOR/blob/0c1203d1dffedfa5ea380c0335b4baa9cfb7e89a/metator/scripts/hicstuff.py#L290-L311
[ "def", "fetch", "(", "self", ",", "raise_exc", "=", "True", ")", ":", "self", ".", "_request", "(", "GET", ",", "raise_exc", "=", "raise_exc", ")", "# ingests response", "self", ".", "fetched", "=", "True", "return", "self", ".", "state", ".", "copy", "(", ")" ]
Perform the exact kb - binning procedure on a sparse matrix .
def bin_kb_sparse(M, positions, length=10):
    """Perform the exact kb-binning procedure on a sparse matrix.

    Falls back to the dense version when scipy is unavailable.

    Args:
        M: sparse contact matrix.
        positions: genomic start positions of the fragments, in bp.
        length: bin length in kb.

    Returns:
        tuple: (binned scipy coo_matrix, positions of the binned indices).
    """
    try:
        from scipy.sparse import coo_matrix
    except ImportError as e:
        print(str(e))
        print("I am peforming dense normalization by default.")
        return bin_kb_dense(M.todense(), positions=positions)
    r = M.tocoo()
    unit = 10 ** 3
    ul = unit * length  # bin length in bp
    units = positions / ul  # fragment positions expressed in bin units
    n = len(positions)
    indices = np.floor(units)
    # NOTE(review): r.row / ul is float division and np.floor returns
    # floats, which are then used as array indices — this relies on
    # Python 2 / old-numpy behavior; confirm before running on Python 3.
    row = [indices[np.floor(i)] for i in r.row / ul]
    col = [indices[np.floor(j)] for j in r.col / ul]
    # Positions of the fragments where a bin boundary is crossed.
    binned_indices = positions[[i for i in range(n - 1)
                                if np.ceil(units[i]) < np.ceil(units[i + 1])]]
    return coo_matrix((r.data, (row, col))), binned_indices
6,220
https://github.com/koszullab/metaTOR/blob/0c1203d1dffedfa5ea380c0335b4baa9cfb7e89a/metator/scripts/hicstuff.py#L314-L334
[ "def", "read_until_close", "(", "self", ",", "timeout_ms", "=", "None", ")", ":", "while", "True", ":", "try", ":", "yield", "self", ".", "read", "(", "timeout_ms", "=", "timeout_ms", ")", "except", "usb_exceptions", ".", "AdbStreamClosedError", ":", "break" ]
Apply the trimming procedure to a sparse matrix .
def trim_sparse(M, n_std=3, s_min=None, s_max=None):
    """Apply the trimming procedure to a sparse matrix.

    Rows/columns whose total (sparsity) falls outside the open interval
    (s_min, s_max) — defaulting to mean +/- n_std standard deviations —
    are removed; only entries whose row AND column both pass are kept.
    Falls back to the dense version when scipy is unavailable.
    """
    try:
        from scipy.sparse import coo_matrix
    except ImportError as e:
        print(str(e))
        print("I am peforming dense normalization by default.")
        return trim_dense(M.todense())

    r = M.tocoo()
    sparsity = np.array(r.sum(axis=1)).flatten()
    mean = np.mean(sparsity)
    std = np.std(sparsity)
    if s_min is None:
        s_min = mean - n_std * std
    if s_max is None:
        s_max = mean + n_std * std
    passes = (sparsity > s_min) * (sparsity < s_max)
    rows, cols, data = [], [], []
    for i, j, v in zip(r.row, r.col, r.data):
        if passes[i] and passes[j]:
            rows.append(i)
            cols.append(j)
            data.append(v)
    return coo_matrix((np.array(data), (np.array(rows), np.array(cols))))
6,221
https://github.com/koszullab/metaTOR/blob/0c1203d1dffedfa5ea380c0335b4baa9cfb7e89a/metator/scripts/hicstuff.py#L361-L386
[ "def", "moment2", "(", "self", ")", ":", "moment1", "=", "self", ".", "moment1", "delays", ",", "response", "=", "self", ".", "delay_response_series", "return", "statstools", ".", "calc_mean_time_deviation", "(", "delays", ",", "response", ",", "moment1", ")" ]
Apply one of the many normalization types to input dense matrix . Will also apply any callable norms such as a user - made or a lambda function .
def normalize_dense ( M , norm = "frag" , order = 1 , iterations = 3 ) : s = np . array ( M , np . float64 ) floatorder = np . float64 ( order ) if norm == "SCN" : for _ in range ( 0 , iterations ) : sumrows = s . sum ( axis = 1 ) maskrows = ( sumrows != 0 ) [ : , None ] * ( sumrows != 0 ) [ None , : ] sums_row = sumrows [ : , None ] * np . ones ( sumrows . shape ) [ None , : ] s [ maskrows ] = 1. * s [ maskrows ] / sums_row [ maskrows ] sumcols = s . sum ( axis = 0 ) maskcols = ( sumcols != 0 ) [ : , None ] * ( sumcols != 0 ) [ None , : ] sums_col = sumcols [ None , : ] * np . ones ( sumcols . shape ) [ : , None ] s [ maskcols ] = 1. * s [ maskcols ] / sums_col [ maskcols ] elif norm == "mirnylib" : try : from mirnylib import numutils as ntls s = ntls . iterativeCorrection ( s , iterations ) [ 0 ] except ImportError as e : print ( str ( e ) ) print ( "I can't find mirnylib." ) print ( "Please install it from " "https://bitbucket.org/mirnylab/mirnylib" ) print ( "I will use default norm as fallback." ) return normalize_dense ( M , order = order , iterations = iterations ) elif norm == "frag" : for _ in range ( 1 , iterations ) : s_norm_x = np . linalg . norm ( s , ord = floatorder , axis = 0 ) s_norm_y = np . linalg . norm ( s , ord = floatorder , axis = 1 ) s_norm = np . tensordot ( s_norm_x , s_norm_y , axes = 0 ) s [ s_norm != 0 ] = 1. * s [ s_norm != 0 ] / s_norm [ s_norm != 0 ] elif norm == "global" : s_norm = np . linalg . norm ( s , ord = floatorder ) s /= 1. * s_norm elif callable ( norm ) : s = norm ( M ) else : print ( "Unknown norm. Returning input as fallback" ) return ( s + s . T ) / 2
6,222
https://github.com/koszullab/metaTOR/blob/0c1203d1dffedfa5ea380c0335b4baa9cfb7e89a/metator/scripts/hicstuff.py#L389-L440
[ "def", "delete_binding", "(", "self", ",", "vhost", ",", "exchange", ",", "queue", ",", "rt_key", ")", ":", "vhost", "=", "quote", "(", "vhost", ",", "''", ")", "exchange", "=", "quote", "(", "exchange", ",", "''", ")", "queue", "=", "quote", "(", "queue", ",", "''", ")", "body", "=", "''", "path", "=", "Client", ".", "urls", "[", "'rt_bindings_between_exch_queue'", "]", "%", "(", "vhost", ",", "exchange", ",", "queue", ",", "rt_key", ")", "return", "self", ".", "_call", "(", "path", ",", "'DELETE'", ",", "headers", "=", "Client", ".", "json_headers", ")" ]
Applies a normalization type to a sparse matrix .
def normalize_sparse ( M , norm = "frag" , order = 1 , iterations = 3 ) : try : from scipy . sparse import csr_matrix except ImportError as e : print ( str ( e ) ) print ( "I am peforming dense normalization by default." ) return normalize_dense ( M . todense ( ) ) r = csr_matrix ( M ) if norm == "SCN" : for _ in range ( 1 , iterations ) : row_sums = np . array ( r . sum ( axis = 1 ) ) . flatten ( ) col_sums = np . array ( r . sum ( axis = 0 ) ) . flatten ( ) row_indices , col_indices = r . nonzero ( ) r . data /= row_sums [ row_indices ] * col_sums [ col_indices ] elif norm == "global" : try : from scipy . sparse import linalg r = linalg . norm ( M , ord = order ) except ( ImportError , AttributeError ) as e : print ( str ( e ) ) print ( "I can't import linalg tools for sparse matrices." ) print ( "Please upgrade your scipy version to 0.16.0." ) elif callable ( norm ) : r = norm ( M ) else : print ( "Unknown norm. Returning input as fallback" ) return r
6,223
https://github.com/koszullab/metaTOR/blob/0c1203d1dffedfa5ea380c0335b4baa9cfb7e89a/metator/scripts/hicstuff.py#L443-L476
[ "def", "freeze_encrypt", "(", "dest_dir", ",", "zip_filename", ",", "config", ",", "opt", ")", ":", "pgp_keys", "=", "grok_keys", "(", "config", ")", "icefile_prefix", "=", "\"aomi-%s\"", "%", "os", ".", "path", ".", "basename", "(", "os", ".", "path", ".", "dirname", "(", "opt", ".", "secretfile", ")", ")", "if", "opt", ".", "icefile_prefix", ":", "icefile_prefix", "=", "opt", ".", "icefile_prefix", "timestamp", "=", "time", ".", "strftime", "(", "\"%H%M%S-%m-%d-%Y\"", ",", "datetime", ".", "datetime", ".", "now", "(", ")", ".", "timetuple", "(", ")", ")", "ice_file", "=", "\"%s/%s-%s.ice\"", "%", "(", "dest_dir", ",", "icefile_prefix", ",", "timestamp", ")", "if", "not", "encrypt", "(", "zip_filename", ",", "ice_file", ",", "pgp_keys", ")", ":", "raise", "aomi", ".", "exceptions", ".", "GPG", "(", "\"Unable to encrypt zipfile\"", ")", "return", "ice_file" ]
Compute GC across a window of given length .
def GC_wide ( genome , window = 1000 ) : GC = [ ] from Bio import SeqIO with open ( genome ) as handle : sequence = "" . join ( [ str ( record . seq ) for record in SeqIO . parse ( handle , "fasta" ) ] ) n = len ( sequence ) for i in range ( 0 , n , window ) : portion = sequence [ i : min ( i + window , n ) ] GC . append ( GC_partial ( portion ) ) return GC
6,224
https://github.com/koszullab/metaTOR/blob/0c1203d1dffedfa5ea380c0335b4baa9cfb7e89a/metator/scripts/hicstuff.py#L492-L509
[ "def", "update_entity_alias", "(", "self", ",", "alias_id", ",", "name", ",", "canonical_id", ",", "mount_accessor", ",", "mount_point", "=", "DEFAULT_MOUNT_POINT", ")", ":", "params", "=", "{", "'name'", ":", "name", ",", "'canonical_id'", ":", "canonical_id", ",", "'mount_accessor'", ":", "mount_accessor", ",", "}", "api_path", "=", "'/v1/{mount_point}/entity-alias/id/{id}'", ".", "format", "(", "mount_point", "=", "mount_point", ",", "id", "=", "alias_id", ",", ")", "response", "=", "self", ".", "_adapter", ".", "post", "(", "url", "=", "api_path", ",", "json", "=", "params", ",", ")", "if", "response", ".", "status_code", "==", "204", ":", "return", "response", "else", ":", "return", "response", ".", "json", "(", ")" ]
Returns a Dade matrix from input numpy matrix . Any annotations are added as header . If filename is provided and valid said matrix is also saved as text .
def to_dade_matrix ( M , annotations = "" , filename = None ) : n , m = M . shape A = np . zeros ( ( n + 1 , m + 1 ) ) A [ 1 : , 1 : ] = M if not annotations : annotations = np . array ( [ "" for _ in n ] , dtype = str ) A [ 0 , : ] = annotations A [ : , 0 ] = annotations . T if filename : try : np . savetxt ( filename , A , fmt = '%i' ) print ( "I saved input matrix in dade format as " + str ( filename ) ) except ValueError as e : print ( "I couldn't save input matrix." ) print ( str ( e ) ) finally : return A return A
6,225
https://github.com/koszullab/metaTOR/blob/0c1203d1dffedfa5ea380c0335b4baa9cfb7e89a/metator/scripts/hicstuff.py#L678-L701
[ "def", "user_deleted_from_site_event", "(", "event", ")", ":", "userid", "=", "event", ".", "principal", "catalog", "=", "api", ".", "portal", ".", "get_tool", "(", "'portal_catalog'", ")", "query", "=", "{", "'object_provides'", ":", "WORKSPACE_INTERFACE", "}", "query", "[", "'workspace_members'", "]", "=", "userid", "workspaces", "=", "[", "IWorkspace", "(", "b", ".", "_unrestrictedGetObject", "(", ")", ")", "for", "b", "in", "catalog", ".", "unrestrictedSearchResults", "(", "query", ")", "]", "for", "workspace", "in", "workspaces", ":", "workspace", ".", "remove_from_team", "(", "userid", ")" ]
Compute the adjacency matrix of the largest connected component of the graph whose input matrix is adjacent .
def largest_connected_component ( matrix ) : try : import scipy . sparse n , components = scipy . sparse . csgraph . connected_components ( matrix , directed = False ) print ( "I found " + str ( n ) + " connected components." ) component_dist = collections . Counter ( components ) print ( "Distribution of components: " + str ( component_dist ) ) most_common , _ = component_dist . most_common ( 1 ) [ 0 ] ilcc = ( components == most_common ) return matrix [ : , ilcc ] [ ilcc ] except ImportError as e : print ( "I couldn't find scipy which is needed for graph routines." ) print ( str ( e ) ) print ( "Returning input matrix as fallback." ) return matrix
6,226
https://github.com/koszullab/metaTOR/blob/0c1203d1dffedfa5ea380c0335b4baa9cfb7e89a/metator/scripts/hicstuff.py#L736-L756
[ "def", "write_new_expr_id", "(", "self", ",", "search_group", ",", "search", ",", "lars_id", ",", "instruments", ",", "gps_start_time", ",", "gps_end_time", ",", "comments", "=", "None", ")", ":", "# check if id already exists", "check_id", "=", "self", ".", "get_expr_id", "(", "search_group", ",", "search", ",", "lars_id", ",", "instruments", ",", "gps_start_time", ",", "gps_end_time", ",", "comments", "=", "comments", ")", "if", "check_id", ":", "return", "check_id", "# experiment not found in table", "row", "=", "self", ".", "RowType", "(", ")", "row", ".", "experiment_id", "=", "self", ".", "get_next_id", "(", ")", "row", ".", "search_group", "=", "search_group", "row", ".", "search", "=", "search", "row", ".", "lars_id", "=", "lars_id", "row", ".", "instruments", "=", "ifos_from_instrument_set", "(", "instruments", ")", "row", ".", "gps_start_time", "=", "gps_start_time", "row", ".", "gps_end_time", "=", "gps_end_time", "row", ".", "comments", "=", "comments", "self", ".", "append", "(", "row", ")", "# return new ID", "return", "row", ".", "experiment_id" ]
Compute best matching 3D genome structure from underlying input matrix using ShRec3D - derived method from Lesne et al . 2014 .
def to_structure ( matrix , alpha = 1 ) : connected = largest_connected_component ( matrix ) distances = to_distance ( connected , alpha ) n , m = connected . shape bary = np . sum ( np . triu ( distances , 1 ) ) / ( n ** 2 ) # barycenters d = np . array ( np . sum ( distances ** 2 , 0 ) / n - bary ) # distances to origin gram = np . array ( [ ( d [ i ] + d [ j ] - distances [ i ] [ j ] ** 2 ) / 2 for i , j in itertools . product ( range ( n ) , range ( m ) ) ] ) . reshape ( n , m ) normalized = gram / np . linalg . norm ( gram , 'fro' ) try : symmetric = np . array ( ( normalized + normalized . T ) / 2 , dtype = np . longfloat ) # just in case except AttributeError : symmetric = np . array ( ( normalized + normalized . T ) / 2 ) from scipy import linalg eigen_values , eigen_vectors = linalg . eigh ( symmetric ) if not ( eigen_values >= 0 ) . all ( ) : warnings . warn ( "Negative eigen values were found." ) idx = eigen_values . argsort ( ) [ - 3 : ] [ : : - 1 ] values = eigen_values [ idx ] vectors = eigen_vectors [ : , idx ] coordinates = vectors * np . sqrt ( values ) return coordinates
6,227
https://github.com/koszullab/metaTOR/blob/0c1203d1dffedfa5ea380c0335b4baa9cfb7e89a/metator/scripts/hicstuff.py#L759-L798
[ "def", "check_important_variables", "(", "self", ")", ":", "if", "len", "(", "self", ".", "important_variables", "-", "set", "(", "self", ".", "args", ".", "keys", "(", ")", ")", ")", ":", "raise", "TypeError", "(", "\"Some important variables are not set\"", ")" ]
Retrieve indices of a trimmed matrix with respect to the original matrix . Fairly fast but is only correct if diagonal values are different which is always the case in practice .
def get_missing_bins ( original , trimmed ) : original_diag = np . diag ( original ) trimmed_diag = np . diag ( trimmed ) index = [ ] m = min ( original . shape ) for j in range ( min ( trimmed . shape ) ) : k = 0 while original_diag [ j + k ] != trimmed_diag [ j ] and k < 2 * m : k += 1 index . append ( k + j ) return np . array ( index )
6,228
https://github.com/koszullab/metaTOR/blob/0c1203d1dffedfa5ea380c0335b4baa9cfb7e89a/metator/scripts/hicstuff.py#L801-L816
[ "async", "def", "list", "(", "source", ")", ":", "result", "=", "[", "]", "async", "with", "streamcontext", "(", "source", ")", "as", "streamer", ":", "async", "for", "item", "in", "streamer", ":", "result", ".", "append", "(", "item", ")", "yield", "result" ]
Compute contact matrix from input distance matrix . Distance values of zeroes are given the largest contact count otherwise inferred non - zero distance values .
def distance_to_contact ( D , alpha = 1 ) : if callable ( alpha ) : distance_function = alpha else : try : a = np . float64 ( alpha ) def distance_function ( x ) : return 1 / ( x ** ( 1 / a ) ) except TypeError : print ( "Alpha parameter must be callable or an array-like" ) raise except ZeroDivisionError : raise ValueError ( "Alpha parameter must be non-zero" ) m = np . max ( distance_function ( D [ D != 0 ] ) ) M = np . zeros ( D . shape ) M [ D != 0 ] = distance_function ( D [ D != 0 ] ) M [ D == 0 ] = m return M
6,229
https://github.com/koszullab/metaTOR/blob/0c1203d1dffedfa5ea380c0335b4baa9cfb7e89a/metator/scripts/hicstuff.py#L918-L942
[ "def", "MergeAllSummaries", "(", "period", "=", "0", ",", "run_alone", "=", "False", ",", "key", "=", "None", ")", ":", "if", "key", "is", "None", ":", "key", "=", "tf", ".", "GraphKeys", ".", "SUMMARIES", "period", "=", "int", "(", "period", ")", "if", "run_alone", ":", "return", "MergeAllSummaries_RunAlone", "(", "period", ",", "key", ")", "else", ":", "return", "MergeAllSummaries_RunWithOp", "(", "period", ",", "key", ")" ]
Import a structure object from a PDB file .
def pdb_to_structure ( filename ) : try : from Bio . PDB import PDB except ImportError : print ( "I can't import Biopython which is needed to handle PDB files." ) raise p = PDB . PDBParser ( ) structure = p . get_structure ( 'S' , filename ) for _ in structure . get_chains ( ) : atoms = [ np . array ( atom . get_coord ( ) ) for atom in structure . get_atoms ( ) ] return atoms
6,230
https://github.com/koszullab/metaTOR/blob/0c1203d1dffedfa5ea380c0335b4baa9cfb7e89a/metator/scripts/hicstuff.py#L980-L993
[ "def", "clear_all", "(", "self", ")", ":", "keys", "=", "self", ".", "_analytics_backend", ".", "keys", "(", ")", "for", "key", "in", "itertools", ".", "chain", "(", "*", "keys", ")", ":", "with", "self", ".", "_analytics_backend", ".", "map", "(", ")", "as", "conn", ":", "if", "key", ".", "startswith", "(", "self", ".", "_prefix", ")", ":", "conn", ".", "delete", "(", "key", ")" ]
Flattens and converts a positions array to a contigs array if applicable .
def positions_to_contigs ( positions ) : if isinstance ( positions , np . ndarray ) : flattened_positions = positions . flatten ( ) else : try : flattened_positions = np . array ( [ pos for contig in positions for pos in contig ] ) except TypeError : flattened_positions = np . array ( positions ) if ( np . diff ( positions ) == 0 ) . any ( ) and not ( 0 in set ( positions ) ) : warnings . warn ( "I detected identical consecutive nonzero values." ) return positions n = len ( flattened_positions ) contigs = np . ones ( n ) counter = 0 for i in range ( 1 , n ) : if positions [ i ] == 0 : counter += 1 contigs [ i ] += counter else : contigs [ i ] = contigs [ i - 1 ] return contigs
6,231
https://github.com/koszullab/metaTOR/blob/0c1203d1dffedfa5ea380c0335b4baa9cfb7e89a/metator/scripts/hicstuff.py#L1005-L1031
[ "def", "fetch_result", "(", "self", ",", "trial", ")", ":", "trial_future", "=", "self", ".", "_find_item", "(", "self", ".", "_running", ",", "trial", ")", "if", "not", "trial_future", ":", "raise", "ValueError", "(", "\"Trial was not running.\"", ")", "self", ".", "_running", ".", "pop", "(", "trial_future", "[", "0", "]", ")", "with", "warn_if_slow", "(", "\"fetch_result\"", ")", ":", "result", "=", "ray", ".", "get", "(", "trial_future", "[", "0", "]", ")", "# For local mode", "if", "isinstance", "(", "result", ",", "_LocalWrapper", ")", ":", "result", "=", "result", ".", "unwrap", "(", ")", "return", "result" ]
Compute a distance law trend using the contact averages of equal distances . Specific positions can be supplied if needed .
def distance_diagonal_law ( matrix , positions = None ) : n = min ( matrix . shape ) if positions is None : return np . array ( [ np . average ( np . diagonal ( matrix , j ) ) for j in range ( n ) ] ) else : contigs = positions_to_contigs ( positions ) def is_intra ( i , j ) : return contigs [ i ] == contigs [ j ] max_intra_distance = max ( ( len ( contigs == u ) for u in set ( contigs ) ) ) intra_contacts = [ ] inter_contacts = [ np . average ( np . diagonal ( matrix , j ) ) for j in range ( max_intra_distance , n ) ] for j in range ( max_intra_distance ) : D = np . diagonal ( matrix , j ) for i in range ( len ( D ) ) : diagonal_intra = [ ] if is_intra ( i , j ) : diagonal_intra . append ( D [ i ] ) # else: # diagonal_inter.append(D[i]) # inter_contacts.append(np.average(np.array(diagonal_inter))) intra_contacts . append ( np . average ( np . array ( diagonal_intra ) ) ) intra_contacts . extend ( inter_contacts ) return [ positions , np . array ( intra_contacts ) ]
6,232
https://github.com/koszullab/metaTOR/blob/0c1203d1dffedfa5ea380c0335b4baa9cfb7e89a/metator/scripts/hicstuff.py#L1034-L1066
[ "def", "clear", "(", "self", ")", ":", "if", "self", ".", "_bit_count", "==", "0", ":", "return", "block", "=", "self", ".", "_qpart", ".", "document", "(", ")", ".", "begin", "(", ")", "while", "block", ".", "isValid", "(", ")", ":", "if", "self", ".", "getBlockValue", "(", "block", ")", ":", "self", ".", "setBlockValue", "(", "block", ",", "0", ")", "block", "=", "block", ".", "next", "(", ")" ]
Estimate parameters from the model described in Rippe et al . 2001 .
def rippe_parameters ( matrix , positions , lengths = None , init = None , circ = False ) : n , _ = matrix . shape if lengths is None : lengths = np . abs ( np . diff ( positions ) ) measurements , bins = [ ] , [ ] for i in range ( n ) : for j in range ( 1 , i ) : mean_length = ( lengths [ i ] + lengths [ j ] ) / 2. if positions [ i ] < positions [ j ] : d = ( ( ( positions [ j ] - positions [ i ] - lengths [ i ] ) + mean_length ) / 1000. ) else : d = ( ( ( positions [ i ] - positions [ j ] - lengths [ j ] ) + mean_length ) / 1000. ) bins . append ( np . abs ( d ) ) measurements . append ( matrix [ i , j ] ) parameters = estimate_param_rippe ( measurements , bins , init = init , circ = circ ) print ( parameters ) return parameters [ 0 ]
6,233
https://github.com/koszullab/metaTOR/blob/0c1203d1dffedfa5ea380c0335b4baa9cfb7e89a/metator/scripts/hicstuff.py#L1069-L1095
[ "def", "_update_trial_queue", "(", "self", ",", "blocking", "=", "False", ",", "timeout", "=", "600", ")", ":", "trials", "=", "self", ".", "_search_alg", ".", "next_trials", "(", ")", "if", "blocking", "and", "not", "trials", ":", "start", "=", "time", ".", "time", "(", ")", "# Checking `is_finished` instead of _search_alg.is_finished", "# is fine because blocking only occurs if all trials are", "# finished and search_algorithm is not yet finished", "while", "(", "not", "trials", "and", "not", "self", ".", "is_finished", "(", ")", "and", "time", ".", "time", "(", ")", "-", "start", "<", "timeout", ")", ":", "logger", ".", "info", "(", "\"Blocking for next trial...\"", ")", "trials", "=", "self", ".", "_search_alg", ".", "next_trials", "(", ")", "time", ".", "sleep", "(", "1", ")", "for", "trial", "in", "trials", ":", "self", ".", "add_trial", "(", "trial", ")" ]
Computes so - called scalograms used to easily visualize contacts at different distance scales . Edge cases have been painstakingly taken care of .
def scalogram ( M , circ = False ) : # Sanity checks if not type ( M ) is np . ndarray : M = np . array ( M ) if M . shape [ 0 ] != M . shape [ 1 ] : raise ValueError ( "Matrix is not square." ) try : n = min ( M . shape ) except AttributeError : n = M . size N = np . zeros ( M . shape ) for i in range ( n ) : for j in range ( n ) : if i + j < n and i >= j : N [ i , j ] = M [ i , i - j : i + j + 1 ] . sum ( ) elif circ and i + j < n and i < j : N [ i , j ] = M [ i , i - j : ] . sum ( ) + M [ i , : i + j + 1 ] . sum ( ) elif circ and i >= j and i + j >= n : N [ i , j ] = M [ i , i - j : ] . sum ( ) + M [ i , : i + j - n + 1 ] . sum ( ) elif circ and i < j and i + j >= n : N [ i , j ] = ( M [ i , i - j : ] . sum ( ) + M [ i , : ] . sum ( ) + M [ i , : i + j - n + 1 ] . sum ( ) ) return N
6,234
https://github.com/koszullab/metaTOR/blob/0c1203d1dffedfa5ea380c0335b4baa9cfb7e89a/metator/scripts/hicstuff.py#L1288-L1321
[ "def", "toggle", "(", "self", ",", "section", ",", "option", ")", ":", "self", ".", "set", "(", "section", ",", "option", ",", "not", "self", ".", "get", "(", "section", ",", "option", ")", ")" ]
Compute a Fourier transform based distance between two matrices .
def asd ( M1 , M2 ) : from scipy . fftpack import fft2 spectra1 = np . abs ( fft2 ( M1 ) ) spectra2 = np . abs ( fft2 ( M2 ) ) return np . linalg . norm ( spectra2 - spectra1 )
6,235
https://github.com/koszullab/metaTOR/blob/0c1203d1dffedfa5ea380c0335b4baa9cfb7e89a/metator/scripts/hicstuff.py#L1324-L1336
[ "def", "inject_provisional_community", "(", "sender", ",", "json", "=", "None", ",", "record", "=", "None", ",", "index", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "index", "and", "not", "index", ".", "startswith", "(", "current_app", ".", "config", "[", "'COMMUNITIES_INDEX_PREFIX'", "]", ")", ":", "return", "json", "[", "'provisional_communities'", "]", "=", "list", "(", "sorted", "(", "[", "r", ".", "id_community", "for", "r", "in", "InclusionRequest", ".", "get_by_record", "(", "record", ".", "id", ")", "]", ")", ")" ]
Remove intrachromosomal contacts
def remove_intra ( M , contigs ) : N = np . copy ( M ) n = len ( N ) assert n == len ( contigs ) # Naive implmentation for now for ( i , j ) in itertools . product ( range ( n ) , range ( n ) ) : if contigs [ i ] == contigs [ j ] : N [ i , j ] = 0 return N
6,236
https://github.com/koszullab/metaTOR/blob/0c1203d1dffedfa5ea380c0335b4baa9cfb7e89a/metator/scripts/hicstuff.py#L1378-L1412
[ "def", "_create_download_failed_message", "(", "exception", ",", "url", ")", ":", "message", "=", "'Failed to download from:\\n{}\\nwith {}:\\n{}'", ".", "format", "(", "url", ",", "exception", ".", "__class__", ".", "__name__", ",", "exception", ")", "if", "_is_temporal_problem", "(", "exception", ")", ":", "if", "isinstance", "(", "exception", ",", "requests", ".", "ConnectionError", ")", ":", "message", "+=", "'\\nPlease check your internet connection and try again.'", "else", ":", "message", "+=", "'\\nThere might be a problem in connection or the server failed to process '", "'your request. Please try again.'", "elif", "isinstance", "(", "exception", ",", "requests", ".", "HTTPError", ")", ":", "try", ":", "server_message", "=", "''", "for", "elem", "in", "decode_data", "(", "exception", ".", "response", ".", "content", ",", "MimeType", ".", "XML", ")", ":", "if", "'ServiceException'", "in", "elem", ".", "tag", "or", "'Message'", "in", "elem", ".", "tag", ":", "server_message", "+=", "elem", ".", "text", ".", "strip", "(", "'\\n\\t '", ")", "except", "ElementTree", ".", "ParseError", ":", "server_message", "=", "exception", ".", "response", ".", "text", "message", "+=", "'\\nServer response: \"{}\"'", ".", "format", "(", "server_message", ")", "return", "message" ]
Label contigs according to relative positions
def positions_to_contigs ( positions ) : contig_labels = np . zeros_like ( positions ) contig_index = 0 for i , p in enumerate ( positions ) : if p == 0 : contig_index += 1 contig_labels [ i ] = contig_index return contig_labels
6,237
https://github.com/koszullab/metaTOR/blob/0c1203d1dffedfa5ea380c0335b4baa9cfb7e89a/metator/scripts/hicstuff.py#L1452-L1480
[ "def", "create_token_with_refresh_token", "(", "self", ",", "data", ",", "token_valid_for", "=", "180", ",", "refresh_token_valid_for", "=", "86400", ")", ":", "refresh_token", "=", "None", "refresh_token", "=", "jwt", ".", "encode", "(", "{", "'exp'", ":", "datetime", ".", "utcnow", "(", ")", "+", "timedelta", "(", "seconds", "=", "refresh_token_valid_for", ")", "}", ",", "self", ".", "app_secret", ")", ".", "decode", "(", "\"utf-8\"", ")", "jwt_token", "=", "jwt", ".", "encode", "(", "{", "'data'", ":", "data", ",", "'refresh_token'", ":", "refresh_token", ",", "'exp'", ":", "datetime", ".", "utcnow", "(", ")", "+", "timedelta", "(", "seconds", "=", "token_valid_for", ")", "}", ",", "self", ".", "app_secret", ")", "return", "Security", ".", "encrypt", "(", "jwt_token", ")" ]
Build positions from contig labels
def contigs_to_positions ( contigs , binning = 10000 ) : positions = np . zeros_like ( contigs ) index = 0 for _ , chunk in itertools . groubpy ( contigs ) : l = len ( chunk ) positions [ index : index + l ] = np . arange ( list ( chunk ) ) * binning index += l return positions
6,238
https://github.com/koszullab/metaTOR/blob/0c1203d1dffedfa5ea380c0335b4baa9cfb7e89a/metator/scripts/hicstuff.py#L1482-L1511
[ "def", "convert_to_experiment_list", "(", "experiments", ")", ":", "exp_list", "=", "experiments", "# Transform list if necessary", "if", "experiments", "is", "None", ":", "exp_list", "=", "[", "]", "elif", "isinstance", "(", "experiments", ",", "Experiment", ")", ":", "exp_list", "=", "[", "experiments", "]", "elif", "type", "(", "experiments", ")", "is", "dict", ":", "exp_list", "=", "[", "Experiment", ".", "from_json", "(", "name", ",", "spec", ")", "for", "name", ",", "spec", "in", "experiments", ".", "items", "(", ")", "]", "# Validate exp_list", "if", "(", "type", "(", "exp_list", ")", "is", "list", "and", "all", "(", "isinstance", "(", "exp", ",", "Experiment", ")", "for", "exp", "in", "exp_list", ")", ")", ":", "if", "len", "(", "exp_list", ")", ">", "1", ":", "logger", ".", "warning", "(", "\"All experiments will be \"", "\"using the same SearchAlgorithm.\"", ")", "else", ":", "raise", "TuneError", "(", "\"Invalid argument: {}\"", ".", "format", "(", "experiments", ")", ")", "return", "exp_list" ]
Split multiple chromosome matrix
def split_matrix ( M , contigs ) : index = 0 for _ , chunk in itertools . groubpy ( contigs ) : l = len ( chunk ) yield M [ index : index + l , index : index + l ] index += l
6,239
https://github.com/koszullab/metaTOR/blob/0c1203d1dffedfa5ea380c0335b4baa9cfb7e89a/metator/scripts/hicstuff.py#L1514-L1533
[ "def", "_ParseWtmp", "(", ")", ":", "users", "=", "{", "}", "wtmp_struct_size", "=", "UtmpStruct", ".", "GetSize", "(", ")", "filenames", "=", "glob", ".", "glob", "(", "\"/var/log/wtmp*\"", ")", "+", "[", "\"/var/run/utmp\"", "]", "for", "filename", "in", "filenames", ":", "try", ":", "wtmp", "=", "open", "(", "filename", ",", "\"rb\"", ")", ".", "read", "(", ")", "except", "IOError", ":", "continue", "for", "offset", "in", "range", "(", "0", ",", "len", "(", "wtmp", ")", ",", "wtmp_struct_size", ")", ":", "try", ":", "record", "=", "UtmpStruct", "(", "wtmp", "[", "offset", ":", "offset", "+", "wtmp_struct_size", "]", ")", "except", "utils", ".", "ParsingError", ":", "break", "# Users only appear for USER_PROCESS events, others are system.", "if", "record", ".", "ut_type", "!=", "7", ":", "continue", "try", ":", "if", "users", "[", "record", ".", "ut_user", "]", "<", "record", ".", "tv_sec", ":", "users", "[", "record", ".", "ut_user", "]", "=", "record", ".", "tv_sec", "except", "KeyError", ":", "users", "[", "record", ".", "ut_user", "]", "=", "record", ".", "tv_sec", "return", "users" ]
Find the nearest item of x from sorted array .
def find_nearest ( sorted_list , x ) : if x <= sorted_list [ 0 ] : return sorted_list [ 0 ] elif x >= sorted_list [ - 1 ] : return sorted_list [ - 1 ] else : lower = find_le ( sorted_list , x ) upper = find_ge ( sorted_list , x ) if ( x - lower ) > ( upper - x ) : return upper else : return lower
6,240
https://github.com/MacHu-GWU/single_file_module-project/blob/01f7a6b250853bebfd73de275895bf274325cfc1/sfm/binarysearch.py#L146-L178
[ "def", "regenerate_recovery_code", "(", "self", ",", "user_id", ")", ":", "url", "=", "self", ".", "_url", "(", "'{}/recovery-code-regeneration'", ".", "format", "(", "user_id", ")", ")", "return", "self", ".", "client", ".", "post", "(", "url", ")" ]
Set x axis s format .
def format_x_tick ( axis , major_locator = None , major_formatter = None , minor_locator = None , minor_formatter = None ) : if major_locator : axis . xaxis . set_major_locator ( major_locator ) if major_formatter : axis . xaxis . set_major_formatter ( major_formatter ) if minor_locator : axis . xaxis . set_minor_locator ( minor_locator ) if minor_formatter : axis . xaxis . set_minor_formatter ( minor_formatter ) axis . autoscale_view ( ) plt . setp ( axis . xaxis . get_majorticklabels ( ) , rotation = 90 ) plt . setp ( axis . xaxis . get_minorticklabels ( ) , rotation = 90 ) axis . grid ( )
6,241
https://github.com/MacHu-GWU/single_file_module-project/blob/01f7a6b250853bebfd73de275895bf274325cfc1/sfm/matplot_mate.py#L60-L85
[ "def", "dump", "(", "self", ")", ":", "assert", "self", ".", "database", "is", "not", "None", "cmd", "=", "\"SELECT count from {} WHERE rowid={}\"", "self", ".", "_execute", "(", "cmd", ".", "format", "(", "self", ".", "STATE_INFO_TABLE", ",", "self", ".", "STATE_INFO_ROW", ")", ")", "ret", "=", "self", ".", "_fetchall", "(", ")", "assert", "len", "(", "ret", ")", "==", "1", "assert", "len", "(", "ret", "[", "0", "]", ")", "==", "1", "count", "=", "self", ".", "_from_sqlite", "(", "ret", "[", "0", "]", "[", "0", "]", ")", "+", "self", ".", "inserts", "if", "count", ">", "self", ".", "row_limit", ":", "msg", "=", "\"cleaning up state, this might take a while.\"", "logger", ".", "warning", "(", "msg", ")", "delete", "=", "count", "-", "self", ".", "row_limit", "delete", "+=", "int", "(", "self", ".", "row_limit", "*", "(", "self", ".", "row_cleanup_quota", "/", "100.0", ")", ")", "cmd", "=", "(", "\"DELETE FROM {} WHERE timestamp IN (\"", "\"SELECT timestamp FROM {} ORDER BY timestamp ASC LIMIT {});\"", ")", "self", ".", "_execute", "(", "cmd", ".", "format", "(", "self", ".", "STATE_TABLE", ",", "self", ".", "STATE_TABLE", ",", "delete", ")", ")", "self", ".", "_vacuum", "(", ")", "cmd", "=", "\"SELECT COUNT(*) FROM {}\"", "self", ".", "_execute", "(", "cmd", ".", "format", "(", "self", ".", "STATE_TABLE", ")", ")", "ret", "=", "self", ".", "_fetchall", "(", ")", "assert", "len", "(", "ret", ")", "==", "1", "assert", "len", "(", "ret", "[", "0", "]", ")", "==", "1", "count", "=", "ret", "[", "0", "]", "[", "0", "]", "cmd", "=", "\"UPDATE {} SET count = {} WHERE rowid = {}\"", "self", ".", "_execute", "(", "cmd", ".", "format", "(", "self", ".", "STATE_INFO_TABLE", ",", "self", ".", "_to_sqlite", "(", "count", ")", ",", "self", ".", "STATE_INFO_ROW", ",", ")", ")", "self", ".", "_update_cache_directory_state", "(", ")", "self", ".", "database", ".", "commit", "(", ")", "self", ".", "cursor", ".", "close", "(", ")", "self", ".", "database", ".", "close", "(", ")", 
"self", ".", "database", "=", "None", "self", ".", "cursor", "=", "None", "self", ".", "inserts", "=", "0" ]
Set line legend .
def set_legend ( axis , lines , legend ) : try : if legend : axis . legend ( lines , legend ) except Exception as e : raise ValueError ( "invalid 'legend', Error: %s" % e )
6,242
https://github.com/MacHu-GWU/single_file_module-project/blob/01f7a6b250853bebfd73de275895bf274325cfc1/sfm/matplot_mate.py#L119-L130
[ "def", "restart", "(", "self", ",", "timeout", "=", "None", ")", ":", "msg", "=", "{", "\"value\"", ":", "\"Restart requested by \"", "+", "self", ".", "username", "+", "\"via the Splunk SDK for Python\"", "}", "# This message will be deleted once the server actually restarts.", "self", ".", "messages", ".", "create", "(", "name", "=", "\"restart_required\"", ",", "*", "*", "msg", ")", "result", "=", "self", ".", "post", "(", "\"server/control/restart\"", ")", "if", "timeout", "is", "None", ":", "return", "result", "start", "=", "datetime", ".", "now", "(", ")", "diff", "=", "timedelta", "(", "seconds", "=", "timeout", ")", "while", "datetime", ".", "now", "(", ")", "-", "start", "<", "diff", ":", "try", ":", "self", ".", "login", "(", ")", "if", "not", "self", ".", "restart_required", ":", "return", "result", "except", "Exception", "as", "e", ":", "sleep", "(", "1", ")", "raise", "Exception", "(", "\"Operation time out.\"", ")" ]
Get maximum value of an array . Automatically ignore invalid data .
def get_max ( array ) : largest = - np . inf for i in array : try : if i > largest : largest = i except : pass if np . isinf ( largest ) : raise ValueError ( "there's no numeric value in array!" ) else : return largest
6,243
https://github.com/MacHu-GWU/single_file_module-project/blob/01f7a6b250853bebfd73de275895bf274325cfc1/sfm/matplot_mate.py#L133-L150
[ "def", "toc", "(", "self", ")", ":", "elapsed", "=", "self", ".", "_time", "(", ")", "-", "self", ".", "tstart", "if", "self", ".", "verbose", ":", "self", ".", "write", "(", "'...toc(%r)=%.4fs\\n'", "%", "(", "self", ".", "label", ",", "elapsed", ")", ")", "self", ".", "flush", "(", ")", "return", "elapsed" ]
Get minimum value of an array . Automatically ignore invalid data .
def get_min ( array ) : smallest = np . inf for i in array : try : if i < smallest : smallest = i except : pass if np . isinf ( smallest ) : raise ValueError ( "there's no numeric value in array!" ) else : return smallest
6,244
https://github.com/MacHu-GWU/single_file_module-project/blob/01f7a6b250853bebfd73de275895bf274325cfc1/sfm/matplot_mate.py#L153-L170
[ "def", "toc", "(", "self", ")", ":", "elapsed", "=", "self", ".", "_time", "(", ")", "-", "self", ".", "tstart", "if", "self", ".", "verbose", ":", "self", ".", "write", "(", "'...toc(%r)=%.4fs\\n'", "%", "(", "self", ".", "label", ",", "elapsed", ")", ")", "self", ".", "flush", "(", ")", "return", "elapsed" ]
Find optimal y_min and y_max that guarantee enough space for legend and plot .
def get_yAxis_limit ( y , lower = 0.05 , upper = 0.2 ) : smallest = get_min ( y ) largest = get_max ( y ) gap = largest - smallest if gap >= 0.000001 : y_min = smallest - lower * gap y_max = largest + upper * gap else : y_min = smallest - lower * abs ( smallest ) y_max = largest + upper * abs ( largest ) return y_min , y_max
6,245
https://github.com/MacHu-GWU/single_file_module-project/blob/01f7a6b250853bebfd73de275895bf274325cfc1/sfm/matplot_mate.py#L173-L193
[ "def", "run", "(", "self", ")", ":", "logger", ".", "info", "(", "\"Initializing...\"", ")", "javabridge", ".", "call", "(", "self", ".", "jobject", ",", "\"initialize\"", ",", "\"()V\"", ")", "logger", ".", "info", "(", "\"Running...\"", ")", "javabridge", ".", "call", "(", "self", ".", "jobject", ",", "\"runExperiment\"", ",", "\"()V\"", ")", "logger", ".", "info", "(", "\"Finished...\"", ")", "javabridge", ".", "call", "(", "self", ".", "jobject", ",", "\"postProcess\"", ",", "\"()V\"", ")" ]
Create a figure instance .
def create_figure ( width = 20 , height = 10 ) : figure = plt . figure ( figsize = ( width , height ) ) axis = figure . add_subplot ( 1 , 1 , 1 ) return figure , axis
6,246
https://github.com/MacHu-GWU/single_file_module-project/blob/01f7a6b250853bebfd73de275895bf274325cfc1/sfm/matplot_mate.py#L196-L204
[ "def", "HandleMessageBundles", "(", "self", ",", "request_comms", ",", "response_comms", ")", ":", "messages", ",", "source", ",", "timestamp", "=", "self", ".", "_communicator", ".", "DecodeMessages", "(", "request_comms", ")", "now", "=", "time", ".", "time", "(", ")", "if", "messages", ":", "# Receive messages in line.", "self", ".", "ReceiveMessages", "(", "source", ",", "messages", ")", "# We send the client a maximum of self.max_queue_size messages", "required_count", "=", "max", "(", "0", ",", "self", ".", "max_queue_size", "-", "request_comms", ".", "queue_size", ")", "tasks", "=", "[", "]", "message_list", "=", "rdf_flows", ".", "MessageList", "(", ")", "# Only give the client messages if we are able to receive them in a", "# reasonable time.", "if", "time", ".", "time", "(", ")", "-", "now", "<", "10", ":", "tasks", "=", "self", ".", "DrainTaskSchedulerQueueForClient", "(", "source", ",", "required_count", ")", "message_list", ".", "job", "=", "tasks", "# Encode the message_list in the response_comms using the same API version", "# the client used.", "self", ".", "_communicator", ".", "EncodeMessages", "(", "message_list", ",", "response_comms", ",", "destination", "=", "source", ",", "timestamp", "=", "timestamp", ",", "api_version", "=", "request_comms", ".", "api_version", ")", "return", "source", ",", "len", "(", "messages", ")" ]
Preprocess x y input data . Returns list of list style .
def preprocess_x_y ( x , y ) : def is_iterable_slicable ( a ) : if hasattr ( a , "__iter__" ) and hasattr ( a , "__getitem__" ) : return True else : return False if is_iterable_slicable ( x ) : if is_iterable_slicable ( x [ 0 ] ) : return x , y else : return ( x , ) , ( y , ) else : raise ValueError ( "invalid input!" )
6,247
https://github.com/MacHu-GWU/single_file_module-project/blob/01f7a6b250853bebfd73de275895bf274325cfc1/sfm/matplot_mate.py#L207-L226
[ "def", "_get_constrs_no_operation", "(", "self", ",", "gadget", ")", ":", "# Constraints on memory locations.", "# mem_constrs = [self.analyzer.get_memory(\"pre\") != self.analyzer.get_memory(\"post\")]", "mem_constrs", "=", "[", "self", ".", "analyzer", ".", "get_memory_curr", "(", "\"pre\"", ")", ".", "__neq__", "(", "self", ".", "analyzer", ".", "get_memory_curr", "(", "\"post\"", ")", ")", "]", "# Constraints on flags.", "flags_constrs", "=", "[", "]", "for", "name", "in", "self", ".", "_arch_info", ".", "registers_flags", ":", "var_initial", "=", "self", ".", "analyzer", ".", "get_register_expr", "(", "name", ",", "mode", "=", "\"pre\"", ")", "var_final", "=", "self", ".", "analyzer", ".", "get_register_expr", "(", "name", ",", "mode", "=", "\"post\"", ")", "flags_constrs", "+=", "[", "var_initial", "!=", "var_final", "]", "# Constraints on registers.", "reg_constrs", "=", "[", "]", "for", "name", "in", "self", ".", "_arch_info", ".", "registers_gp_base", ":", "var_initial", "=", "self", ".", "analyzer", ".", "get_register_expr", "(", "name", ",", "mode", "=", "\"pre\"", ")", "var_final", "=", "self", ".", "analyzer", ".", "get_register_expr", "(", "name", ",", "mode", "=", "\"post\"", ")", "reg_constrs", "+=", "[", "var_initial", "!=", "var_final", "]", "# Make a big OR expression.", "constrs", "=", "mem_constrs", "+", "flags_constrs", "+", "reg_constrs", "constrs", "=", "[", "reduce", "(", "lambda", "c", ",", "acc", ":", "acc", "|", "c", ",", "constrs", "[", "1", ":", "]", ",", "constrs", "[", "0", "]", ")", "]", "return", "constrs" ]
Execute a task with the provided input parameters
def execute ( input_params , engine , cwd = None ) : try : taskengine_exe = config . get ( 'engine' ) except NoConfigOptionError : raise TaskEngineNotFoundError ( "Task Engine config option not set." + "\nPlease verify the 'engine' configuration setting." ) if not os . path . exists ( taskengine_exe ) : raise TaskEngineNotFoundError ( "Task Engine executable not found." + "\nPlease verify the 'engine' configuration setting." ) # Get any arguments for the taskengine engine_args = None try : engine_args = config . get ( 'engine-args' ) except NoConfigOptionError : pass # Get environment overrides if they exist environment = None config_environment = config . get_environment ( ) if config_environment : environment = os . environ . copy ( ) environment . update ( config_environment ) # Build up the args vector for popen args = [ taskengine_exe , engine ] if engine_args : args . append ( engine_args ) # Hide the Console Window on Windows OS startupinfo = None if sys . platform . startswith ( 'win' ) : startupinfo = subprocess . STARTUPINFO ( ) startupinfo . dwFlags |= subprocess . STARTF_USESHOWWINDOW input_json = json . dumps ( input_params ) process = Popen ( args , stdout = PIPE , stdin = PIPE , stderr = PIPE , cwd = cwd , env = environment , startupinfo = startupinfo ) # taskengine output is in UTF8. Encode/Decode to UTF8 stdout , stderr = process . communicate ( input = input_json . encode ( 'utf-8' ) ) if process . returncode != 0 : if stderr != '' : raise TaskEngineExecutionError ( stderr . decode ( 'utf-8' ) ) else : raise TaskEngineExecutionError ( 'Task Engine exited with code: ' + str ( process . returncode ) ) else : return json . loads ( stdout . decode ( 'utf-8' ) , object_pairs_hook = OrderedDict )
6,248
https://github.com/envi-idl/envipyengine/blob/567b639d6592deec3289f6122a9e3d18f2f98432/envipyengine/taskengine/taskengine.py#L17-L84
[ "def", "get_browser_state_or_default", "(", "request", ")", ":", "key", "=", "(", "request", ".", "session", ".", "session_key", "or", "settings", ".", "get", "(", "'OIDC_UNAUTHENTICATED_SESSION_MANAGEMENT_KEY'", ")", ")", "return", "sha224", "(", "key", ".", "encode", "(", "'utf-8'", ")", ")", ".", "hexdigest", "(", ")" ]
run the lilypond script on the hierarchy class
def run ( self , wrappers = [ "" , "" ] ) : opened_file = open ( self . lyfile , 'w' ) lilystring = self . piece_obj . toLily ( ) opened_file . writelines ( wrappers [ 0 ] + "\\version \"2.18.2\" \n" + lilystring + wrappers [ 1 ] ) opened_file . close ( ) # subprocess.Popen(['sudo', self.lily_script," --output=" + # self.folder, self.lyfile]) os . system ( self . lily_script + " --loglevel=WARNING --output=" + self . folder + " " + self . lyfile )
6,249
https://github.com/Godley/MuseParse/blob/23cecafa1fdc0f2d6a87760553572b459f3c9904/MuseParse/classes/Output/LilypondOutput.py#L42-L64
[ "def", "_compute_inter_event_std", "(", "self", ",", "C", ",", "C_PGA", ",", "pga1100", ",", "mag", ",", "vs30", ")", ":", "tau_0", "=", "self", ".", "_compute_std_0", "(", "C", "[", "'s3'", "]", ",", "C", "[", "'s4'", "]", ",", "mag", ")", "tau_b_pga", "=", "self", ".", "_compute_std_0", "(", "C_PGA", "[", "'s3'", "]", ",", "C_PGA", "[", "'s4'", "]", ",", "mag", ")", "delta_amp", "=", "self", ".", "_compute_partial_derivative_site_amp", "(", "C", ",", "pga1100", ",", "vs30", ")", "std_inter", "=", "np", ".", "sqrt", "(", "tau_0", "**", "2", "+", "(", "delta_amp", "**", "2", ")", "*", "(", "tau_b_pga", "**", "2", ")", "+", "2", "*", "delta_amp", "*", "tau_0", "*", "tau_b_pga", "*", "C", "[", "'rho'", "]", ")", "return", "std_inter" ]
Extract sequences from bins
def extract_fasta ( partition_file , fasta_file , output_dir , chunk_size = DEFAULT_CHUNK_SIZE , max_cores = DEFAULT_MAX_CORES , ) : genome = { record . id : record . seq for record in SeqIO . parse ( fasta_file , "fasta" ) } data_chunks = list ( zip ( * np . genfromtxt ( partition_file , usecols = ( 0 , 1 ) , dtype = None ) ) ) chunk_names = np . array ( data_chunks [ 0 ] , dtype = object ) cores = np . array ( data_chunks [ 1 ] ) for core in set ( cores ) : if core > max_cores : continue chunks_to_keep = chunk_names [ cores == core ] core_name = "core_{}.fa" . format ( core ) core_file = os . path . join ( output_dir , core_name ) with open ( core_file , "w" ) as core_handle : for name in chunks_to_keep : fields = name . split ( "_" ) header_name = "_" . join ( fields [ : - 1 ] ) chunk = int ( fields [ - 1 ] ) pos_start = chunk * chunk_size pos_end = min ( ( chunk + 1 ) * chunk_size , len ( genome [ header_name ] ) ) sequence = str ( genome [ header_name ] [ pos_start : pos_end ] ) core_handle . write ( ">{}\n" . format ( name ) ) core_handle . write ( "{}\n" . format ( sequence ) )
6,250
https://github.com/koszullab/metaTOR/blob/0c1203d1dffedfa5ea380c0335b4baa9cfb7e89a/metator/scripts/bins.py#L183-L243
[ "def", "_extract_match", "(", "self", ",", "candidate", ",", "offset", ")", ":", "# Skip a match that is more likely a publication page reference or a", "# date.", "if", "(", "_SLASH_SEPARATED_DATES", ".", "search", "(", "candidate", ")", ")", ":", "return", "None", "# Skip potential time-stamps.", "if", "_TIME_STAMPS", ".", "search", "(", "candidate", ")", ":", "following_text", "=", "self", ".", "text", "[", "offset", "+", "len", "(", "candidate", ")", ":", "]", "if", "_TIME_STAMPS_SUFFIX", ".", "match", "(", "following_text", ")", ":", "return", "None", "# Try to come up with a valid match given the entire candidate.", "match", "=", "self", ".", "_parse_and_verify", "(", "candidate", ",", "offset", ")", "if", "match", "is", "not", "None", ":", "return", "match", "# If that failed, try to find an \"inner match\" -- there might be a", "# phone number within this candidate.", "return", "self", ".", "_extract_inner_match", "(", "candidate", ",", "offset", ")" ]
Merge chunks into complete FASTA bins
def merge_fasta ( fasta_file , output_dir ) : # First, define some functions for ordering chunks and detecting # consecutive chunk sequences def chunk_lexicographic_order ( chunk ) : """A quick callback to sort chunk ids lexicographically (first on original names alphabetically, then on relative position on the original contig) """ chunk_fields = chunk . split ( "_" ) chunk_name = chunk_fields [ : - 1 ] chunk_id = chunk_fields [ - 1 ] return ( chunk_name , int ( chunk_id ) ) def are_consecutive ( chunk1 , chunk2 ) : if None in { chunk1 , chunk2 } : return False else : ord1 = chunk_lexicographic_order ( chunk1 ) ord2 = chunk_lexicographic_order ( chunk2 ) return ( ord1 [ 0 ] == ord2 [ 0 ] ) and ( ord1 [ 1 ] == ord2 [ 1 ] + 1 ) def consecutiveness ( key_chunk_pair ) : """A callback for the groupby magic below """ key , chunk = key_chunk_pair chunk_name , chunk_id = chunk_lexicographic_order ( chunk ) return ( chunk_name , chunk_id - key ) # Read chunks and sort them genome = { record . id : record . seq for record in SeqIO . parse ( fasta_file , "fasta" ) } sorted_ids = sorted ( genome , key = chunk_lexicographic_order ) # Identify consecutive ranges and merge them new_genome = dict ( ) for _ , g in itertools . groupby ( enumerate ( sorted_ids ) , consecutiveness ) : chunk_range = map ( operator . itemgetter ( 1 ) , g ) first_chunk = next ( chunk_range ) my_sequence = genome [ first_chunk ] my_chunk = None while "Reading chunk range" : try : my_chunk = next ( chunk_range ) my_sequence += genome [ my_chunk ] except StopIteration : break try : last_chunk_id = my_chunk . split ( "_" ) [ - 1 ] except AttributeError : last_chunk_id = "" if last_chunk_id : new_chunk_id = "{}_{}" . format ( first_chunk , last_chunk_id ) else : new_chunk_id = first_chunk new_genome [ new_chunk_id ] = my_sequence # Write the result base_name = "." . join ( os . path . basename ( fasta_file ) . split ( "." ) [ : - 1 ] ) output_name = "{}_merged.fa" . 
format ( base_name ) merged_core_file = os . path . join ( output_dir , output_name ) with open ( merged_core_file , "w" ) as output_handle : for my_id in sorted ( new_genome , key = chunk_lexicographic_order ) : output_handle . write ( ">{}\n" . format ( my_id ) ) output_handle . write ( "{}\n" . format ( new_genome [ my_id ] ) )
6,251
https://github.com/koszullab/metaTOR/blob/0c1203d1dffedfa5ea380c0335b4baa9cfb7e89a/metator/scripts/bins.py#L246-L327
[ "def", "get_changed_devices", "(", "self", ",", "timestamp", ")", ":", "if", "timestamp", "is", "None", ":", "payload", "=", "{", "}", "else", ":", "payload", "=", "{", "'timeout'", ":", "SUBSCRIPTION_WAIT", ",", "'minimumdelay'", ":", "SUBSCRIPTION_MIN_WAIT", "}", "payload", ".", "update", "(", "timestamp", ")", "# double the timeout here so requests doesn't timeout before vera", "payload", ".", "update", "(", "{", "'id'", ":", "'lu_sdata'", ",", "}", ")", "logger", ".", "debug", "(", "\"get_changed_devices() requesting payload %s\"", ",", "str", "(", "payload", ")", ")", "r", "=", "self", ".", "data_request", "(", "payload", ",", "TIMEOUT", "*", "2", ")", "r", ".", "raise_for_status", "(", ")", "# If the Vera disconnects before writing a full response (as lu_sdata", "# will do when interrupted by a Luup reload), the requests module will", "# happily return 200 with an empty string. So, test for empty response,", "# so we don't rely on the JSON parser to throw an exception.", "if", "r", ".", "text", "==", "\"\"", ":", "raise", "PyveraError", "(", "\"Empty response from Vera\"", ")", "# Catch a wide swath of what the JSON parser might throw, within", "# reason. Unfortunately, some parsers don't specifically return", "# json.decode.JSONDecodeError, but so far most seem to derive what", "# they do throw from ValueError, so that's helpful.", "try", ":", "result", "=", "r", ".", "json", "(", ")", "except", "ValueError", "as", "ex", ":", "raise", "PyveraError", "(", "\"JSON decode error: \"", "+", "str", "(", "ex", ")", ")", "if", "not", "(", "type", "(", "result", ")", "is", "dict", "and", "'loadtime'", "in", "result", "and", "'dataversion'", "in", "result", ")", ":", "raise", "PyveraError", "(", "\"Unexpected/garbled response from Vera\"", ")", "# At this point, all good. 
Update timestamp and return change data.", "device_data", "=", "result", ".", "get", "(", "'devices'", ")", "timestamp", "=", "{", "'loadtime'", ":", "result", ".", "get", "(", "'loadtime'", ")", ",", "'dataversion'", ":", "result", ".", "get", "(", "'dataversion'", ")", "}", "return", "[", "device_data", ",", "timestamp", "]" ]
Wrapper to call console with a loop .
def monitor ( ) : log = logging . getLogger ( __name__ ) loop = asyncio . get_event_loop ( ) asyncio . ensure_future ( console ( loop , log ) ) loop . run_forever ( )
6,252
https://github.com/nugget/python-anthemav/blob/c3cee38f2d452c1ab1335d9885e0769ec24d5f90/anthemav/tools.py#L60-L65
[ "def", "_proc_accept_header", "(", "self", ",", "request", ",", "result", ")", ":", "if", "result", ":", "# Result has already been fully determined", "return", "try", ":", "accept", "=", "request", ".", "headers", "[", "'accept'", "]", "except", "KeyError", ":", "# No Accept header to examine", "return", "# Obtain the best-match content type and its parameters", "ctype", ",", "params", "=", "best_match", "(", "accept", ",", "self", ".", "types", ".", "keys", "(", ")", ")", "# Is it a recognized content type?", "if", "ctype", "not", "in", "self", ".", "types", ":", "return", "# Get the mapped ctype and version", "mapped_ctype", ",", "mapped_version", "=", "self", ".", "types", "[", "ctype", "]", "(", "params", ")", "# Set the content type and version", "if", "mapped_ctype", ":", "result", ".", "set_ctype", "(", "mapped_ctype", ",", "ctype", ")", "if", "mapped_version", ":", "result", ".", "set_version", "(", "mapped_version", ")" ]
Creates an API object of class cls setting its _data to data . Subclasses of Object are required to use this to build a new empty instance without using their constructor .
def make_object ( cls , data ) : if issubclass ( cls , Object ) : self = object . __new__ ( cls ) self . _data = data else : self = data return self
6,253
https://github.com/eclipse/unide.python/blob/b82e6a0bf7cc44a463c5d7cdb3d2199f8320c493/src/unide/schema.py#L296-L306
[ "def", "pip", "(", "name", ")", ":", "with", "io", ".", "open", "(", "os", ".", "path", ".", "join", "(", "'requirements'", ",", "'{0}.pip'", ".", "format", "(", "name", ")", ")", ")", "as", "f", ":", "return", "f", ".", "readlines", "(", ")" ]
A string valued property with max . length .
def String ( length = None , * * kwargs ) : return Property ( length = length , types = stringy_types , convert = to_string , * * kwargs )
6,254
https://github.com/eclipse/unide.python/blob/b82e6a0bf7cc44a463c5d7cdb3d2199f8320c493/src/unide/schema.py#L365-L372
[ "def", "poisson", "(", "x", ",", "a", ",", "b", ",", "c", ",", "d", "=", "0", ")", ":", "from", "scipy", ".", "misc", "import", "factorial", "#save startup time\r", "lamb", "=", "1", "X", "=", "(", "x", "/", "(", "2", "*", "c", ")", ")", ".", "astype", "(", "int", ")", "return", "a", "*", "(", "(", "lamb", "**", "X", "/", "factorial", "(", "X", ")", ")", "*", "np", ".", "exp", "(", "-", "lamb", ")", ")", "+", "d" ]
A datetime property .
def Datetime ( null = True , * * kwargs ) : return Property ( types = datetime . datetime , convert = util . local_timezone , load = dateutil . parser . parse , null = null , * * kwargs )
6,255
https://github.com/eclipse/unide.python/blob/b82e6a0bf7cc44a463c5d7cdb3d2199f8320c493/src/unide/schema.py#L395-L403
[ "def", "removeMigrationRequest", "(", "self", ",", "migration_rqst", ")", ":", "conn", "=", "self", ".", "dbi", ".", "connection", "(", ")", "try", ":", "tran", "=", "conn", ".", "begin", "(", ")", "self", ".", "mgrremove", ".", "execute", "(", "conn", ",", "migration_rqst", ")", "tran", ".", "commit", "(", ")", "except", "dbsException", "as", "he", ":", "if", "conn", ":", "conn", ".", "close", "(", ")", "raise", "except", "Exception", "as", "ex", ":", "if", "conn", ":", "conn", ".", "close", "(", ")", "raise", "if", "conn", ":", "conn", ".", "close", "(", ")" ]
A property that is an instance of cls .
def InstanceOf ( cls , * * kwargs ) : return Property ( types = cls , load = cls . load , * * kwargs )
6,256
https://github.com/eclipse/unide.python/blob/b82e6a0bf7cc44a463c5d7cdb3d2199f8320c493/src/unide/schema.py#L406-L408
[ "def", "_download_urls", "(", "url_list", ",", "storage_folder", ",", "overwrite_existing", ",", "meta_handler", ",", "access_cookie", "=", "None", ")", ":", "for", "url", "in", "url_list", ":", "filename", "=", "os", ".", "path", ".", "basename", "(", "url", ")", "if", "not", "overwrite_existing", "and", "filename", "in", "os", ".", "listdir", "(", "storage_folder", ")", ":", "continue", "storage_file", "=", "os", ".", "path", ".", "join", "(", "storage_folder", ",", "filename", ")", "# Using requests here - tried with aiohttp but was actually slower", "# Also don’t use shutil.copyfileobj - corrupts zips from Eora", "req", "=", "requests", ".", "post", "(", "url", ",", "stream", "=", "True", ",", "cookies", "=", "access_cookie", ")", "with", "open", "(", "storage_file", ",", "'wb'", ")", "as", "lf", ":", "for", "chunk", "in", "req", ".", "iter_content", "(", "1024", "*", "5", ")", ":", "lf", ".", "write", "(", "chunk", ")", "meta_handler", ".", "_add_fileio", "(", "'Downloaded {} to {}'", ".", "format", "(", "url", ",", "filename", ")", ")", "meta_handler", ".", "save", "(", ")", "return", "meta_handler" ]
A property that is a list of cls .
def ListOf ( cls , * * kwargs ) : def _list_load ( value ) : return [ cls . load ( d ) for d in value ] return Property ( types = list , load = _list_load , default = list , * * kwargs )
6,257
https://github.com/eclipse/unide.python/blob/b82e6a0bf7cc44a463c5d7cdb3d2199f8320c493/src/unide/schema.py#L411-L417
[ "def", "OnAdjustVolume", "(", "self", ",", "event", ")", ":", "self", ".", "volume", "=", "self", ".", "player", ".", "audio_get_volume", "(", ")", "if", "event", ".", "GetWheelRotation", "(", ")", "<", "0", ":", "self", ".", "volume", "=", "max", "(", "0", ",", "self", ".", "volume", "-", "10", ")", "elif", "event", ".", "GetWheelRotation", "(", ")", ">", "0", ":", "self", ".", "volume", "=", "min", "(", "200", ",", "self", ".", "volume", "+", "10", ")", "self", ".", "player", ".", "audio_set_volume", "(", "self", ".", "volume", ")" ]
Add a named dimension to this entity .
def add_dimension ( self , name , data = None ) : self . dimensions . add ( name ) if data is None : valobj = self . __dimtype__ ( ) else : valobj = make_object ( self . __dimtype__ , data ) self . _data [ name ] = valobj setattr ( self , name , valobj ) return valobj
6,258
https://github.com/eclipse/unide.python/blob/b82e6a0bf7cc44a463c5d7cdb3d2199f8320c493/src/unide/schema.py#L323-L332
[ "def", "create_or_update_secret", "(", "self", ",", "path", ",", "secret", ",", "cas", "=", "None", ",", "mount_point", "=", "DEFAULT_MOUNT_POINT", ")", ":", "params", "=", "{", "'options'", ":", "{", "}", ",", "'data'", ":", "secret", ",", "}", "if", "cas", "is", "not", "None", ":", "params", "[", "'options'", "]", "[", "'cas'", "]", "=", "cas", "api_path", "=", "'/v1/{mount_point}/data/{path}'", ".", "format", "(", "mount_point", "=", "mount_point", ",", "path", "=", "path", ")", "response", "=", "self", ".", "_adapter", ".", "post", "(", "url", "=", "api_path", ",", "json", "=", "params", ",", ")", "return", "response", ".", "json", "(", ")" ]
Print mwtab section into a file or stdout .
def print_block ( self , section_key , f = sys . stdout , file_format = "mwtab" ) : if file_format == "mwtab" : for key , value in self [ section_key ] . items ( ) : if section_key == "METABOLOMICS WORKBENCH" and key not in ( "VERSION" , "CREATED_ON" ) : continue if key in ( "VERSION" , "CREATED_ON" ) : cw = 20 - len ( key ) elif key in ( "SUBJECT_SAMPLE_FACTORS" , ) : cw = 33 - len ( key ) else : cw = 30 - len ( key ) if "\n" in value : for line in value . split ( "\n" ) : print ( "{}{}{}\t{}" . format ( self . prefixes . get ( section_key , "" ) , key , cw * " " , line ) , file = f ) elif key == "SUBJECT_SAMPLE_FACTORS" : for factor in value : print ( "{}{}\t{}" . format ( key , cw * " " , "\t" . join ( factor . values ( ) ) ) , file = f ) elif key . endswith ( ":UNITS" ) : print ( "{}\t{}" . format ( key , value ) , file = f ) elif key . endswith ( "_RESULTS_FILE" ) : if isinstance ( value , dict ) : print ( "{}{} \t{}\t{}:{}" . format ( self . prefixes . get ( section_key , "" ) , * [ i for pair in value . items ( ) for i in pair ] ) , file = f ) else : print ( "{}{}{}\t{}" . format ( self . prefixes . get ( section_key , "" ) , key , cw * " " , value ) , file = f ) elif key . endswith ( "_START" ) : start_key = key end_key = "{}{}" . format ( start_key [ : - 5 ] , "END" ) print ( start_key , file = f ) for data_key in value : if data_key in ( "Samples" , "Factors" ) : print ( "{}\t{}" . format ( data_key , "\t" . join ( self [ section_key ] [ key ] [ data_key ] ) ) , file = f ) elif data_key in ( "Fields" , ) : print ( "{}" . format ( "\t" . join ( self [ section_key ] [ key ] [ data_key ] ) ) , file = f ) elif data_key == "DATA" : for data in self [ section_key ] [ key ] [ data_key ] : print ( "\t" . join ( data . values ( ) ) , file = f ) print ( end_key , file = f ) else : print ( "{}{}{}\t{}" . format ( self . prefixes . get ( section_key , "" ) , key , cw * " " , value ) , file = f ) elif file_format == "json" : print ( json . 
dumps ( self [ section_key ] , sort_keys = False , indent = 4 ) , file = f )
6,259
https://github.com/MoseleyBioinformaticsLab/mwtab/blob/8c0ae8ab2aa621662f99589ed41e481cf8b7152b/mwtab/mwtab.py#L233-L293
[ "def", "verify_consistency", "(", "self", ",", "expected_leaf_count", ")", "->", "bool", ":", "if", "expected_leaf_count", "!=", "self", ".", "leafCount", ":", "raise", "ConsistencyVerificationFailed", "(", ")", "if", "self", ".", "get_expected_node_count", "(", "self", ".", "leafCount", ")", "!=", "self", ".", "nodeCount", ":", "raise", "ConsistencyVerificationFailed", "(", ")", "return", "True" ]
Test if input string is in mwtab format .
def _is_mwtab ( string ) : if isinstance ( string , str ) : lines = string . split ( "\n" ) elif isinstance ( string , bytes ) : lines = string . decode ( "utf-8" ) . split ( "\n" ) else : raise TypeError ( "Expecting <class 'str'> or <class 'bytes'>, but {} was passed" . format ( type ( string ) ) ) lines = [ line for line in lines if line ] header = lines [ 0 ] if header . startswith ( "#METABOLOMICS WORKBENCH" ) : return "\n" . join ( lines ) return False
6,260
https://github.com/MoseleyBioinformaticsLab/mwtab/blob/8c0ae8ab2aa621662f99589ed41e481cf8b7152b/mwtab/mwtab.py#L314-L334
[ "def", "volumes_delete", "(", "storage_pool", ",", "logger", ")", ":", "try", ":", "for", "vol_name", "in", "storage_pool", ".", "listVolumes", "(", ")", ":", "try", ":", "vol", "=", "storage_pool", ".", "storageVolLookupByName", "(", "vol_name", ")", "vol", ".", "delete", "(", "0", ")", "except", "libvirt", ".", "libvirtError", ":", "logger", ".", "exception", "(", "\"Unable to delete storage volume %s.\"", ",", "vol_name", ")", "except", "libvirt", ".", "libvirtError", ":", "logger", ".", "exception", "(", "\"Unable to delete storage volumes.\"", ")" ]
Fetch trace ids by service and span name . Gets limit number of entries from before the end_ts .
def getTraceIdsBySpanName ( self , service_name , span_name , end_ts , limit , order ) : self . send_getTraceIdsBySpanName ( service_name , span_name , end_ts , limit , order ) return self . recv_getTraceIdsBySpanName ( )
6,261
https://github.com/prezi/django-zipkin/blob/158d04cf9c2fe0adcb4cda66a250d9e41eae33f3/django_zipkin/_thrift/zipkinQuery/ZipkinQuery.py#L286-L302
[ "def", "_read_config", "(", "config_location", ")", ":", "global", "LOGGING_CONFIG", "with", "open", "(", "config_location", ",", "\"r\"", ")", "as", "config_loc", ":", "cfg_file", "=", "json", ".", "load", "(", "config_loc", ")", "if", "\"logging\"", "in", "cfg_file", ":", "log_dict", "=", "cfg_file", ".", "get", "(", "\"logging\"", ")", "with", "open", "(", "os", ".", "path", ".", "abspath", "(", "os", ".", "path", ".", "join", "(", "__file__", ",", "os", ".", "path", ".", "pardir", ",", "'logging_schema.json'", ")", ")", ")", "as", "schema_file", ":", "logging_schema", "=", "json", ".", "load", "(", "schema_file", ")", "jsonschema", ".", "validate", "(", "log_dict", ",", "logging_schema", ")", "merged", "=", "jsonmerge", ".", "merge", "(", "LOGGING_CONFIG", ",", "log_dict", ")", "LOGGING_CONFIG", "=", "merged" ]
Fetch trace ids by service name . Gets limit number of entries from before the end_ts .
def getTraceIdsByServiceName ( self , service_name , end_ts , limit , order ) : self . send_getTraceIdsByServiceName ( service_name , end_ts , limit , order ) return self . recv_getTraceIdsByServiceName ( )
6,262
https://github.com/prezi/django-zipkin/blob/158d04cf9c2fe0adcb4cda66a250d9e41eae33f3/django_zipkin/_thrift/zipkinQuery/ZipkinQuery.py#L332-L346
[ "def", "update", "(", "self", ",", "workspace", ",", "params", "=", "{", "}", ",", "*", "*", "options", ")", ":", "path", "=", "\"/workspaces/%s\"", "%", "(", "workspace", ")", "return", "self", ".", "client", ".", "put", "(", "path", ",", "params", ",", "*", "*", "options", ")" ]
Fetch trace ids with a particular annotation . Gets limit number of entries from before the end_ts .
def getTraceIdsByAnnotation ( self , service_name , annotation , value , end_ts , limit , order ) : self . send_getTraceIdsByAnnotation ( service_name , annotation , value , end_ts , limit , order ) return self . recv_getTraceIdsByAnnotation ( )
6,263
https://github.com/prezi/django-zipkin/blob/158d04cf9c2fe0adcb4cda66a250d9e41eae33f3/django_zipkin/_thrift/zipkinQuery/ZipkinQuery.py#L375-L395
[ "def", "_read_config", "(", "config_location", ")", ":", "global", "LOGGING_CONFIG", "with", "open", "(", "config_location", ",", "\"r\"", ")", "as", "config_loc", ":", "cfg_file", "=", "json", ".", "load", "(", "config_loc", ")", "if", "\"logging\"", "in", "cfg_file", ":", "log_dict", "=", "cfg_file", ".", "get", "(", "\"logging\"", ")", "with", "open", "(", "os", ".", "path", ".", "abspath", "(", "os", ".", "path", ".", "join", "(", "__file__", ",", "os", ".", "path", ".", "pardir", ",", "'logging_schema.json'", ")", ")", ")", "as", "schema_file", ":", "logging_schema", "=", "json", ".", "load", "(", "schema_file", ")", "jsonschema", ".", "validate", "(", "log_dict", ",", "logging_schema", ")", "merged", "=", "jsonmerge", ".", "merge", "(", "LOGGING_CONFIG", ",", "log_dict", ")", "LOGGING_CONFIG", "=", "merged" ]
Get the full traces associated with the given trace ids .
def getTracesByIds ( self , trace_ids , adjust ) : self . send_getTracesByIds ( trace_ids , adjust ) return self . recv_getTracesByIds ( )
6,264
https://github.com/prezi/django-zipkin/blob/158d04cf9c2fe0adcb4cda66a250d9e41eae33f3/django_zipkin/_thrift/zipkinQuery/ZipkinQuery.py#L460-L472
[ "def", "get_score", "(", "student_item", ")", ":", "try", ":", "student_item_model", "=", "StudentItem", ".", "objects", ".", "get", "(", "*", "*", "student_item", ")", "score", "=", "ScoreSummary", ".", "objects", ".", "get", "(", "student_item", "=", "student_item_model", ")", ".", "latest", "except", "(", "ScoreSummary", ".", "DoesNotExist", ",", "StudentItem", ".", "DoesNotExist", ")", ":", "return", "None", "# By convention, scores are hidden if \"points possible\" is set to 0.", "# This can occur when an instructor has reset scores for a student.", "if", "score", ".", "is_hidden", "(", ")", ":", "return", "None", "else", ":", "return", "ScoreSerializer", "(", "score", ")", ".", "data" ]
Fetch trace summaries for the given trace ids .
def getTraceSummariesByIds ( self , trace_ids , adjust ) : self . send_getTraceSummariesByIds ( trace_ids , adjust ) return self . recv_getTraceSummariesByIds ( )
6,265
https://github.com/prezi/django-zipkin/blob/158d04cf9c2fe0adcb4cda66a250d9e41eae33f3/django_zipkin/_thrift/zipkinQuery/ZipkinQuery.py#L543-L558
[ "def", "enforce_versioning", "(", "force", "=", "False", ")", ":", "connect_str", ",", "repo_url", "=", "get_version_data", "(", ")", "LOG", ".", "warning", "(", "\"Your database uses an unversioned benchbuild schema.\"", ")", "if", "not", "force", "and", "not", "ui", ".", "ask", "(", "\"Should I enforce version control on your schema?\"", ")", ":", "LOG", ".", "error", "(", "\"User declined schema versioning.\"", ")", "return", "None", "repo_version", "=", "migrate", ".", "version", "(", "repo_url", ",", "url", "=", "connect_str", ")", "migrate", ".", "version_control", "(", "connect_str", ",", "repo_url", ",", "version", "=", "repo_version", ")", "return", "repo_version" ]
Not content with just one of traces summaries or timelines? Want it all? This is the method for you .
def getTraceCombosByIds ( self , trace_ids , adjust ) : self . send_getTraceCombosByIds ( trace_ids , adjust ) return self . recv_getTraceCombosByIds ( )
6,266
https://github.com/prezi/django-zipkin/blob/158d04cf9c2fe0adcb4cda66a250d9e41eae33f3/django_zipkin/_thrift/zipkinQuery/ZipkinQuery.py#L585-L594
[ "def", "_write_value_failed", "(", "self", ",", "dbus_error", ")", ":", "error", "=", "_error_from_dbus_error", "(", "dbus_error", ")", "self", ".", "service", ".", "device", ".", "characteristic_write_value_failed", "(", "characteristic", "=", "self", ",", "error", "=", "error", ")" ]
Change the TTL of a trace . If we find an interesting trace we want to keep around for further investigation .
def setTraceTimeToLive ( self , trace_id , ttl_seconds ) : self . send_setTraceTimeToLive ( trace_id , ttl_seconds ) self . recv_setTraceTimeToLive ( )
6,267
https://github.com/prezi/django-zipkin/blob/158d04cf9c2fe0adcb4cda66a250d9e41eae33f3/django_zipkin/_thrift/zipkinQuery/ZipkinQuery.py#L685-L695
[ "def", "_write_value_failed", "(", "self", ",", "dbus_error", ")", ":", "error", "=", "_error_from_dbus_error", "(", "dbus_error", ")", "self", ".", "service", ".", "device", ".", "characteristic_write_value_failed", "(", "characteristic", "=", "self", ",", "error", "=", "error", ")" ]
Loop through the datastore s datasources to find the datasource identified by datasource_id return the matching datasource s columns .
def discover_datasource_columns ( datastore_str , datasource_id ) : datastore = DataStore ( datastore_str ) datasource = datastore . get_datasource ( datasource_id ) if datasource . type != "RASTER" : return datasource . list_columns ( ) else : return [ ]
6,268
https://github.com/camptocamp/Studio/blob/43cb7298434fb606b15136801b79b03571a2f27e/studio/lib/datasource_discovery.py#L58-L67
[ "def", "set_bounds", "(", "self", ",", "lb", ",", "ub", ")", ":", "if", "lb", "is", "not", "None", "and", "ub", "is", "not", "None", "and", "lb", ">", "ub", ":", "raise", "ValueError", "(", "\"The provided lower bound {} is larger than the provided upper bound {}\"", ".", "format", "(", "lb", ",", "ub", ")", ")", "self", ".", "_lb", "=", "lb", "self", ".", "_ub", "=", "ub", "if", "self", ".", "problem", "is", "not", "None", ":", "self", ".", "problem", ".", "_pending_modifications", ".", "var_lb", ".", "append", "(", "(", "self", ",", "lb", ")", ")", "self", ".", "problem", ".", "_pending_modifications", ".", "var_ub", ".", "append", "(", "(", "self", ",", "ub", ")", ")" ]
Return numeric if the column is of type integer or real otherwise return string .
def _get_column_type ( self , column ) : ctype = column . GetType ( ) if ctype in [ ogr . OFTInteger , ogr . OFTReal ] : return 'numeric' else : return 'string'
6,269
https://github.com/camptocamp/Studio/blob/43cb7298434fb606b15136801b79b03571a2f27e/studio/lib/datasource_discovery.py#L269-L276
[ "def", "state_tomography_programs", "(", "state_prep", ",", "qubits", "=", "None", ",", "rotation_generator", "=", "tomography", ".", "default_rotations", ")", ":", "if", "qubits", "is", "None", ":", "qubits", "=", "state_prep", ".", "get_qubits", "(", ")", "for", "tomography_program", "in", "rotation_generator", "(", "*", "qubits", ")", ":", "state_tomography_program", "=", "Program", "(", "Pragma", "(", "\"PRESERVE_BLOCK\"", ")", ")", "state_tomography_program", ".", "inst", "(", "state_prep", ")", "state_tomography_program", ".", "inst", "(", "tomography_program", ")", "state_tomography_program", ".", "inst", "(", "Pragma", "(", "\"END_PRESERVE_BLOCK\"", ")", ")", "yield", "state_tomography_program" ]
Given an OGR string an OGR connection and an OGR layer create and return a representation of a MapFile LAYER block .
def _get_default_mapfile_excerpt ( self ) : layerobj = self . _get_layer_stub ( ) classobj = mapscript . classObj ( ) layerobj . insertClass ( classobj ) styleobj = self . _get_default_style ( ) classobj . insertStyle ( styleobj ) return mapserializer . layerobj_to_dict ( layerobj , None )
6,270
https://github.com/camptocamp/Studio/blob/43cb7298434fb606b15136801b79b03571a2f27e/studio/lib/datasource_discovery.py#L285-L295
[ "def", "indices_outside_segments", "(", "times", ",", "segment_files", ",", "ifo", "=", "None", ",", "segment_name", "=", "None", ")", ":", "exclude", ",", "segs", "=", "indices_within_segments", "(", "times", ",", "segment_files", ",", "ifo", "=", "ifo", ",", "segment_name", "=", "segment_name", ")", "indices", "=", "numpy", ".", "arange", "(", "0", ",", "len", "(", "times", ")", ")", "return", "numpy", ".", "delete", "(", "indices", ",", "exclude", ")", ",", "segs" ]
builds a minimal mapscript layerobj with no styling
def _get_layer_stub ( self ) : layerobj = mapscript . layerObj ( ) layerobj . name = self . name layerobj . status = mapscript . MS_ON projection = self . ogr_layer . GetSpatialRef ( ) featureIdColumn = self . _get_featureId_column ( ) if featureIdColumn is not None and featureIdColumn != '' : layerobj . metadata . set ( 'gml_featureid' , featureIdColumn ) if projection is not None : layerobj . setProjection ( projection . ExportToProj4 ( ) ) if self . datastore . connection_type == "directory" : #append the extension to the shapefile until mapserver bug 2895 is fixed datastr = os . path . normpath ( self . datastore . datastore_str + "/" + self . name ) if os . path . exists ( datastr + '.shp' ) : datastr = datastr + '.shp' elif os . path . exists ( datastr + '.SHP' ) : datastr = datastr + '.SHP' layerobj . data = datastr elif self . datastore . connection_type == "postgis" : layerobj . connectiontype = mapscript . MS_POSTGIS #remove the leading "PG:" from the connection string layerobj . connection = self . datastore . datastore_str [ 3 : ] . strip ( ) if featureIdColumn is not None and featureIdColumn != '' : layerobj . data = "%s from %s using unique %s" % ( self . ogr_layer . GetGeometryColumn ( ) , self . name , featureIdColumn ) else : layerobj . data = "%s from %s" % ( self . ogr_layer . GetGeometryColumn ( ) , self . name ) else : raise RuntimeError ( "unsupported connection type" ) if self . type == 'POINT' : layerobj . type = mapscript . MS_LAYER_POINT elif self . type == 'POLYGON' : layerobj . type = mapscript . MS_LAYER_POLYGON else : layerobj . type = mapscript . MS_LAYER_LINE return layerobj
6,271
https://github.com/camptocamp/Studio/blob/43cb7298434fb606b15136801b79b03571a2f27e/studio/lib/datasource_discovery.py#L303-L341
[ "def", "rooms", "(", "status", "=", "None", ")", ":", "room_query", "=", "VCRoom", ".", "find", "(", "type", "=", "'vidyo'", ")", "table_data", "=", "[", "[", "'ID'", ",", "'Name'", ",", "'Status'", ",", "'Vidyo ID'", ",", "'Extension'", "]", "]", "if", "status", ":", "room_query", "=", "room_query", ".", "filter", "(", "VCRoom", ".", "status", "==", "VCRoomStatus", ".", "get", "(", "status", ")", ")", "for", "room", "in", "room_query", ":", "table_data", ".", "append", "(", "[", "unicode", "(", "room", ".", "id", ")", ",", "room", ".", "name", ",", "room", ".", "status", ".", "name", ",", "unicode", "(", "room", ".", "data", "[", "'vidyo_id'", "]", ")", ",", "unicode", "(", "room", ".", "vidyo_extension", ".", "extension", ")", "]", ")", "table", "=", "AsciiTable", "(", "table_data", ")", "for", "col", "in", "(", "0", ",", "3", ",", "4", ")", ":", "table", ".", "justify_columns", "[", "col", "]", "=", "'right'", "print", "table", ".", "table" ]
tries to connect to the same app on differnet host from dist - info
def reelect_app ( self , request , app ) : # disconnect app explicitly to break possibly existing connection app . disconnect ( ) endpoints_size = len ( app . locator . endpoints ) # try x times, where x is the number of different endpoints in app locator. for _ in xrange ( 0 , endpoints_size + 1 ) : # last chance to take app from common pool if len ( app . locator . endpoints ) == 0 : request . logger . info ( "giving up on connecting to dist-info hosts, falling back to common pool processing" ) app = yield self . proxy . reelect_app ( request , app ) raise gen . Return ( app ) try : # always create new locator to prevent locking as we do connect with timeout # however lock can be still held during TCP timeout locator = Locator ( endpoints = app . locator . endpoints ) request . logger . info ( "connecting to locator %s" , locator . endpoints [ 0 ] ) # first try to connect to locator only on remote host with timeout yield gen . with_timeout ( self . service_connect_timeout , locator . connect ( ) ) request . logger . debug ( "connected to locator %s for %s" , locator . endpoints [ 0 ] , app . name ) app = Service ( app . name , locator = locator , timeout = RESOLVE_TIMEOUT ) # try to resolve and connect to application itself yield gen . with_timeout ( self . service_connect_timeout , app . connect ( ) ) request . logger . debug ( "connected to application %s via %s" , app . name , app . endpoints ) except gen . TimeoutError : # on timeout try next endpoint first request . logger . warning ( "timed out while connecting to application" ) continue except ServiceError as err : request . logger . warning ( "got error while resolving app - %s" , err ) if err . category in LOCATORCATEGORY and err . 
code == ESERVICENOTAVAILABLE : # if the application is down - also try next endpoint continue else : raise err finally : # drop first endpoint to start next connection from different endpoint # we do this, as default logic of connection attempts in locator do not fit here app . locator . endpoints = app . locator . endpoints [ 1 : ] # return connected app raise gen . Return ( app ) raise PluginApplicationError ( 42 , 42 , "could not connect to application" )
6,272
https://github.com/cocaine/cocaine-tools/blob/d8834f8e04ca42817d5f4e368d471484d4b3419f/cocaine/proxy/mds_direct.py#L69-L116
[ "def", "filter_grounded_only", "(", ")", ":", "if", "request", ".", "method", "==", "'OPTIONS'", ":", "return", "{", "}", "response", "=", "request", ".", "body", ".", "read", "(", ")", ".", "decode", "(", "'utf-8'", ")", "body", "=", "json", ".", "loads", "(", "response", ")", "stmts_json", "=", "body", ".", "get", "(", "'statements'", ")", "score_threshold", "=", "body", ".", "get", "(", "'score_threshold'", ")", "if", "score_threshold", "is", "not", "None", ":", "score_threshold", "=", "float", "(", "score_threshold", ")", "stmts", "=", "stmts_from_json", "(", "stmts_json", ")", "stmts_out", "=", "ac", ".", "filter_grounded_only", "(", "stmts", ",", "score_threshold", "=", "score_threshold", ")", "return", "_return_stmts", "(", "stmts_out", ")" ]
Demonstration of recording a message .
def RecordHelloWorld ( handler , t ) : url = "%s/receive_recording.py" % THIS_URL t . startRecording ( url ) t . say ( "Hello, World." ) t . stopRecording ( ) json = t . RenderJson ( ) logging . info ( "RecordHelloWorld json: %s" % json ) handler . response . out . write ( json )
6,273
https://github.com/tropo/tropo-webapi-python/blob/f87772644a6b45066a4c5218f0c1f6467b64ab3c/samples/appengine/main.py#L69-L79
[ "def", "check_throttles", "(", "self", ",", "request", ")", ":", "for", "throttle", "in", "self", ".", "get_throttles", "(", ")", ":", "if", "not", "throttle", ".", "allow_request", "(", "request", ",", "self", ")", ":", "self", ".", "throttled", "(", "request", ",", "throttle", ".", "wait", "(", ")", ")" ]
Demonstration of redirecting to another number .
def RedirectDemo ( handler , t ) : # t.say ("One moment please.") t . redirect ( SIP_PHONE ) json = t . RenderJson ( ) logging . info ( "RedirectDemo json: %s" % json ) handler . response . out . write ( json )
6,274
https://github.com/tropo/tropo-webapi-python/blob/f87772644a6b45066a4c5218f0c1f6467b64ab3c/samples/appengine/main.py#L81-L89
[ "def", "_get_indices", "(", "self", ",", "element", ",", "labels", "=", "'all'", ",", "mode", "=", "'or'", ")", ":", "# Parse and validate all input values.", "element", "=", "self", ".", "_parse_element", "(", "element", ",", "single", "=", "True", ")", "labels", "=", "self", ".", "_parse_labels", "(", "labels", "=", "labels", ",", "element", "=", "element", ")", "if", "element", "+", "'.all'", "not", "in", "self", ".", "keys", "(", ")", ":", "raise", "Exception", "(", "'Cannot proceed without {}.all'", ".", "format", "(", "element", ")", ")", "# Begin computing label array", "if", "mode", "in", "[", "'or'", ",", "'any'", ",", "'union'", "]", ":", "union", "=", "sp", ".", "zeros_like", "(", "self", "[", "element", "+", "'.all'", "]", ",", "dtype", "=", "bool", ")", "for", "item", "in", "labels", ":", "# Iterate over labels and collect all indices", "union", "=", "union", "+", "self", "[", "element", "+", "'.'", "+", "item", ".", "split", "(", "'.'", ")", "[", "-", "1", "]", "]", "ind", "=", "union", "elif", "mode", "in", "[", "'and'", ",", "'all'", ",", "'intersection'", "]", ":", "intersect", "=", "sp", ".", "ones_like", "(", "self", "[", "element", "+", "'.all'", "]", ",", "dtype", "=", "bool", ")", "for", "item", "in", "labels", ":", "# Iterate over labels and collect all indices", "intersect", "=", "intersect", "*", "self", "[", "element", "+", "'.'", "+", "item", ".", "split", "(", "'.'", ")", "[", "-", "1", "]", "]", "ind", "=", "intersect", "elif", "mode", "in", "[", "'xor'", ",", "'exclusive_or'", "]", ":", "xor", "=", "sp", ".", "zeros_like", "(", "self", "[", "element", "+", "'.all'", "]", ",", "dtype", "=", "int", ")", "for", "item", "in", "labels", ":", "# Iterate over labels and collect all indices", "info", "=", "self", "[", "element", "+", "'.'", "+", "item", ".", "split", "(", "'.'", ")", "[", "-", "1", "]", "]", "xor", "=", "xor", "+", "sp", ".", "int8", "(", "info", ")", "ind", "=", "(", "xor", "==", "1", ")", "elif", "mode", "in", "[", 
"'nor'", ",", "'not'", ",", "'none'", "]", ":", "nor", "=", "sp", ".", "zeros_like", "(", "self", "[", "element", "+", "'.all'", "]", ",", "dtype", "=", "int", ")", "for", "item", "in", "labels", ":", "# Iterate over labels and collect all indices", "info", "=", "self", "[", "element", "+", "'.'", "+", "item", ".", "split", "(", "'.'", ")", "[", "-", "1", "]", "]", "nor", "=", "nor", "+", "sp", ".", "int8", "(", "info", ")", "ind", "=", "(", "nor", "==", "0", ")", "elif", "mode", "in", "[", "'nand'", "]", ":", "nand", "=", "sp", ".", "zeros_like", "(", "self", "[", "element", "+", "'.all'", "]", ",", "dtype", "=", "int", ")", "for", "item", "in", "labels", ":", "# Iterate over labels and collect all indices", "info", "=", "self", "[", "element", "+", "'.'", "+", "item", ".", "split", "(", "'.'", ")", "[", "-", "1", "]", "]", "nand", "=", "nand", "+", "sp", ".", "int8", "(", "info", ")", "ind", "=", "(", "nand", "<", "len", "(", "labels", ")", ")", "*", "(", "nand", ">", "0", ")", "elif", "mode", "in", "[", "'xnor'", ",", "'nxor'", "]", ":", "xnor", "=", "sp", ".", "zeros_like", "(", "self", "[", "element", "+", "'.all'", "]", ",", "dtype", "=", "int", ")", "for", "item", "in", "labels", ":", "# Iterate over labels and collect all indices", "info", "=", "self", "[", "element", "+", "'.'", "+", "item", ".", "split", "(", "'.'", ")", "[", "-", "1", "]", "]", "xnor", "=", "xnor", "+", "sp", ".", "int8", "(", "info", ")", "ind", "=", "(", "xnor", ">", "1", ")", "else", ":", "raise", "Exception", "(", "'Unsupported mode: '", "+", "mode", ")", "# Extract indices from boolean mask", "ind", "=", "sp", ".", "where", "(", "ind", ")", "[", "0", "]", "ind", "=", "ind", ".", "astype", "(", "dtype", "=", "int", ")", "return", "ind" ]
Demonstration of transfering to another number
def TransferDemo ( handler , t ) : t . say ( "One moment please." ) t . transfer ( MY_PHONE ) t . say ( "Hi. I am a robot" ) json = t . RenderJson ( ) logging . info ( "TransferDemo json: %s" % json ) handler . response . out . write ( json )
6,275
https://github.com/tropo/tropo-webapi-python/blob/f87772644a6b45066a4c5218f0c1f6467b64ab3c/samples/appengine/main.py#L91-L100
[ "def", "on_episode_end", "(", "self", ",", "episode", ",", "logs", ")", ":", "duration", "=", "timeit", ".", "default_timer", "(", ")", "-", "self", ".", "starts", "[", "episode", "]", "metrics", "=", "self", ".", "metrics", "[", "episode", "]", "if", "np", ".", "isnan", "(", "metrics", ")", ".", "all", "(", ")", ":", "mean_metrics", "=", "np", ".", "array", "(", "[", "np", ".", "nan", "for", "_", "in", "self", ".", "metrics_names", "]", ")", "else", ":", "mean_metrics", "=", "np", ".", "nanmean", "(", "metrics", ",", "axis", "=", "0", ")", "assert", "len", "(", "mean_metrics", ")", "==", "len", "(", "self", ".", "metrics_names", ")", "data", "=", "list", "(", "zip", "(", "self", ".", "metrics_names", ",", "mean_metrics", ")", ")", "data", "+=", "list", "(", "logs", ".", "items", "(", ")", ")", "data", "+=", "[", "(", "'episode'", ",", "episode", ")", ",", "(", "'duration'", ",", "duration", ")", "]", "for", "key", ",", "value", "in", "data", ":", "if", "key", "not", "in", "self", ".", "data", ":", "self", ".", "data", "[", "key", "]", "=", "[", "]", "self", ".", "data", "[", "key", "]", ".", "append", "(", "value", ")", "if", "self", ".", "interval", "is", "not", "None", "and", "episode", "%", "self", ".", "interval", "==", "0", ":", "self", ".", "save_data", "(", ")", "# Clean up.", "del", "self", ".", "metrics", "[", "episode", "]", "del", "self", ".", "starts", "[", "episode", "]" ]
Decorator function for retrying the decorated function using an exponential or fixed backoff .
def retry ( ExceptionToCheck , tries = 4 , delay = 3 , backoff = 2 , status_codes = [ ] , logger = None ) : if backoff is None or backoff <= 0 : raise ValueError ( "backoff must be a number greater than 0" ) tries = math . floor ( tries ) if tries < 0 : raise ValueError ( "tries must be a number 0 or greater" ) if delay is None or delay <= 0 : raise ValueError ( "delay must be a number greater than 0" ) def deco_retry ( f ) : def f_retry ( * args , * * kwargs ) : mtries , mdelay = tries , delay while mtries > 1 : try : return f ( * args , * * kwargs ) except ExceptionToCheck as err : if ( type ( err ) is DataFailureException and len ( status_codes ) and err . status not in status_codes ) : raise if logger : logger . warning ( '%s: %s, Retrying in %s seconds.' % ( f . __name__ , err , mdelay ) ) time . sleep ( mdelay ) mtries -= 1 mdelay *= backoff return f ( * args , * * kwargs ) return f_retry return deco_retry
6,276
https://github.com/uw-it-aca/uw-restclients-core/blob/fda9380dceb6355ec6a3123e88c9ec66ae992682/restclients_core/util/retry.py#L6-L58
[ "def", "get_data_files", "(", "top", ")", ":", "data_files", "=", "[", "]", "ntrim", "=", "len", "(", "here", "+", "os", ".", "path", ".", "sep", ")", "for", "(", "d", ",", "_", ",", "filenames", ")", "in", "os", ".", "walk", "(", "top", ")", ":", "data_files", ".", "append", "(", "(", "d", "[", "ntrim", ":", "]", ",", "[", "os", ".", "path", ".", "join", "(", "d", ",", "f", ")", "for", "f", "in", "filenames", "]", ")", ")", "return", "data_files" ]
This method allows a service to edit a response .
def _custom_response_edit ( self , method , url , headers , body , response ) : if self . get_implementation ( ) . is_mock ( ) : delay = self . get_setting ( "MOCKDATA_DELAY" , 0.0 ) time . sleep ( delay ) self . _edit_mock_response ( method , url , headers , body , response )
6,277
https://github.com/uw-it-aca/uw-restclients-core/blob/fda9380dceb6355ec6a3123e88c9ec66ae992682/restclients_core/dao.py#L64-L74
[ "def", "patch_model_schemas", "(", "mapping", ")", ":", "from", "mbdata", ".", "models", "import", "Base", "for", "table", "in", "Base", ".", "metadata", ".", "sorted_tables", ":", "if", "table", ".", "schema", "is", "None", ":", "continue", "table", ".", "schema", "=", "mapping", ".", "get", "(", "table", ".", "schema", ",", "table", ".", "schema", ")" ]
Request a URL using the HTTP method POST .
def postURL ( self , url , headers = { } , body = None ) : return self . _load_resource ( "POST" , url , headers , body )
6,278
https://github.com/uw-it-aca/uw-restclients-core/blob/fda9380dceb6355ec6a3123e88c9ec66ae992682/restclients_core/dao.py#L100-L104
[ "def", "_openResources", "(", "self", ")", ":", "try", ":", "rate", ",", "data", "=", "scipy", ".", "io", ".", "wavfile", ".", "read", "(", "self", ".", "_fileName", ",", "mmap", "=", "True", ")", "except", "Exception", "as", "ex", ":", "logger", ".", "warning", "(", "ex", ")", "logger", ".", "warning", "(", "\"Unable to read wav with memmory mapping. Trying without now.\"", ")", "rate", ",", "data", "=", "scipy", ".", "io", ".", "wavfile", ".", "read", "(", "self", ".", "_fileName", ",", "mmap", "=", "False", ")", "self", ".", "_array", "=", "data", "self", ".", "attributes", "[", "'rate'", "]", "=", "rate" ]
Request a URL using the HTTP method PUT .
def putURL ( self , url , headers , body = None ) : return self . _load_resource ( "PUT" , url , headers , body )
6,279
https://github.com/uw-it-aca/uw-restclients-core/blob/fda9380dceb6355ec6a3123e88c9ec66ae992682/restclients_core/dao.py#L106-L110
[ "def", "_sincedb_start_position", "(", "self", ")", ":", "if", "not", "self", ".", "_sincedb_path", ":", "return", "None", "self", ".", "_sincedb_init", "(", ")", "self", ".", "_log_debug", "(", "'retrieving start_position from sincedb'", ")", "conn", "=", "sqlite3", ".", "connect", "(", "self", ".", "_sincedb_path", ",", "isolation_level", "=", "None", ")", "cursor", "=", "conn", ".", "cursor", "(", ")", "cursor", ".", "execute", "(", "'select position from sincedb where fid = :fid and filename = :filename'", ",", "{", "'fid'", ":", "self", ".", "_fid", ",", "'filename'", ":", "self", ".", "_filename", "}", ")", "start_position", "=", "None", "for", "row", "in", "cursor", ".", "fetchall", "(", ")", ":", "start_position", ",", "=", "row", "return", "start_position" ]
Request a URL using the HTTP method PATCH .
def patchURL ( self , url , headers , body ) : return self . _load_resource ( "PATCH" , url , headers , body )
6,280
https://github.com/uw-it-aca/uw-restclients-core/blob/fda9380dceb6355ec6a3123e88c9ec66ae992682/restclients_core/dao.py#L112-L116
[ "def", "numRegisteredForRole", "(", "self", ",", "role", ",", "includeTemporaryRegs", "=", "False", ")", ":", "count", "=", "self", ".", "eventregistration_set", ".", "filter", "(", "cancelled", "=", "False", ",", "dropIn", "=", "False", ",", "role", "=", "role", ")", ".", "count", "(", ")", "if", "includeTemporaryRegs", ":", "count", "+=", "self", ".", "temporaryeventregistration_set", ".", "filter", "(", "dropIn", "=", "False", ",", "role", "=", "role", ")", ".", "exclude", "(", "registration__expirationDate__lte", "=", "timezone", ".", "now", "(", ")", ")", ".", "count", "(", ")", "return", "count" ]
Decorate f to run inside the directory where setup . py resides .
def setup_dir ( f ) : setup_dir = os . path . dirname ( os . path . abspath ( __file__ ) ) def wrapped ( * args , * * kwargs ) : with chdir ( setup_dir ) : return f ( * args , * * kwargs ) return wrapped
6,281
https://github.com/mete0r/hypua2jamo/blob/caceb33a26c27645703d659a82bb1152deef1469/setup.py#L33-L42
[ "def", "delete_classifier", "(", "self", ",", "classifier_id", ",", "*", "*", "kwargs", ")", ":", "if", "classifier_id", "is", "None", ":", "raise", "ValueError", "(", "'classifier_id must be provided'", ")", "headers", "=", "{", "}", "if", "'headers'", "in", "kwargs", ":", "headers", ".", "update", "(", "kwargs", ".", "get", "(", "'headers'", ")", ")", "sdk_headers", "=", "get_sdk_headers", "(", "'watson_vision_combined'", ",", "'V3'", ",", "'delete_classifier'", ")", "headers", ".", "update", "(", "sdk_headers", ")", "params", "=", "{", "'version'", ":", "self", ".", "version", "}", "url", "=", "'/v3/classifiers/{0}'", ".", "format", "(", "*", "self", ".", "_encode_path_vars", "(", "classifier_id", ")", ")", "response", "=", "self", ".", "request", "(", "method", "=", "'DELETE'", ",", "url", "=", "url", ",", "headers", "=", "headers", ",", "params", "=", "params", ",", "accept_json", "=", "True", ")", "return", "response" ]
Template tag to render a feedback form .
def feedback_form ( context ) : user = None url = None if context . get ( 'request' ) : url = context [ 'request' ] . path if context [ 'request' ] . user . is_authenticated ( ) : user = context [ 'request' ] . user return { 'form' : FeedbackForm ( url = url , user = user ) , 'background_color' : FEEDBACK_FORM_COLOR , 'text_color' : FEEDBACK_FORM_TEXTCOLOR , 'text' : FEEDBACK_FORM_TEXT , }
6,282
https://github.com/bitlabstudio/django-feedback-form/blob/e3b5acbbde37caddab2da65f0fd5d7f3a8c8c597/feedback_form/templatetags/feedback_tags.py#L11-L24
[ "def", "is_wow64", "(", "self", ")", ":", "try", ":", "wow64", "=", "self", ".", "__wow64", "except", "AttributeError", ":", "if", "(", "win32", ".", "bits", "==", "32", "and", "not", "win32", ".", "wow64", ")", ":", "wow64", "=", "False", "else", ":", "if", "win32", ".", "PROCESS_ALL_ACCESS", "==", "win32", ".", "PROCESS_ALL_ACCESS_VISTA", ":", "dwAccess", "=", "win32", ".", "PROCESS_QUERY_LIMITED_INFORMATION", "else", ":", "dwAccess", "=", "win32", ".", "PROCESS_QUERY_INFORMATION", "hProcess", "=", "self", ".", "get_handle", "(", "dwAccess", ")", "try", ":", "wow64", "=", "win32", ".", "IsWow64Process", "(", "hProcess", ")", "except", "AttributeError", ":", "wow64", "=", "False", "self", ".", "__wow64", "=", "wow64", "return", "wow64" ]
Joins the items to be selected and inserts the current table name
def select ( self , * itms ) : if not itms : itms = [ '*' ] self . terms . append ( "select %s from %s" % ( ', ' . join ( itms ) , self . table ) ) return self
6,283
https://github.com/nickelkr/yfi/blob/720773ea311abe01be83982f26a61ef744f9f648/yfi/yql.py#L45-L50
[ "def", "start_vm", "(", "access_token", ",", "subscription_id", ",", "resource_group", ",", "vm_name", ")", ":", "endpoint", "=", "''", ".", "join", "(", "[", "get_rm_endpoint", "(", ")", ",", "'/subscriptions/'", ",", "subscription_id", ",", "'/resourceGroups/'", ",", "resource_group", ",", "'/providers/Microsoft.Compute/virtualMachines/'", ",", "vm_name", ",", "'/start'", ",", "'?api-version='", ",", "COMP_API", "]", ")", "return", "do_post", "(", "endpoint", ",", "''", ",", "access_token", ")" ]
Build out the in clause . Using _in due to shadowing for in
def _in ( self , * lst ) : self . terms . append ( 'in (%s)' % ', ' . join ( [ '"%s"' % x for x in lst ] ) ) return self
6,284
https://github.com/nickelkr/yfi/blob/720773ea311abe01be83982f26a61ef744f9f648/yfi/yql.py#L57-L60
[ "def", "get_balance", "(", "self", ",", "address", ")", ":", "url_append", "=", "\"/balance?token=%s\"", "%", "self", ".", "api_key", "url", "=", "self", ".", "base_url", "(", "\"addrs/%s\"", "%", "(", "address", "+", "url_append", ")", ")", "result", "=", "json", ".", "loads", "(", "urlopen", "(", "url", ")", ".", "read", "(", ")", ".", "decode", "(", "\"utf8\"", ")", ")", "return", "result" ]
Take all of the parts components and build the complete query to be passed to Yahoo YQL
def compile ( self ) : cs = "" for term in self . terms : if cs : cs += " " cs += term self . compiled_str = urllib . parse . quote ( cs ) return self
6,285
https://github.com/nickelkr/yfi/blob/720773ea311abe01be83982f26a61ef744f9f648/yfi/yql.py#L69-L78
[ "def", "get_collision_state", "(", "self", ",", "collision_name", ")", ":", "return", "self", ".", "call_remote_api", "(", "'simxReadCollision'", ",", "self", ".", "get_collision_handle", "(", "collision_name", ")", ",", "streaming", "=", "True", ")" ]
Construct a generator that yields file instances .
def read_files ( * sources , * * kwds ) : filenames = _generate_filenames ( sources ) filehandles = _generate_handles ( filenames ) for fh , source in filehandles : try : f = mwtab . MWTabFile ( source ) f . read ( fh ) if kwds . get ( 'validate' ) : validator . validate_file ( mwtabfile = f , section_schema_mapping = mwschema . section_schema_mapping , validate_samples = True , validate_factors = True ) yield f if VERBOSE : print ( "Processed file: {}" . format ( os . path . abspath ( source ) ) ) except Exception as e : if VERBOSE : print ( "Error processing file: " , os . path . abspath ( source ) , "\nReason:" , e ) pass
6,286
https://github.com/MoseleyBioinformaticsLab/mwtab/blob/8c0ae8ab2aa621662f99589ed41e481cf8b7152b/mwtab/fileio.py#L90-L115
[ "def", "Add", "(", "self", ",", "other", ")", ":", "if", "len", "(", "self", ".", "data", ")", "!=", "len", "(", "other", ".", "data", ")", ":", "raise", "RuntimeError", "(", "\"Can only add series of identical lengths.\"", ")", "for", "i", "in", "range", "(", "len", "(", "self", ".", "data", ")", ")", ":", "if", "self", ".", "data", "[", "i", "]", "[", "1", "]", "!=", "other", ".", "data", "[", "i", "]", "[", "1", "]", ":", "raise", "RuntimeError", "(", "\"Timestamp mismatch.\"", ")", "if", "self", ".", "data", "[", "i", "]", "[", "0", "]", "is", "None", "and", "other", ".", "data", "[", "i", "]", "[", "0", "]", "is", "None", ":", "continue", "self", ".", "data", "[", "i", "]", "[", "0", "]", "=", "(", "self", ".", "data", "[", "i", "]", "[", "0", "]", "or", "0", ")", "+", "(", "other", ".", "data", "[", "i", "]", "[", "0", "]", "or", "0", ")" ]
Test if path represents a valid URL .
def is_url ( path ) : try : parse_result = urlparse ( path ) return all ( ( parse_result . scheme , parse_result . netloc , parse_result . path ) ) except ValueError : return False
6,287
https://github.com/MoseleyBioinformaticsLab/mwtab/blob/8c0ae8ab2aa621662f99589ed41e481cf8b7152b/mwtab/fileio.py#L208-L219
[ "def", "lreshape", "(", "data", ",", "groups", ",", "dropna", "=", "True", ",", "label", "=", "None", ")", ":", "if", "isinstance", "(", "groups", ",", "dict", ")", ":", "keys", "=", "list", "(", "groups", ".", "keys", "(", ")", ")", "values", "=", "list", "(", "groups", ".", "values", "(", ")", ")", "else", ":", "keys", ",", "values", "=", "zip", "(", "*", "groups", ")", "all_cols", "=", "list", "(", "set", ".", "union", "(", "*", "[", "set", "(", "x", ")", "for", "x", "in", "values", "]", ")", ")", "id_cols", "=", "list", "(", "data", ".", "columns", ".", "difference", "(", "all_cols", ")", ")", "K", "=", "len", "(", "values", "[", "0", "]", ")", "for", "seq", "in", "values", ":", "if", "len", "(", "seq", ")", "!=", "K", ":", "raise", "ValueError", "(", "'All column lists must be same length'", ")", "mdata", "=", "{", "}", "pivot_cols", "=", "[", "]", "for", "target", ",", "names", "in", "zip", "(", "keys", ",", "values", ")", ":", "to_concat", "=", "[", "data", "[", "col", "]", ".", "values", "for", "col", "in", "names", "]", "import", "pandas", ".", "core", ".", "dtypes", ".", "concat", "as", "_concat", "mdata", "[", "target", "]", "=", "_concat", ".", "_concat_compat", "(", "to_concat", ")", "pivot_cols", ".", "append", "(", "target", ")", "for", "col", "in", "id_cols", ":", "mdata", "[", "col", "]", "=", "np", ".", "tile", "(", "data", "[", "col", "]", ".", "values", ",", "K", ")", "if", "dropna", ":", "mask", "=", "np", ".", "ones", "(", "len", "(", "mdata", "[", "pivot_cols", "[", "0", "]", "]", ")", ",", "dtype", "=", "bool", ")", "for", "c", "in", "pivot_cols", ":", "mask", "&=", "notna", "(", "mdata", "[", "c", "]", ")", "if", "not", "mask", ".", "all", "(", ")", ":", "mdata", "=", "{", "k", ":", "v", "[", "mask", "]", "for", "k", ",", "v", "in", "mdata", ".", "items", "(", ")", "}", "return", "data", ".", "_constructor", "(", "mdata", ",", "columns", "=", "id_cols", "+", "pivot_cols", ")" ]
Add authentication and authorization middleware to the app .
def AuthMiddleware ( app ) : # url_for mustn't be used here because AuthMiddleware is built once at startup, # url path can be reconstructed only on http requests (based on environ) basic_redirect_form = BasicRedirectFormPlugin ( login_form_url = "/signin" , login_handler_path = "/login" , post_login_url = "/" , logout_handler_path = "/logout" , post_logout_url = "/signin" , rememberer_name = "cookie" ) return setup_sql_auth ( app , user_class = model . User , group_class = model . Group , permission_class = model . Permission , dbsession = model . meta . Session , form_plugin = basic_redirect_form , cookie_secret = config [ 'cookie_secret' ] , translations = { 'user_name' : 'login' , 'users' : 'users' , 'group_name' : 'name' , 'groups' : 'groups' , 'permission_name' : 'name' , 'permissions' : 'permissions' , 'validate_password' : 'validate_password' } , )
6,288
https://github.com/camptocamp/Studio/blob/43cb7298434fb606b15136801b79b03571a2f27e/studio/lib/auth.py#L154-L183
[ "def", "remove_node_by_value", "(", "self", ",", "value", ")", ":", "self", ".", "node_list", "=", "[", "node", "for", "node", "in", "self", ".", "node_list", "if", "node", ".", "value", "!=", "value", "]", "# Remove links pointing to the deleted node", "for", "node", "in", "self", ".", "node_list", ":", "node", ".", "link_list", "=", "[", "link", "for", "link", "in", "node", ".", "link_list", "if", "link", ".", "target", ".", "value", "!=", "value", "]" ]
Return the full path to path by prepending the SCRIPT_NAME . If path is a URL do nothing .
def _get_full_path ( self , path , environ ) : if path . startswith ( '/' ) : path = environ . get ( 'SCRIPT_NAME' , '' ) + path return path
6,289
https://github.com/camptocamp/Studio/blob/43cb7298434fb606b15136801b79b03571a2f27e/studio/lib/auth.py#L130-L139
[ "def", "IOR", "(", "classical_reg1", ",", "classical_reg2", ")", ":", "left", ",", "right", "=", "unpack_reg_val_pair", "(", "classical_reg1", ",", "classical_reg2", ")", "return", "ClassicalInclusiveOr", "(", "left", ",", "right", ")" ]
Replace the query string of url with qs and return the new URL .
def _replace_qs ( self , url , qs ) : url_parts = list ( urlparse ( url ) ) url_parts [ 4 ] = qs return urlunparse ( url_parts )
6,290
https://github.com/camptocamp/Studio/blob/43cb7298434fb606b15136801b79b03571a2f27e/studio/lib/auth.py#L141-L148
[ "def", "generate_displacements", "(", "self", ",", "distance", "=", "0.01", ",", "is_plusminus", "=", "'auto'", ",", "is_diagonal", "=", "True", ",", "is_trigonal", "=", "False", ")", ":", "displacement_directions", "=", "get_least_displacements", "(", "self", ".", "_symmetry", ",", "is_plusminus", "=", "is_plusminus", ",", "is_diagonal", "=", "is_diagonal", ",", "is_trigonal", "=", "is_trigonal", ",", "log_level", "=", "self", ".", "_log_level", ")", "displacement_dataset", "=", "directions_to_displacement_dataset", "(", "displacement_directions", ",", "distance", ",", "self", ".", "_supercell", ")", "self", ".", "set_displacement_dataset", "(", "displacement_dataset", ")" ]
write the current settings to the config file
def write ( self ) : with open ( storage . config_file , 'w' ) as cfg : yaml . dump ( self . as_dict ( ) , cfg , default_flow_style = False ) storage . refresh ( )
6,291
https://github.com/pjamesjoyce/lcopt/blob/3f1caca31fece4a3068a384900707e6d21d04597/lcopt/settings.py#L98-L103
[ "def", "Nu_vertical_cylinder", "(", "Pr", ",", "Gr", ",", "L", "=", "None", ",", "D", "=", "None", ",", "Method", "=", "None", ",", "AvailableMethods", "=", "False", ")", ":", "def", "list_methods", "(", ")", ":", "methods", "=", "[", "]", "for", "key", ",", "values", "in", "vertical_cylinder_correlations", ".", "items", "(", ")", ":", "if", "values", "[", "4", "]", "or", "all", "(", "(", "L", ",", "D", ")", ")", ":", "methods", ".", "append", "(", "key", ")", "if", "'Popiel & Churchill'", "in", "methods", ":", "methods", ".", "remove", "(", "'Popiel & Churchill'", ")", "methods", ".", "insert", "(", "0", ",", "'Popiel & Churchill'", ")", "elif", "'McAdams, Weiss & Saunders'", "in", "methods", ":", "methods", ".", "remove", "(", "'McAdams, Weiss & Saunders'", ")", "methods", ".", "insert", "(", "0", ",", "'McAdams, Weiss & Saunders'", ")", "return", "methods", "if", "AvailableMethods", ":", "return", "list_methods", "(", ")", "if", "not", "Method", ":", "Method", "=", "list_methods", "(", ")", "[", "0", "]", "if", "Method", "in", "vertical_cylinder_correlations", ":", "if", "vertical_cylinder_correlations", "[", "Method", "]", "[", "4", "]", ":", "return", "vertical_cylinder_correlations", "[", "Method", "]", "[", "0", "]", "(", "Pr", "=", "Pr", ",", "Gr", "=", "Gr", ")", "else", ":", "return", "vertical_cylinder_correlations", "[", "Method", "]", "[", "0", "]", "(", "Pr", "=", "Pr", ",", "Gr", "=", "Gr", ",", "L", "=", "L", ",", "D", "=", "D", ")", "else", ":", "raise", "Exception", "(", "\"Correlation name not recognized; see the \"", "\"documentation for the available options.\"", ")" ]
convert value from python object to json
def process_bind_param ( self , value , dialect ) : if value is not None : value = simplejson . dumps ( value ) return value
6,292
https://github.com/camptocamp/Studio/blob/43cb7298434fb606b15136801b79b03571a2f27e/studio/lib/sa_types.py#L26-L30
[ "def", "catFiles", "(", "filesToCat", ",", "catFile", ")", ":", "if", "len", "(", "filesToCat", ")", "==", "0", ":", "#We must handle this case or the cat call will hang waiting for input", "open", "(", "catFile", ",", "'w'", ")", ".", "close", "(", ")", "return", "maxCat", "=", "25", "system", "(", "\"cat %s > %s\"", "%", "(", "\" \"", ".", "join", "(", "filesToCat", "[", ":", "maxCat", "]", ")", ",", "catFile", ")", ")", "filesToCat", "=", "filesToCat", "[", "maxCat", ":", "]", "while", "len", "(", "filesToCat", ")", ">", "0", ":", "system", "(", "\"cat %s >> %s\"", "%", "(", "\" \"", ".", "join", "(", "filesToCat", "[", ":", "maxCat", "]", ")", ",", "catFile", ")", ")", "filesToCat", "=", "filesToCat", "[", "maxCat", ":", "]" ]
convert value from json to a python object
def process_result_value ( self , value , dialect ) : if value is not None : value = simplejson . loads ( value ) return value
6,293
https://github.com/camptocamp/Studio/blob/43cb7298434fb606b15136801b79b03571a2f27e/studio/lib/sa_types.py#L32-L36
[ "def", "catFiles", "(", "filesToCat", ",", "catFile", ")", ":", "if", "len", "(", "filesToCat", ")", "==", "0", ":", "#We must handle this case or the cat call will hang waiting for input", "open", "(", "catFile", ",", "'w'", ")", ".", "close", "(", ")", "return", "maxCat", "=", "25", "system", "(", "\"cat %s > %s\"", "%", "(", "\" \"", ".", "join", "(", "filesToCat", "[", ":", "maxCat", "]", ")", ",", "catFile", ")", ")", "filesToCat", "=", "filesToCat", "[", "maxCat", ":", "]", "while", "len", "(", "filesToCat", ")", ">", "0", ":", "system", "(", "\"cat %s >> %s\"", "%", "(", "\" \"", ".", "join", "(", "filesToCat", "[", ":", "maxCat", "]", ")", ",", "catFile", ")", ")", "filesToCat", "=", "filesToCat", "[", "maxCat", ":", "]" ]
Builds the brief module info from file
def getBriefModuleInfoFromFile ( fileName ) : modInfo = BriefModuleInfo ( ) _cdmpyparser . getBriefModuleInfoFromFile ( modInfo , fileName ) modInfo . flush ( ) return modInfo
6,294
https://github.com/SergeySatskiy/cdm-pythonparser/blob/7e933aca899b1853d744082313ffc3a8b1154505/cdmpyparser.py#L609-L614
[ "def", "face_index", "(", "vertices", ")", ":", "new_verts", "=", "[", "]", "face_indices", "=", "[", "]", "for", "wall", "in", "vertices", ":", "face_wall", "=", "[", "]", "for", "vert", "in", "wall", ":", "if", "new_verts", ":", "if", "not", "np", ".", "isclose", "(", "vert", ",", "new_verts", ")", ".", "all", "(", "axis", "=", "1", ")", ".", "any", "(", ")", ":", "new_verts", ".", "append", "(", "vert", ")", "else", ":", "new_verts", ".", "append", "(", "vert", ")", "face_index", "=", "np", ".", "where", "(", "np", ".", "isclose", "(", "vert", ",", "new_verts", ")", ".", "all", "(", "axis", "=", "1", ")", ")", "[", "0", "]", "[", "0", "]", "face_wall", ".", "append", "(", "face_index", ")", "face_indices", ".", "append", "(", "face_wall", ")", "return", "np", ".", "array", "(", "new_verts", ")", ",", "np", ".", "array", "(", "face_indices", ")" ]
Builds the brief module info from memory
def getBriefModuleInfoFromMemory ( content ) : modInfo = BriefModuleInfo ( ) _cdmpyparser . getBriefModuleInfoFromMemory ( modInfo , content ) modInfo . flush ( ) return modInfo
6,295
https://github.com/SergeySatskiy/cdm-pythonparser/blob/7e933aca899b1853d744082313ffc3a8b1154505/cdmpyparser.py#L617-L622
[ "def", "face_index", "(", "vertices", ")", ":", "new_verts", "=", "[", "]", "face_indices", "=", "[", "]", "for", "wall", "in", "vertices", ":", "face_wall", "=", "[", "]", "for", "vert", "in", "wall", ":", "if", "new_verts", ":", "if", "not", "np", ".", "isclose", "(", "vert", ",", "new_verts", ")", ".", "all", "(", "axis", "=", "1", ")", ".", "any", "(", ")", ":", "new_verts", ".", "append", "(", "vert", ")", "else", ":", "new_verts", ".", "append", "(", "vert", ")", "face_index", "=", "np", ".", "where", "(", "np", ".", "isclose", "(", "vert", ",", "new_verts", ")", ".", "all", "(", "axis", "=", "1", ")", ")", "[", "0", "]", "[", "0", "]", "face_wall", ".", "append", "(", "face_index", ")", "face_indices", ".", "append", "(", "face_wall", ")", "return", "np", ".", "array", "(", "new_verts", ")", ",", "np", ".", "array", "(", "face_indices", ")" ]
Provides a name for display purpose respecting the alias
def getDisplayName ( self ) : if self . alias == "" : return self . name return self . name + " as " + self . alias
6,296
https://github.com/SergeySatskiy/cdm-pythonparser/blob/7e933aca899b1853d744082313ffc3a8b1154505/cdmpyparser.py#L124-L128
[ "def", "find_stream", "(", "cls", ",", "fileobj", ",", "max_bytes", ")", ":", "r", "=", "BitReader", "(", "fileobj", ")", "stream", "=", "cls", "(", "r", ")", "if", "stream", ".", "sync", "(", "max_bytes", ")", ":", "stream", ".", "offset", "=", "(", "r", ".", "get_position", "(", ")", "-", "12", ")", "//", "8", "return", "stream" ]
Flushes the collected information
def flush ( self ) : self . __flushLevel ( 0 ) if self . __lastImport is not None : self . imports . append ( self . __lastImport )
6,297
https://github.com/SergeySatskiy/cdm-pythonparser/blob/7e933aca899b1853d744082313ffc3a8b1154505/cdmpyparser.py#L456-L460
[ "def", "update_dvportgroup", "(", "portgroup_ref", ",", "spec", ")", ":", "pg_name", "=", "get_managed_object_name", "(", "portgroup_ref", ")", "log", ".", "trace", "(", "'Updating portgrouo %s'", ",", "pg_name", ")", "try", ":", "task", "=", "portgroup_ref", ".", "ReconfigureDVPortgroup_Task", "(", "spec", ")", "except", "vim", ".", "fault", ".", "NoPermission", "as", "exc", ":", "log", ".", "exception", "(", "exc", ")", "raise", "salt", ".", "exceptions", ".", "VMwareApiError", "(", "'Not enough permissions. Required privilege: '", "'{0}'", ".", "format", "(", "exc", ".", "privilegeId", ")", ")", "except", "vim", ".", "fault", ".", "VimFault", "as", "exc", ":", "log", ".", "exception", "(", "exc", ")", "raise", "salt", ".", "exceptions", ".", "VMwareApiError", "(", "exc", ".", "msg", ")", "except", "vmodl", ".", "RuntimeFault", "as", "exc", ":", "log", ".", "exception", "(", "exc", ")", "raise", "salt", ".", "exceptions", ".", "VMwareRuntimeError", "(", "exc", ".", "msg", ")", "wait_for_task", "(", "task", ",", "pg_name", ",", "six", ".", "text_type", "(", "task", ".", "__class__", ")", ")" ]
Merge the found objects to the required level
def __flushLevel ( self , level ) : objectsCount = len ( self . objectsStack ) while objectsCount > level : lastIndex = objectsCount - 1 if lastIndex == 0 : # We have exactly one element in the stack if self . objectsStack [ 0 ] . __class__ . __name__ == "Class" : self . classes . append ( self . objectsStack [ 0 ] ) else : self . functions . append ( self . objectsStack [ 0 ] ) self . objectsStack = [ ] break # Append to the previous level if self . objectsStack [ lastIndex ] . __class__ . __name__ == "Class" : self . objectsStack [ lastIndex - 1 ] . classes . append ( self . objectsStack [ lastIndex ] ) else : self . objectsStack [ lastIndex - 1 ] . functions . append ( self . objectsStack [ lastIndex ] ) del self . objectsStack [ lastIndex ] objectsCount -= 1
6,298
https://github.com/SergeySatskiy/cdm-pythonparser/blob/7e933aca899b1853d744082313ffc3a8b1154505/cdmpyparser.py#L462-L486
[ "def", "is_alive", "(", "self", ")", ":", "null", "=", "chr", "(", "0", ")", "try", ":", "if", "self", ".", "device", "is", "None", ":", "return", "{", "\"is_alive\"", ":", "False", "}", "else", ":", "# Try sending ASCII null byte to maintain the connection alive", "self", ".", "_send_command", "(", "null", ")", "except", "(", "socket", ".", "error", ",", "EOFError", ")", ":", "# If unable to send, we can tell for sure that the connection is unusable,", "# hence return False.", "return", "{", "\"is_alive\"", ":", "False", "}", "return", "{", "\"is_alive\"", ":", "self", ".", "device", ".", "remote_conn", ".", "transport", ".", "is_active", "(", ")", "}" ]
Memorizes module encoding
def _onEncoding ( self , encString , line , pos , absPosition ) : self . encoding = Encoding ( encString , line , pos , absPosition )
6,299
https://github.com/SergeySatskiy/cdm-pythonparser/blob/7e933aca899b1853d744082313ffc3a8b1154505/cdmpyparser.py#L488-L490
[ "def", "setAccessRules", "(", "self", ",", "pid", ",", "public", "=", "False", ")", ":", "url", "=", "\"{url_base}/resource/accessRules/{pid}/\"", ".", "format", "(", "url_base", "=", "self", ".", "url_base", ",", "pid", "=", "pid", ")", "params", "=", "{", "'public'", ":", "public", "}", "r", "=", "self", ".", "_request", "(", "'PUT'", ",", "url", ",", "data", "=", "params", ")", "if", "r", ".", "status_code", "!=", "200", ":", "if", "r", ".", "status_code", "==", "403", ":", "raise", "HydroShareNotAuthorized", "(", "(", "'PUT'", ",", "url", ")", ")", "elif", "r", ".", "status_code", "==", "404", ":", "raise", "HydroShareNotFound", "(", "(", "pid", ",", ")", ")", "else", ":", "raise", "HydroShareHTTPException", "(", "(", "url", ",", "'PUT'", ",", "r", ".", "status_code", ",", "params", ")", ")", "resource", "=", "r", ".", "json", "(", ")", "assert", "(", "resource", "[", "'resource_id'", "]", "==", "pid", ")", "return", "resource", "[", "'resource_id'", "]" ]