query
stringlengths
5
1.23k
positive
stringlengths
53
15.2k
id_
int64
0
252k
task_name
stringlengths
87
242
negative
listlengths
20
553
reads the value from the instrument
def value(self):
    """Read the current value of the probed attribute from the instrument.

    The reading is appended to ``self.buffer`` before being returned.
    """
    reading = getattr(self.instrument, self.probe_name)
    self.buffer.append(reading)
    return reading
6,000
https://github.com/LISE-B26/pylabcontrol/blob/67482e5157fcd1c40705e5c2cacfb93564703ed0/build/lib/pylabcontrol/src/core/probe.py#L58-L66
[ "def", "largest_connected_submatrix", "(", "C", ",", "directed", "=", "True", ",", "lcc", "=", "None", ")", ":", "if", "lcc", "is", "None", ":", "lcc", "=", "largest_connected_set", "(", "C", ",", "directed", "=", "directed", ")", "\"\"\"Row slicing\"\"\"", "if", "scipy", ".", "sparse", ".", "issparse", "(", "C", ")", ":", "C_cc", "=", "C", ".", "tocsr", "(", ")", "else", ":", "C_cc", "=", "C", "C_cc", "=", "C_cc", "[", "lcc", ",", ":", "]", "\"\"\"Column slicing\"\"\"", "if", "scipy", ".", "sparse", ".", "issparse", "(", "C", ")", ":", "C_cc", "=", "C_cc", ".", "tocsc", "(", ")", "C_cc", "=", "C_cc", "[", ":", ",", "lcc", "]", "if", "scipy", ".", "sparse", ".", "issparse", "(", "C", ")", ":", "return", "C_cc", ".", "tocoo", "(", ")", "else", ":", "return", "C_cc" ]
Load probes from probe_dict and append them to probes; if additional instruments are required, create them and add them to instruments.
def load_and_append(probe_dict, probes, instruments=None):
    """Load probes from ``probe_dict`` and append them to ``probes``.

    If additional instruments are required, they are created and added to
    the instruments mapping.

    Args:
        probe_dict: mapping of instrument name -> comma-separated probe names.
        probes: existing mapping of instrument name -> {probe name: Probe}.
        instruments: existing mapping of instrument name -> instrument
            instance. Defaults to an empty mapping.

    Returns:
        tuple: ``(updated_probes, loaded_failed, updated_instruments)`` where
        ``loaded_failed`` maps probe names to the error that prevented loading.

    Note:
        entries for instruments that fail to load are removed from
        ``probe_dict`` in place (pre-existing behavior, kept for callers).
    """
    # NOTE(fix): the default was a mutable `{}` shared between all calls;
    # use the None sentinel instead.
    if instruments is None:
        instruments = {}

    loaded_failed = {}
    updated_probes = {}
    updated_probes.update(probes)
    updated_instruments = {}
    updated_instruments.update(instruments)

    # ===== load new instruments =======
    new_instruments = list(set(probe_dict.keys()) - set(probes.keys()))
    if new_instruments != []:
        updated_instruments, failed = Instrument.load_and_append(
            {instrument_name: instrument_name for instrument_name in new_instruments}, instruments)

        if failed != []:
            # If loading an instrument fails, all the probes that depend on
            # that instrument also fail.  Ignore failed instruments that did
            # exist already, because they failed *because* they existed.
            for failed_instrument in set(failed) - set(instruments.keys()):
                for probe_name in probe_dict[failed_instrument]:
                    loaded_failed[probe_name] = ValueError(
                        'failed to load instrument {:s} already exists. Did not load!'.format(failed_instrument))
                del probe_dict[failed_instrument]

    # ===== now we are sure that all the instruments that we need for the probes already exist
    for instrument_name, probe_names in probe_dict.items():
        if not instrument_name in updated_probes:
            updated_probes.update({instrument_name: {}})

        for probe_name in probe_names.split(','):
            if probe_name in updated_probes[instrument_name]:
                loaded_failed[probe_name] = ValueError(
                    'failed to load probe {:s} already exists. Did not load!'.format(probe_name))
            else:
                probe_instance = Probe(updated_instruments[instrument_name], probe_name)
                updated_probes[instrument_name].update({probe_name: probe_instance})

    return updated_probes, loaded_failed, updated_instruments
6,001
https://github.com/LISE-B26/pylabcontrol/blob/67482e5157fcd1c40705e5c2cacfb93564703ed0/build/lib/pylabcontrol/src/core/probe.py#L106-L191
[ "def", "DeleteNotifications", "(", "self", ",", "session_ids", ",", "start", "=", "None", ",", "end", "=", "None", ")", ":", "if", "not", "session_ids", ":", "return", "for", "session_id", "in", "session_ids", ":", "if", "not", "isinstance", "(", "session_id", ",", "rdfvalue", ".", "SessionID", ")", ":", "raise", "RuntimeError", "(", "\"Can only delete notifications for rdfvalue.SessionIDs.\"", ")", "if", "start", "is", "None", ":", "start", "=", "0", "else", ":", "start", "=", "int", "(", "start", ")", "if", "end", "is", "None", ":", "end", "=", "self", ".", "frozen_timestamp", "or", "rdfvalue", ".", "RDFDatetime", ".", "Now", "(", ")", "for", "queue", ",", "ids", "in", "iteritems", "(", "collection", ".", "Group", "(", "session_ids", ",", "lambda", "session_id", ":", "session_id", ".", "Queue", "(", ")", ")", ")", ":", "queue_shards", "=", "self", ".", "GetAllNotificationShards", "(", "queue", ")", "self", ".", "data_store", ".", "DeleteNotifications", "(", "queue_shards", ",", "ids", ",", "start", ",", "end", ")" ]
Returns an address by user controlled input ID
def get(self, key):
    """Return the address whose ID matches the user-controlled ``key``.

    Raises:
        KeyError: when ``key`` is not a known ID.
    """
    try:
        position = self.id_lookup.get(key)
        return self[position]
    except TypeError:
        raise KeyError
6,002
https://github.com/bennylope/smartystreets.py/blob/f45e37dd52ea7cec8ed43ce2b64724beb6dbbb69/smartystreets/data.py#L87-L98
[ "def", "_save_action", "(", "extra_context", "=", "None", ")", ":", "# Get list of revisions to Conda environment since creation.", "revisions_js", "=", "ch", ".", "conda_exec", "(", "'list'", ",", "'--revisions'", ",", "'--json'", ",", "verbose", "=", "False", ")", "revisions", "=", "json", ".", "loads", "(", "revisions_js", ")", "# Save list of revisions to `/etc/microdrop/plugins/actions/rev<rev>.json`", "# See [wheeler-microfluidics/microdrop#200][i200].", "#", "# [i200]: https://github.com/wheeler-microfluidics/microdrop/issues/200", "action", "=", "extra_context", ".", "copy", "(", ")", "if", "extra_context", "else", "{", "}", "action", "[", "'revisions'", "]", "=", "revisions", "action_path", "=", "(", "MICRODROP_CONDA_ACTIONS", ".", "joinpath", "(", "'rev{}.json.bz2'", ".", "format", "(", "revisions", "[", "-", "1", "]", "[", "'rev'", "]", ")", ")", ")", "action_path", ".", "parent", ".", "makedirs_p", "(", ")", "# Compress action file using bz2 to save disk space.", "with", "bz2", ".", "BZ2File", "(", "action_path", ",", "mode", "=", "'w'", ")", "as", "output", ":", "json", ".", "dump", "(", "action", ",", "output", ",", "indent", "=", "2", ")", "return", "action_path", ",", "action" ]
Returns an address by input index — a value that matches the list index of the provided lookup value, not necessarily the result.
def get_index(self, key):
    """Return the address at the list index matched by ``key``.

    The looked-up value is a list index of the provided lookup value, not
    necessarily the result's own ID.

    Raises:
        KeyError: when ``key`` has no entry in ``index_lookup``.
    """
    try:
        position = self.index_lookup.get(key)
        return self[position]
    except TypeError:
        raise KeyError
6,003
https://github.com/bennylope/smartystreets.py/blob/f45e37dd52ea7cec8ed43ce2b64724beb6dbbb69/smartystreets/data.py#L100-L112
[ "def", "set_default_headers", "(", "self", ")", ":", "mod_opts", "=", "self", ".", "application", ".", "mod_opts", "if", "mod_opts", ".", "get", "(", "'cors_origin'", ")", ":", "origin", "=", "self", ".", "request", ".", "headers", ".", "get", "(", "'Origin'", ")", "allowed_origin", "=", "_check_cors_origin", "(", "origin", ",", "mod_opts", "[", "'cors_origin'", "]", ")", "if", "allowed_origin", ":", "self", ".", "set_header", "(", "\"Access-Control-Allow-Origin\"", ",", "allowed_origin", ")" ]
Complemented incomplete Gamma integral .
def _igamc ( a , x ) : # Compute x**a * exp(-x) / Gamma(a) ax = math . exp ( a * math . log ( x ) - x - math . lgamma ( a ) ) # Continued fraction y = 1.0 - a z = x + y + 1.0 c = 0.0 pkm2 = 1.0 qkm2 = x pkm1 = x + 1.0 qkm1 = z * x ans = pkm1 / qkm1 while True : c += 1.0 y += 1.0 z += 2.0 yc = y * c pk = pkm1 * z - pkm2 * yc qk = qkm1 * z - qkm2 * yc if qk != 0 : r = pk / qk t = abs ( ( ans - r ) / r ) ans = r else : t = 1.0 pkm2 = pkm1 pkm1 = pk qkm2 = qkm1 qkm1 = qk if abs ( pk ) > BIG : pkm2 *= BIGINV pkm1 *= BIGINV qkm2 *= BIGINV qkm1 *= BIGINV if t <= MACHEP : return ans * ax
6,004
https://github.com/etal/biofrills/blob/36684bb6c7632f96215e8b2b4ebc86640f331bcd/biofrills/stats/chisq.py#L47-L110
[ "def", "remove", "(", "self", ")", ":", "for", "cgroup", "in", "self", ".", "paths", ":", "remove_cgroup", "(", "cgroup", ")", "del", "self", ".", "paths", "del", "self", ".", "per_subsystem" ]
The simplest usage of watershed delineation based on TauDEM .
def main():
    """Run the simplest watershed-delineation example based on TauDEM."""
    workspace = '../tests/data/tmp_results/wtsd_delineation'
    dem_path = '../tests/data/Jamaica_dem.tif'
    process_count = 2
    TauDEMWorkflow.watershed_delineation(process_count, dem_path, workingdir=workspace)
6,005
https://github.com/lreis2415/PyGeoC/blob/9a92d1a229bb74298e3c57f27c97079980b5f729/examples/ex04_watershed_delineation.py#L8-L14
[ "async", "def", "checkpoint_async", "(", "self", ",", "event_processor_context", "=", "None", ")", ":", "captured_checkpoint", "=", "Checkpoint", "(", "self", ".", "partition_id", ",", "self", ".", "offset", ",", "self", ".", "sequence_number", ")", "await", "self", ".", "persist_checkpoint_async", "(", "captured_checkpoint", ",", "event_processor_context", ")", "self", ".", "event_processor_context", "=", "event_processor_context" ]
Return the first line containing a set of strings in a file .
def _get_line ( self , search_string , search_file , return_string = True , case_sens = True ) : if os . path . isfile ( search_file ) : # if single search string if type ( search_string ) == type ( '' ) : search_string = [ search_string ] # if case insensitive, convert everything to lowercase if not case_sens : search_string = [ i . lower ( ) for i in search_string ] with open ( search_file ) as fp : # search for the strings line by line for line in fp : query_line = line if case_sens else line . lower ( ) if all ( [ i in query_line for i in search_string ] ) : return line if return_string else True if return_string : raise Exception ( '%s not found in %s' % ( ' & ' . join ( search_string ) , search_file ) ) else : return False else : raise Exception ( '%s file does not exist' % search_file )
6,006
https://github.com/CitrineInformatics/pif-dft/blob/d5411dc1f6c6e8d454b132977ca7ab3bb8131a80/dfttopif/parsers/pwscf.py#L66-L88
[ "def", "create_unbroadcast_axis", "(", "shape", ",", "broadcast_shape", ")", ":", "return", "tuple", "(", "-", "(", "1", "+", "i", ")", "for", "i", "in", "range", "(", "len", "(", "broadcast_shape", ")", ")", "if", "i", ">=", "len", "(", "shape", ")", "or", "broadcast_shape", "[", "-", "(", "1", "+", "i", ")", "]", ">", "shape", "[", "-", "(", "1", "+", "i", ")", "]", ")" ]
Determine the cutoff energy from the output
def get_cutoff_energy(self):
    """Determine the kinetic-energy cutoff from the parsed output settings."""
    cutoff = self.settings["kinetic-energy cutoff"]
    cutoff_units = self.settings['kinetic-energy cutoff units']
    return Value(scalars=[Scalar(value=cutoff)], units=cutoff_units)
6,007
https://github.com/CitrineInformatics/pif-dft/blob/d5411dc1f6c6e8d454b132977ca7ab3bb8131a80/dfttopif/parsers/pwscf.py#L98-L103
[ "def", "open_book", "(", "self", ",", "for_writing", "=", "False", ")", "->", "piecash", ".", "Book", ":", "filename", "=", "None", "# check if the file path is already a URL.", "file_url", "=", "urllib", ".", "parse", ".", "urlparse", "(", "self", ".", "filename", ")", "if", "file_url", ".", "scheme", "==", "\"file\"", "or", "file_url", ".", "scheme", "==", "\"sqlite\"", ":", "filename", "=", "file_url", ".", "path", "[", "1", ":", "]", "else", ":", "filename", "=", "self", ".", "filename", "if", "not", "os", ".", "path", ".", "isfile", "(", "filename", ")", ":", "log", "(", "WARN", ",", "\"Database %s requested but not found. Creating an in-memory book.\"", ",", "filename", ")", "return", "self", ".", "create_book", "(", ")", "access_type", "=", "\"read/write\"", "if", "for_writing", "else", "\"readonly\"", "log", "(", "INFO", ",", "\"Using %s in %s mode.\"", ",", "filename", ",", "access_type", ")", "# file_path = path.relpath(self.filename)", "file_path", "=", "path", ".", "abspath", "(", "filename", ")", "if", "not", "for_writing", ":", "book", "=", "piecash", ".", "open_book", "(", "file_path", ",", "open_if_lock", "=", "True", ")", "else", ":", "book", "=", "piecash", ".", "open_book", "(", "file_path", ",", "open_if_lock", "=", "True", ",", "readonly", "=", "False", ")", "# book = create_book()", "return", "book" ]
Determine the pseudopotential names from the output
def get_pp_name(self):
    """Determine the pseudopotential names from the output file.

    Raises:
        Exception: if fewer names are found than the number of atom types.
    """
    # The number of atomic types tells us how many names to expect.
    count_line = self._get_line('number of atomic types', self.outputf)
    natomtypes = int(count_line.split()[5])

    # Collect one name per "PseudoPot. #" marker; the file name is on the
    # line that follows, and only its basename is kept.
    ppnames = []
    with open(self.outputf) as fp:
        for line in fp:
            if "PseudoPot. #" not in line:
                continue
            ppnames.append(Scalar(value=next(fp).split('/')[-1].rstrip()))
            if len(ppnames) == natomtypes:
                return Value(scalars=ppnames)
    raise Exception('Could not find %i pseudopotential names' % natomtypes)
6,008
https://github.com/CitrineInformatics/pif-dft/blob/d5411dc1f6c6e8d454b132977ca7ab3bb8131a80/dfttopif/parsers/pwscf.py#L171-L183
[ "def", "arcball_constrain_to_axis", "(", "point", ",", "axis", ")", ":", "v", "=", "np", ".", "array", "(", "point", ",", "dtype", "=", "np", ".", "float64", ",", "copy", "=", "True", ")", "a", "=", "np", ".", "array", "(", "axis", ",", "dtype", "=", "np", ".", "float64", ",", "copy", "=", "True", ")", "v", "-=", "a", "*", "np", ".", "dot", "(", "a", ",", "v", ")", "# on plane", "n", "=", "vector_norm", "(", "v", ")", "if", "n", ">", "_EPS", ":", "if", "v", "[", "2", "]", "<", "0.0", ":", "np", ".", "negative", "(", "v", ",", "v", ")", "v", "/=", "n", "return", "v", "if", "a", "[", "2", "]", "==", "1.0", ":", "return", "np", ".", "array", "(", "[", "1.0", ",", "0.0", ",", "0.0", "]", ")", "return", "unit_vector", "(", "[", "-", "a", "[", "1", "]", ",", "a", "[", "0", "]", ",", "0.0", "]", ")" ]
Determine the DFT + U type and parameters from the output
def get_U_settings(self):
    """Determine the DFT+U type and parameters from the output.

    Scans ``self.outputf`` for an "LDA+U calculation" marker and, when
    found, parses the per-species values from the table that follows.

    Returns:
        Value built from the parsed settings, or None when this is not a
        DFT+U calculation.
    """
    with open(self.outputf) as fp:
        for line in fp:
            if "LDA+U calculation" in line:
                U_param = {}
                U_param['Type'] = line.split()[0]
                U_param['Values'] = {}
                # look through next several lines
                for nl in range(15):
                    line2 = next(fp).split()
                    if len(line2) > 1 and line2[0] == "atomic":
                        pass  # column titles
                    elif len(line2) == 6:
                        # 6-column data row: species label first, then the
                        # L/U/J values at columns 1, 2 and 4.
                        U_param['Values'][line2[0]] = {}
                        U_param['Values'][line2[0]]['L'] = float(line2[1])
                        U_param['Values'][line2[0]]['U'] = float(line2[2])
                        U_param['Values'][line2[0]]['J'] = float(line2[4])
                    else:
                        break  # end of data block
                return Value(**U_param)
    return None
6,009
https://github.com/CitrineInformatics/pif-dft/blob/d5411dc1f6c6e8d454b132977ca7ab3bb8131a80/dfttopif/parsers/pwscf.py#L185-L205
[ "def", "detach_session", "(", "self", ")", ":", "if", "self", ".", "_session", "is", "not", "None", ":", "self", ".", "_session", ".", "unsubscribe", "(", "self", ")", "self", ".", "_session", "=", "None" ]
Determine the vdW type if using vdW xc functional or correction scheme from the input otherwise
def get_vdW_settings(self):
    """Determine the van der Waals treatment used in the calculation.

    If the exchange-correlation functional itself is a vdW functional, its
    name is returned.  Otherwise the input file is searched for a
    ``vdw_corr`` correction scheme.

    Returns:
        Value holding the vdW scheme name, or None if no vdW treatment is
        in use.

    Raises:
        Exception: if the input file does not exist.
    """
    xc = self.get_xc_functional().scalars[0].value
    if 'vdw' in xc.lower():  # vdW xc functional
        return Value(scalars=[Scalar(value=xc)])

    # Map the vdw_corr input keyword onto a human-readable scheme name.
    vdW_dict = {'xdm': 'Becke-Johnson XDM',
                'ts': 'Tkatchenko-Scheffler',
                'ts-vdw': 'Tkatchenko-Scheffler',
                'tkatchenko-scheffler': 'Tkatchenko-Scheffler',
                'grimme-d2': 'Grimme D2',
                'dft-d': 'Grimme D2'}

    # NOTE(fix): the original scanned the input file twice (an existence
    # probe followed by retrieval).  Scan once: "keyword not found" means
    # no correction, while a missing input file still propagates as before.
    try:
        line = self._get_line('vdw_corr', self.inputf, return_string=True, case_sens=False)
    except Exception:
        if not os.path.isfile(self.inputf):
            raise  # missing input file is a real error
        return None  # vdw_corr not present: no vdW correction scheme

    vdwkey = str(line.split('=')[-1].replace("'", "").replace(',', '').lower().rstrip())
    return Value(scalars=[Scalar(value=vdW_dict[vdwkey])])
6,010
https://github.com/CitrineInformatics/pif-dft/blob/d5411dc1f6c6e8d454b132977ca7ab3bb8131a80/dfttopif/parsers/pwscf.py#L207-L224
[ "def", "list_objects", "(", "self", ",", "instance", ",", "bucket_name", ",", "prefix", "=", "None", ",", "delimiter", "=", "None", ")", ":", "url", "=", "'/buckets/{}/{}'", ".", "format", "(", "instance", ",", "bucket_name", ")", "params", "=", "{", "}", "if", "prefix", "is", "not", "None", ":", "params", "[", "'prefix'", "]", "=", "prefix", "if", "delimiter", "is", "not", "None", ":", "params", "[", "'delimiter'", "]", "=", "delimiter", "response", "=", "self", ".", "_client", ".", "get_proto", "(", "path", "=", "url", ",", "params", "=", "params", ")", "message", "=", "rest_pb2", ".", "ListObjectsResponse", "(", ")", "message", ".", "ParseFromString", "(", "response", ".", "content", ")", "return", "ObjectListing", "(", "message", ",", "instance", ",", "bucket_name", ",", "self", ")" ]
Determine the stress tensor from the output
def get_stresses(self):
    """Determine the stress tensor from the output, if one was parsed."""
    settings = self.settings
    if "stress" not in settings:
        return None
    matrix = [[Scalar(value=entry) for entry in row] for row in settings["stress"]]
    return Property(matrices=[matrix], units=settings["stress units"])
6,011
https://github.com/CitrineInformatics/pif-dft/blob/d5411dc1f6c6e8d454b132977ca7ab3bb8131a80/dfttopif/parsers/pwscf.py#L230-L235
[ "def", "remove_server", "(", "self", ",", "server_id", ")", ":", "# Validate server_id", "server", "=", "self", ".", "_get_server", "(", "server_id", ")", "# Delete any instances we recorded to be cleaned up", "if", "server_id", "in", "self", ".", "_owned_subscriptions", ":", "inst_list", "=", "self", ".", "_owned_subscriptions", "[", "server_id", "]", "# We iterate backwards because we change the list", "for", "i", "in", "six", ".", "moves", ".", "range", "(", "len", "(", "inst_list", ")", "-", "1", ",", "-", "1", ",", "-", "1", ")", ":", "inst", "=", "inst_list", "[", "i", "]", "server", ".", "conn", ".", "DeleteInstance", "(", "inst", ".", "path", ")", "del", "inst_list", "[", "i", "]", "del", "self", ".", "_owned_subscriptions", "[", "server_id", "]", "if", "server_id", "in", "self", ".", "_owned_filters", ":", "inst_list", "=", "self", ".", "_owned_filters", "[", "server_id", "]", "# We iterate backwards because we change the list", "for", "i", "in", "six", ".", "moves", ".", "range", "(", "len", "(", "inst_list", ")", "-", "1", ",", "-", "1", ",", "-", "1", ")", ":", "inst", "=", "inst_list", "[", "i", "]", "server", ".", "conn", ".", "DeleteInstance", "(", "inst", ".", "path", ")", "del", "inst_list", "[", "i", "]", "del", "self", ".", "_owned_filters", "[", "server_id", "]", "if", "server_id", "in", "self", ".", "_owned_destinations", ":", "inst_list", "=", "self", ".", "_owned_destinations", "[", "server_id", "]", "# We iterate backwards because we change the list", "for", "i", "in", "six", ".", "moves", ".", "range", "(", "len", "(", "inst_list", ")", "-", "1", ",", "-", "1", ",", "-", "1", ")", ":", "inst", "=", "inst_list", "[", "i", "]", "server", ".", "conn", ".", "DeleteInstance", "(", "inst", ".", "path", ")", "del", "inst_list", "[", "i", "]", "del", "self", ".", "_owned_destinations", "[", "server_id", "]", "# Remove server from this listener", "del", "self", ".", "_servers", "[", "server_id", "]" ]
Find the total DOS shifted by the Fermi energy
def get_dos(self):
    """Find the total DOS, with energies shifted by the Fermi energy.

    Searches ``self._files`` for a DOS file (identified by its header
    line), reads the Fermi energy from the main output file, then returns
    the summed DOS with the shifted energy grid attached as a condition.

    Returns:
        Property with the total DOS, or None if no DOS file can be found.
    """
    # Locate the DOS file by its header and count the spin channels.
    # NOTE(fix): redundant fp.close() calls inside/after the `with` block
    # were removed; the context manager already closes the file.
    fildos = ''
    ndoscol = 0
    for candidate in self._files:
        with open(candidate, 'r') as fp:
            first_line = next(fp)
            if "E (eV)" in first_line and "Int dos(E)" in first_line:
                fildos = candidate
                # number of spin channels = columns minus E and Int dos(E)
                ndoscol = len(next(fp).split()) - 2
                break
    if not fildos:
        return None  # cannot find DOS

    # Get the Fermi energy from the main output.
    line = self._get_line('the Fermi energy is', self.outputf)
    efermi = float(line.split('is')[-1].split()[0])

    # Grab the DOS: shift energies by efermi and sum the spin channels.
    # NOTE(fix): the original opened fildos without ever closing it; use a
    # context manager so the handle is released.
    energy = []
    dos = []
    with open(fildos, 'r') as fp:
        next(fp)  # skip the header/comment line
        for line in fp:
            ls = line.split()
            energy.append(Scalar(value=float(ls[0]) - efermi))
            dos.append(Scalar(value=sum(float(i) for i in ls[1:1 + ndoscol])))
    return Property(scalars=dos, units='number of states per unit cell',
                    conditions=Value(name='energy', scalars=energy, units='eV'))
6,012
https://github.com/CitrineInformatics/pif-dft/blob/d5411dc1f6c6e8d454b132977ca7ab3bb8131a80/dfttopif/parsers/pwscf.py#L319-L347
[ "def", "is_connected", "(", "self", ",", "attempts", "=", "3", ")", ":", "if", "self", ".", "gce", "is", "None", ":", "while", "attempts", ">", "0", ":", "self", ".", "logger", ".", "info", "(", "\"Attempting to connect ...\"", ")", "try", ":", "self", ".", "connect", "(", ")", "except", "ComputeEngineManagerException", ":", "attempts", "-=", "1", "continue", "self", ".", "logger", ".", "info", "(", "\"Connection established.\"", ")", "return", "True", "self", ".", "logger", ".", "error", "(", "\"Unable to connect to Google Compute Engine.\"", ")", "return", "False", "return", "True" ]
Compute the band gap from the DOS
def get_band_gap(self):
    """Compute the band gap from the DOS.

    Scans the Fermi-shifted DOS for the highest occupied energy below zero
    and the lowest unoccupied energy above zero, using a density threshold
    of 1e-3.

    Returns:
        Property with the gap in eV (0 when the edges are within two grid
        steps of each other), or None when no DOS is available.

    Raises:
        Exception: if no band edges can be located at all.
    """
    dosdata = self.get_dos()
    # NOTE(fix): identity test replaces the `type(...) == type(None)` idiom.
    if dosdata is None:
        return None  # cannot find DOS

    energy = dosdata.conditions.scalars
    dos = dosdata.scalars
    step_size = energy[1].value - energy[0].value
    not_found = True
    l = 0
    bot = 10 ** 3
    top = -10 ** 3
    while not_found and l < len(dos):
        # iterate through the data
        e = float(energy[l].value)
        dens = float(dos[l].value)
        # note: dos already shifted by efermi
        if e < 0 and dens > 1e-3:
            bot = e
        elif e > 0 and dens > 1e-3:
            top = e
            not_found = False
        l += 1
    if top < bot:
        raise Exception('Algorithm failed to find the band gap')
    elif top - bot < step_size * 2:
        return Property(scalars=[Scalar(value=0)], units='eV')
    else:
        bandgap = float(top - bot)
        return Property(scalars=[Scalar(value=round(bandgap, 3))], units='eV')
6,013
https://github.com/CitrineInformatics/pif-dft/blob/d5411dc1f6c6e8d454b132977ca7ab3bb8131a80/dfttopif/parsers/pwscf.py#L369-L396
[ "def", "userstream_user", "(", "self", ",", "delegate", ",", "stall_warnings", "=", "None", ",", "with_", "=", "'followings'", ",", "replies", "=", "None", ")", ":", "params", "=", "{", "'stringify_friend_ids'", ":", "'true'", "}", "set_bool_param", "(", "params", ",", "'stall_warnings'", ",", "stall_warnings", ")", "set_str_param", "(", "params", ",", "'with'", ",", "with_", ")", "set_str_param", "(", "params", ",", "'replies'", ",", "replies", ")", "svc", "=", "TwitterStreamService", "(", "lambda", ":", "self", ".", "_get_userstream", "(", "'user.json'", ",", "params", ")", ",", "delegate", ")", "return", "svc" ]
Returns a list of category aliases under the given parent .
def get_category_aliases_under(parent_alias=None):
    """Return the aliases of child categories under the given parent.

    :param parent_alias: alias of the parent category, or None.
    """
    children = get_cache().get_children_for(parent_alias, only_with_aliases=True)
    return [child.alias for child in children]
6,014
https://github.com/idlesign/django-sitecats/blob/9b45e91fc0dcb63a0011780437fe28145e3ecce9/sitecats/toolbox.py#L16-L26
[ "def", "get_rsa_key", "(", "self", ",", "username", ")", ":", "try", ":", "resp", "=", "self", ".", "session", ".", "post", "(", "'https://steamcommunity.com/login/getrsakey/'", ",", "timeout", "=", "15", ",", "data", "=", "{", "'username'", ":", "username", ",", "'donotchache'", ":", "int", "(", "time", "(", ")", "*", "1000", ")", ",", "}", ",", ")", ".", "json", "(", ")", "except", "requests", ".", "exceptions", ".", "RequestException", "as", "e", ":", "raise", "HTTPError", "(", "str", "(", "e", ")", ")", "return", "resp" ]
Returns a list of CategoryList objects optionally associated with a given model instance .
def get_category_lists(init_kwargs=None, additional_parents_aliases=None, obj=None):
    """Return CategoryList objects, optionally tied to a model instance.

    :param init_kwargs: keyword arguments passed to each CategoryList.
    :param additional_parents_aliases: extra parent aliases to include.
    :param obj: model instance whose category ties should be considered.
    """
    init_kwargs = init_kwargs or {}
    additional_parents_aliases = additional_parents_aliases or []

    parent_aliases = additional_parents_aliases
    if obj is not None:
        # Collect the parents of every category tied to the given object.
        ctype = ContentType.objects.get_for_model(obj)
        cat_ids = [
            item[0] for item in
            get_tie_model().objects.filter(content_type=ctype, object_id=obj.id).values_list('category_id').all()
        ]
        parent_aliases = list(get_cache().get_parents_for(cat_ids).union(additional_parents_aliases))

    aliases = get_cache().sort_aliases(parent_aliases)
    categories_cache = get_cache().get_categories(aliases, obj)

    lists = []
    for parent_alias in aliases:
        catlist = CategoryList(parent_alias, **init_kwargs)  # TODO Burned in class name. Make more customizable.
        if obj is not None:
            catlist.set_obj(obj)
        # Optimization: prime the categories cache to keep DB hits down.
        catlist.set_get_categories_cache(categories_cache.get(parent_alias, []))
        lists.append(catlist)
    return lists
6,015
https://github.com/idlesign/django-sitecats/blob/9b45e91fc0dcb63a0011780437fe28145e3ecce9/sitecats/toolbox.py#L29-L68
[ "def", "get_rsa_key", "(", "self", ",", "username", ")", ":", "try", ":", "resp", "=", "self", ".", "session", ".", "post", "(", "'https://steamcommunity.com/login/getrsakey/'", ",", "timeout", "=", "15", ",", "data", "=", "{", "'username'", ":", "username", ",", "'donotchache'", ":", "int", "(", "time", "(", ")", "*", "1000", ")", ",", "}", ",", ")", ".", "json", "(", ")", "except", "requests", ".", "exceptions", ".", "RequestException", "as", "e", ":", "raise", "HTTPError", "(", "str", "(", "e", ")", ")", "return", "resp" ]
Registers CategoryList objects to handle their requests .
def register_lists(self, category_lists, lists_init_kwargs=None, editor_init_kwargs=None):
    """Register CategoryList objects so the handler can process their requests.

    :param category_lists: CategoryList instances or base category aliases.
    :param lists_init_kwargs: kwargs used to spawn lists from aliases; also
        mirrored onto every registered list as attributes.
    :param editor_init_kwargs: kwargs passed to each list's editor.
    :raises SitecatsConfigurationError: for unsupported list items.
    """
    lists_init_kwargs = lists_init_kwargs or {}
    editor_init_kwargs = editor_init_kwargs or {}

    for lst in category_lists:
        if isinstance(lst, string_types):
            # Spawn a CategoryList object from a base category alias.
            lst = self.list_cls(lst, **lists_init_kwargs)
        elif not isinstance(lst, CategoryList):
            raise SitecatsConfigurationError(
                '`CategoryRequestHandler.register_lists()` accepts only '
                '`CategoryList` objects or category aliases.')

        if self._obj:
            lst.set_obj(self._obj)

        # Mirror the init kwargs onto the list as attributes.
        for attr_name, attr_val in lists_init_kwargs.items():
            setattr(lst, attr_name, attr_val)

        lst.enable_editor(**editor_init_kwargs)
        self._lists[lst.get_id()] = lst
6,016
https://github.com/idlesign/django-sitecats/blob/9b45e91fc0dcb63a0011780437fe28145e3ecce9/sitecats/toolbox.py#L254-L280
[ "def", "_get_options", "(", "ret", "=", "None", ")", ":", "attrs", "=", "{", "'url'", ":", "'url'", ",", "'db'", ":", "'db'", ",", "'user'", ":", "'user'", ",", "'passwd'", ":", "'passwd'", ",", "'redact_pws'", ":", "'redact_pws'", ",", "'minimum_return'", ":", "'minimum_return'", "}", "_options", "=", "salt", ".", "returners", ".", "get_returner_options", "(", "__virtualname__", ",", "ret", ",", "attrs", ",", "__salt__", "=", "__salt__", ",", "__opts__", "=", "__opts__", ")", "if", "'url'", "not", "in", "_options", ":", "log", ".", "debug", "(", "\"Using default url.\"", ")", "_options", "[", "'url'", "]", "=", "\"http://salt:5984/\"", "if", "'db'", "not", "in", "_options", ":", "log", ".", "debug", "(", "\"Using default database.\"", ")", "_options", "[", "'db'", "]", "=", "\"salt\"", "if", "'user'", "not", "in", "_options", ":", "log", ".", "debug", "(", "\"Not athenticating with a user.\"", ")", "_options", "[", "'user'", "]", "=", "None", "if", "'passwd'", "not", "in", "_options", ":", "log", ".", "debug", "(", "\"Not athenticating with a password.\"", ")", "_options", "[", "'passwd'", "]", "=", "None", "if", "'redact_pws'", "not", "in", "_options", ":", "log", ".", "debug", "(", "\"Not redacting passwords.\"", ")", "_options", "[", "'redact_pws'", "]", "=", "None", "if", "'minimum_return'", "not", "in", "_options", ":", "log", ".", "debug", "(", "\"Not minimizing the return object.\"", ")", "_options", "[", "'minimum_return'", "]", "=", "None", "return", "_options" ]
Handles remove action from CategoryList editor .
def action_remove(cls, request, category_list):
    """Handle the `remove` action from a CategoryList editor.

    Validates that removal is permitted for the POSTed category, then
    either deletes the category outright (no object tied to the list) or
    removes only the category-to-object tie.

    :param request: HTTP request carrying `category_id` in POST data.
    :param category_list: CategoryList the category is removed from.
    :return: True on success.
    :raises SitecatsSecurityException: on forbidden or malformed input.
    :raises SitecatsValidationError: when the parent requires a minimum
        number of subcategories.
    """
    if not category_list.editor.allow_remove:
        raise SitecatsSecurityException('`action_remove()` is not supported by parent `%s`category.' % category_list.alias)

    category_id = int(request.POST.get('category_id', 0))
    if not category_id:
        raise SitecatsSecurityException('Unsupported `category_id` value - `%s` - is passed to `action_remove()`.' % category_id)

    category = get_cache().get_category_by_id(category_id)
    if not category:
        raise SitecatsSecurityException('Unable to get `%s` category in `action_remove()`.' % category_id)

    # Prefer the alias for error messages; fall back to the numeric id.
    cat_ident = category.alias or category.id

    if category.is_locked:
        raise SitecatsSecurityException('`action_remove()` is not supported by `%s` category.' % cat_ident)

    if category.parent_id != category_list.get_id():
        raise SitecatsSecurityException('`action_remove()` is unable to remove `%s`: '
                                        'not a child of parent `%s` category.' % (cat_ident, category_list.alias))

    min_num = category_list.editor.min_num

    def check_min_num(num):
        # Refuse removal when it would leave fewer than `min_num` children.
        if min_num is not None and num - 1 < min_num:
            subcats_str = ungettext_lazy('subcategory', 'subcategories', min_num)
            error_msg = _(
                'Unable to remove "%(target_category)s" category from "%(parent_category)s": '
                'parent category requires at least %(num)s %(subcats_str)s.') % {
                'target_category': category.title,
                'parent_category': category_list.get_title(),
                'num': min_num,
                'subcats_str': subcats_str}
            raise SitecatsValidationError(error_msg)

    child_ids = get_cache().get_child_ids(category_list.alias)
    check_min_num(len(child_ids))

    if category_list.obj is None:
        # Remove category itself and children.
        category.delete()
    else:
        # Remove just a category-to-object tie.
        # TODO filter user/status
        check_min_num(category_list.obj.get_ties_for_categories_qs(child_ids).count())
        category_list.obj.remove_from_category(category)

    return True
6,017
https://github.com/idlesign/django-sitecats/blob/9b45e91fc0dcb63a0011780437fe28145e3ecce9/sitecats/toolbox.py#L283-L342
[ "def", "getOverlayTransformTrackedDeviceRelative", "(", "self", ",", "ulOverlayHandle", ")", ":", "fn", "=", "self", ".", "function_table", ".", "getOverlayTransformTrackedDeviceRelative", "punTrackedDevice", "=", "TrackedDeviceIndex_t", "(", ")", "pmatTrackedDeviceToOverlayTransform", "=", "HmdMatrix34_t", "(", ")", "result", "=", "fn", "(", "ulOverlayHandle", ",", "byref", "(", "punTrackedDevice", ")", ",", "byref", "(", "pmatTrackedDeviceToOverlayTransform", ")", ")", "return", "result", ",", "punTrackedDevice", ",", "pmatTrackedDeviceToOverlayTransform" ]
Handles add action from CategoryList editor .
def action_add ( cls , request , category_list ) : if not category_list . editor . allow_add : raise SitecatsSecurityException ( '`action_add()` is not supported by `%s` category.' % category_list . alias ) titles = request . POST . get ( 'category_title' , '' ) . strip ( ) if not titles : raise SitecatsSecurityException ( 'Unsupported `category_title` value - `%s` - is passed to `action_add()`.' % titles ) if category_list . editor . category_separator is None : titles = [ titles ] else : titles = [ title . strip ( ) for title in titles . split ( category_list . editor . category_separator ) if title . strip ( ) ] def check_max_num ( num , max_num , category_title ) : if max_num is not None and num + 1 > max_num : subcats_str = ungettext_lazy ( 'subcategory' , 'subcategories' , max_num ) error_msg = _ ( 'Unable to add "%(target_category)s" category into "%(parent_category)s": ' 'parent category can have at most %(num)s %(subcats_str)s.' ) % { 'target_category' : category_title , 'parent_category' : category_list . get_title ( ) , 'num' : max_num , 'subcats_str' : subcats_str } raise SitecatsValidationError ( error_msg ) target_category = None for category_title in titles : exists = get_cache ( ) . find_category ( category_list . alias , category_title ) if exists and category_list . obj is None : # Already exists. return exists if not exists and not category_list . editor . allow_new : error_msg = _ ( 'Unable to create a new "%(new_category)s" category inside of "%(parent_category)s": ' 'parent category does not support this action.' ) % { 'new_category' : category_title , 'parent_category' : category_list . get_title ( ) } raise SitecatsNewCategoryException ( error_msg ) max_num = category_list . editor . max_num child_ids = get_cache ( ) . get_child_ids ( category_list . alias ) if not exists : # Add new category. if category_list . 
obj is None : check_max_num ( len ( child_ids ) , max_num , category_title ) # TODO status target_category = get_category_model ( ) . add ( category_title , request . user , parent = category_list . get_category_model ( ) ) else : target_category = exists # Use existing one for a tie. if category_list . obj is not None : # TODO status check_max_num ( category_list . obj . get_ties_for_categories_qs ( child_ids ) . count ( ) , max_num , category_title ) category_list . obj . add_to_category ( target_category , request . user ) return target_category
6,018
https://github.com/idlesign/django-sitecats/blob/9b45e91fc0dcb63a0011780437fe28145e3ecce9/sitecats/toolbox.py#L345-L418
[ "def", "getOverlayTransformTrackedDeviceRelative", "(", "self", ",", "ulOverlayHandle", ")", ":", "fn", "=", "self", ".", "function_table", ".", "getOverlayTransformTrackedDeviceRelative", "punTrackedDevice", "=", "TrackedDeviceIndex_t", "(", ")", "pmatTrackedDeviceToOverlayTransform", "=", "HmdMatrix34_t", "(", ")", "result", "=", "fn", "(", "ulOverlayHandle", ",", "byref", "(", "punTrackedDevice", ")", ",", "byref", "(", "pmatTrackedDeviceToOverlayTransform", ")", ")", "return", "result", ",", "punTrackedDevice", ",", "pmatTrackedDeviceToOverlayTransform" ]
To shrink a PNG image post the data to the API service . The response is a JSON message . The initial request must be authorized with HTTP Basic authorization .
def shrink ( image , apikey ) : def _handle_response ( response ) : body = json . loads ( response . read ( ) ) if response . code == TinyPNGResponse . SUCCESS_CODE : body [ 'location' ] = response . headers . getheader ( "Location" ) try : body [ 'bytes' ] = urlopen ( body [ 'location' ] ) . read ( ) except : body [ 'bytes' ] = None return response . code , body auth = b64encode ( bytes ( "api:" + apikey ) ) . decode ( "ascii" ) request = Request ( TINYPNG_SHRINK_URL , image ) request . add_header ( "Authorization" , "Basic %s" % auth ) try : response = urlopen ( request ) ( code , response_dict ) = _handle_response ( response ) except HTTPError as e : ( code , response_dict ) = _handle_response ( e ) return TinyPNGResponse ( code , * * response_dict )
6,019
https://github.com/vasilcovsky/pytinypng/blob/ac633e4aa41122c49a806f411e43a76d8f73058e/pytinypng/api.py#L10-L41
[ "def", "convert", "(", "self", ",", "targetunits", ")", ":", "nunits", "=", "units", ".", "Units", "(", "targetunits", ")", "self", ".", "waveunits", "=", "nunits" ]
Setup URLS and download dependencies for Python 3 . 6 +
def download_and_install_dependencies ( ) : try : import requests except ImportError : raise ValueError ( "Python 3.6+ is required." ) dependencies = { "hmm_databases" : HMM_URL } if sys . platform . startswith ( "linux" ) or "bsd" in sys . platform : dependencies [ "prodigal" ] = "{}.linux" . format ( BASE_PRODIGAL ) dependencies [ "louvain" ] = ( "https://lip6.github.io/Louvain-BinaryBuild/" "louvain_linux.tar.gz" ) elif sys . platform == "darwin" : dependencies [ "prodigal" ] = "{}.osx.10.9.5" . format ( BASE_PRODIGAL ) dependencies [ "louvain" ] = ( "https://github.com/lip6/Louvain-BinaryBuilds/raw/osx/" "louvain_osx.tar.gz" ) elif sys . platform . startswith ( "win" ) or sys . platform == "cygwin" : dependencies [ "prodigal" ] = "{}.windows.exe" dependencies [ "louvain" ] = ( "https://ci.appveyor.com/api/projects/yanntm/" "Louvain-BinaryBuild/artifacts/website/" "louvain_windows.tar.gz" ) else : raise NotImplementedError ( "Your platform is not supported: {}" . format ( sys . platform ) ) cache_dir = pathlib . Path . cwd ( ) / pathlib . Path ( "cache" ) try : print ( "Downloading dependencies..." ) cache_dir . mkdir ( ) for dependency_name , url in dependencies . items ( ) : print ( "Downloading {} at {}" . format ( dependency_name , url ) ) request = requests . get ( url ) basename = url . split ( "/" ) [ - 1 ] with open ( cache_dir / basename , "wb" ) as handle : print ( dependency_name , basename , cache_dir / basename ) handle . write ( request . content ) except FileExistsError : print ( "Using cached dependencies..." ) share_dir = pathlib . Path . cwd ( ) tools_dir = share_dir / "tools" louvain_dir = tools_dir / "louvain" louvain_dir . mkdir ( parents = True , exist_ok = True ) louvain_basename = dependencies [ "louvain" ] . split ( "/" ) [ - 1 ] louvain_path = louvain_dir / louvain_basename ( cache_dir / louvain_basename ) . replace ( louvain_path ) with tarfile . open ( louvain_path , "r:gz" ) as tar : tar . 
extractall ( ) hmm_basename = dependencies [ "hmm_databases" ] . split ( "/" ) [ - 1 ] hmm_path = share_dir / hmm_basename ( cache_dir / hmm_basename ) . replace ( hmm_path ) prodigal_basename = dependencies [ "prodigal" ] . split ( "/" ) [ - 1 ] prodigal_path = tools_dir / "prodigal" ( cache_dir / prodigal_basename ) . replace ( prodigal_path )
6,020
https://github.com/koszullab/metaTOR/blob/0c1203d1dffedfa5ea380c0335b4baa9cfb7e89a/metator/metator.py#L20-L94
[ "def", "register", "(", "cls", ",", "name", ",", "description", ",", "options_registrar_cls", "=", "None", ")", ":", "goal", "=", "cls", ".", "by_name", "(", "name", ")", "goal", ".", "_description", "=", "description", "goal", ".", "_options_registrar_cls", "=", "(", "options_registrar_cls", ".", "registrar_for_scope", "(", "name", ")", "if", "options_registrar_cls", "else", "None", ")", "return", "goal" ]
method to fetch all contents as a list
def get ( self ) : ret_list = [ ] if hasattr ( self , "font" ) : ret_list . append ( self . font ) if hasattr ( self , "size" ) : ret_list . append ( self . size ) if hasattr ( self , "text" ) : ret_list . append ( self . text ) return ret_list
6,021
https://github.com/Godley/MuseParse/blob/23cecafa1fdc0f2d6a87760553572b459f3c9904/MuseParse/classes/ObjectHierarchy/ItemClasses/Directions.py#L33-L46
[ "def", "catalogFactory", "(", "name", ",", "*", "*", "kwargs", ")", ":", "fn", "=", "lambda", "member", ":", "inspect", ".", "isclass", "(", "member", ")", "and", "member", ".", "__module__", "==", "__name__", "catalogs", "=", "odict", "(", "inspect", ".", "getmembers", "(", "sys", ".", "modules", "[", "__name__", "]", ",", "fn", ")", ")", "if", "name", "not", "in", "list", "(", "catalogs", ".", "keys", "(", ")", ")", ":", "msg", "=", "\"%s not found in catalogs:\\n %s\"", "%", "(", "name", ",", "list", "(", "kernels", ".", "keys", "(", ")", ")", ")", "logger", ".", "error", "(", "msg", ")", "msg", "=", "\"Unrecognized catalog: %s\"", "%", "name", "raise", "Exception", "(", "msg", ")", "return", "catalogs", "[", "name", "]", "(", "*", "*", "kwargs", ")" ]
Extract the text in between a prefix and surfix . It use non - greedy match .
def extract_by_prefix_surfix ( text , prefix , surfix , minlen = None , maxlen = None , include = False ) : if minlen is None : minlen = 0 if maxlen is None : maxlen = 2 ** 30 pattern = r"""(?<=%s)[\s\S]{%s,%s}?(?=%s)""" % ( prefix , minlen , maxlen , surfix ) if include : return [ prefix + s + surfix for s in re . findall ( pattern , text ) ] else : return re . findall ( pattern , text )
6,022
https://github.com/MacHu-GWU/single_file_module-project/blob/01f7a6b250853bebfd73de275895bf274325cfc1/sfm/rerecipe.py#L13-L48
[ "def", "delete_table", "(", "self", ",", "table", ")", ":", "table", "=", "table", ".", "get_soap_object", "(", "self", ".", "client", ")", "return", "self", ".", "call", "(", "'deleteTable'", ",", "table", ")" ]
Extract digit character from text .
def extract_number ( text ) : result = list ( ) chunk = list ( ) valid_char = set ( ".1234567890" ) for char in text : if char in valid_char : chunk . append ( char ) else : result . append ( "" . join ( chunk ) ) chunk = list ( ) result . append ( "" . join ( chunk ) ) result_new = list ( ) for number in result : if "." in number : try : result_new . append ( float ( number ) ) except : pass else : try : result_new . append ( int ( number ) ) except : pass return result_new
6,023
https://github.com/MacHu-GWU/single_file_module-project/blob/01f7a6b250853bebfd73de275895bf274325cfc1/sfm/rerecipe.py#L51-L78
[ "def", "get_traindata", "(", "self", ")", "->", "np", ".", "ndarray", ":", "traindata", "=", "None", "for", "key", ",", "value", "in", "self", ".", "data", ".", "items", "(", ")", ":", "if", "key", "not", "in", "[", "'__header__'", ",", "'__version__'", ",", "'__globals__'", "]", ":", "if", "traindata", "is", "None", ":", "traindata", "=", "value", "[", "np", ".", "where", "(", "value", "[", ":", ",", "4", "]", "!=", "0", ")", "]", "else", ":", "traindata", "=", "np", ".", "concatenate", "(", "(", "traindata", ",", "value", "[", "np", ".", "where", "(", "value", "[", ":", ",", "4", "]", "!=", "0", ")", "]", ")", ")", "return", "traindata" ]
Extract email from text .
def extract_email ( text ) : result = list ( ) for tp in re . findall ( _regex_extract_email , text . lower ( ) ) : for email in tp : if re . match ( _regex_validate_email , email ) : result . append ( email ) return result
6,024
https://github.com/MacHu-GWU/single_file_module-project/blob/01f7a6b250853bebfd73de275895bf274325cfc1/sfm/rerecipe.py#L87-L95
[ "def", "_check_zone_exception_for_date", "(", "self", ",", "item", ",", "timestamp", ",", "data_dict", ",", "data_index_dict", ")", ":", "if", "item", "in", "data_index_dict", ":", "for", "item", "in", "data_index_dict", "[", "item", "]", ":", "# startdate < timestamp", "if", "const", ".", "START", "in", "data_dict", "[", "item", "]", "and", "not", "const", ".", "END", "in", "data_dict", "[", "item", "]", ":", "if", "data_dict", "[", "item", "]", "[", "const", ".", "START", "]", "<", "timestamp", ":", "return", "data_dict", "[", "item", "]", "[", "const", ".", "CQZ", "]", "# enddate > timestamp", "elif", "not", "const", ".", "START", "in", "data_dict", "[", "item", "]", "and", "const", ".", "END", "in", "data_dict", "[", "item", "]", ":", "if", "data_dict", "[", "item", "]", "[", "const", ".", "END", "]", ">", "timestamp", ":", "return", "data_dict", "[", "item", "]", "[", "const", ".", "CQZ", "]", "# startdate > timestamp > enddate", "elif", "const", ".", "START", "in", "data_dict", "[", "item", "]", "and", "const", ".", "END", "in", "data_dict", "[", "item", "]", ":", "if", "data_dict", "[", "item", "]", "[", "const", ".", "START", "]", "<", "timestamp", "and", "data_dict", "[", "item", "]", "[", "const", ".", "END", "]", ">", "timestamp", ":", "return", "data_dict", "[", "item", "]", "[", "const", ".", "CQZ", "]", "# no startdate or enddate available", "elif", "not", "const", ".", "START", "in", "data_dict", "[", "item", "]", "and", "not", "const", ".", "END", "in", "data_dict", "[", "item", "]", ":", "return", "data_dict", "[", "item", "]", "[", "const", ".", "CQZ", "]", "raise", "KeyError" ]
Add Signature Authorization header to case - insensitive header dict .
def sign ( self , headers : Mapping , method = None , path = None ) : required_headers = self . header_list message = generate_message ( required_headers , headers , method , path ) signature = encode_string ( self . _signer . sign ( message ) , 'base64' ) ret_headers = multidict . CIMultiDict ( headers ) ret_headers [ 'Authorization' ] = self . _signature_tpl % signature . decode ( 'ascii' ) return ret_headers
6,025
https://github.com/PSPC-SPAC-buyandsell/didauth/blob/e242fff8eddebf6ed52a65b161a229cdfbf5226e/didauth/headers.py#L60-L77
[ "def", "unlink_rich_menu_from_user", "(", "self", ",", "user_id", ",", "timeout", "=", "None", ")", ":", "self", ".", "_delete", "(", "'/v2/bot/user/{user_id}/richmenu'", ".", "format", "(", "user_id", "=", "user_id", ")", ",", "timeout", "=", "timeout", ")" ]
Parse Signature Authorization header and verify signature
async def verify ( self , headers : Mapping , method = None , path = None ) : if not 'authorization' in headers : return False auth_type , auth_params = parse_authorization_header ( headers [ 'authorization' ] ) if auth_type . lower ( ) != 'signature' : return False for param in ( 'algorithm' , 'keyId' , 'signature' ) : if param not in auth_params : raise VerifierException ( "Unsupported HTTP signature, missing '{}'" . format ( param ) ) auth_headers = ( auth_params . get ( 'headers' ) or 'date' ) . lower ( ) . strip ( ) . split ( ) missing_reqd = set ( self . _required_headers ) - set ( auth_headers ) if missing_reqd : error_headers = ', ' . join ( missing_reqd ) raise VerifierException ( 'One or more required headers not provided: {}' . format ( error_headers ) ) key_id , algo = auth_params [ 'keyId' ] , auth_params [ 'algorithm' ] if not self . _handlers . supports ( algo ) : raise VerifierException ( "Unsupported HTTP signature algorithm '{}'" . format ( algo ) ) pubkey = await self . _key_finder . find_key ( key_id , algo ) if not pubkey : raise VerifierException ( "Cannot locate public key for '{}'" . format ( key_id ) ) LOGGER . debug ( "Got %s public key for '%s': %s" , algo , key_id , pubkey ) handler = self . _handlers . create_verifier ( algo , pubkey ) message = generate_message ( auth_headers , headers , method , path ) signature = auth_params [ 'signature' ] raw_signature = decode_string ( signature , 'base64' ) if handler . verify ( message , raw_signature ) : return { 'verified' : True , 'algorithm' : algo , 'headers' : auth_headers , 'keyId' : key_id , 'key' : pubkey , 'signature' : signature } raise VerifierException ( "Signature could not be verified for keyId '{}'" . format ( key_id ) )
6,026
https://github.com/PSPC-SPAC-buyandsell/didauth/blob/e242fff8eddebf6ed52a65b161a229cdfbf5226e/didauth/headers.py#L92-L147
[ "def", "WriteBlobsWithUnknownHashes", "(", "self", ",", "blobs_data", ")", ":", "blobs_ids", "=", "[", "rdf_objects", ".", "BlobID", ".", "FromBlobData", "(", "d", ")", "for", "d", "in", "blobs_data", "]", "self", ".", "WriteBlobs", "(", "dict", "(", "zip", "(", "blobs_ids", ",", "blobs_data", ")", ")", ")", "return", "blobs_ids" ]
match the argv for each usages return dict .
def docpie ( self , argv = None ) : token = self . _prepare_token ( argv ) # check first, raise after # so `-hwhatever` can trigger `-h` first self . check_flag_and_handler ( token ) if token . error is not None : # raise DocpieExit('%s\n\n%s' % (token.error, help_msg)) self . exception_handler ( token . error ) try : result , dashed = self . _match ( token ) except DocpieExit as e : self . exception_handler ( e ) # if error is not None: # self.exception_handler(error) value = result . get_value ( self . appeared_only , False ) self . clear ( ) self . update ( value ) if self . appeared_only : self . _drop_non_appeared ( ) logger . debug ( 'get all matched value %s' , self ) rest = list ( self . usages ) # a copy rest . remove ( result ) self . _add_rest_value ( rest ) logger . debug ( 'merged rest values, now %s' , self ) self . _add_option_value ( ) self . _dashes_value ( dashed ) return dict ( self )
6,027
https://github.com/TylerTemp/docpie/blob/e658454b81b6c79a020d499f12ad73496392c09a/docpie/pie.py#L128-L168
[ "def", "layer_to_solr", "(", "self", ",", "layer", ")", ":", "success", "=", "True", "message", "=", "'Synced layer id %s to Solr'", "%", "layer", ".", "id", "layer_dict", ",", "message", "=", "layer2dict", "(", "layer", ")", "if", "not", "layer_dict", ":", "success", "=", "False", "else", ":", "layer_json", "=", "json", ".", "dumps", "(", "layer_dict", ")", "try", ":", "url_solr_update", "=", "'%s/solr/hypermap/update/json/docs'", "%", "SEARCH_URL", "headers", "=", "{", "\"content-type\"", ":", "\"application/json\"", "}", "params", "=", "{", "\"commitWithin\"", ":", "1500", "}", "res", "=", "requests", ".", "post", "(", "url_solr_update", ",", "data", "=", "layer_json", ",", "params", "=", "params", ",", "headers", "=", "headers", ")", "res", "=", "res", ".", "json", "(", ")", "if", "'error'", "in", "res", ":", "success", "=", "False", "message", "=", "\"Error syncing layer id %s to Solr: %s\"", "%", "(", "layer", ".", "id", ",", "res", "[", "\"error\"", "]", ".", "get", "(", "\"msg\"", ")", ")", "except", "Exception", ",", "e", ":", "success", "=", "False", "message", "=", "\"Error syncing layer id %s to Solr: %s\"", "%", "(", "layer", ".", "id", ",", "sys", ".", "exc_info", "(", ")", "[", "1", "]", ")", "LOGGER", ".", "error", "(", "e", ",", "exc_info", "=", "True", ")", "if", "success", ":", "LOGGER", ".", "info", "(", "message", ")", "else", ":", "LOGGER", ".", "error", "(", "message", ")", "return", "success", ",", "message" ]
return a new cloned error
def clone_exception ( error , args ) : new_error = error . __class__ ( * args ) new_error . __dict__ = error . __dict__ return new_error
6,028
https://github.com/TylerTemp/docpie/blob/e658454b81b6c79a020d499f12ad73496392c09a/docpie/pie.py#L425-L454
[ "def", "_format_exe_info", "(", "py_len", ",", "exeinfo", ",", "format", ",", "indlevel", ")", ":", "ret", "=", "[", "]", "ind", "=", "\" \"", "*", "indlevel", "*", "NIND", "if", "format", ".", "startswith", "(", "\"text\"", ")", "else", "\"\"", "if", "format", "==", "\"markdown-list\"", ":", "for", "si", "in", "exeinfo", ":", "ret", ".", "append", "(", "\" - `{0!s}`: {1!s}\"", ".", "format", "(", "si", ".", "filename", ",", "si", ".", "description", ")", ")", "if", "format", "==", "\"rest-list\"", ":", "for", "si", "in", "exeinfo", ":", "ret", ".", "append", "(", "\"* ``{0!s}``: {1!s}\"", ".", "format", "(", "si", ".", "filename", ",", "si", ".", "description", ")", ")", "elif", "format", "==", "\"markdown-table\"", ":", "mask", "=", "\"%-{0:d}s | %s\"", ".", "format", "(", "py_len", "+", "2", ")", "ret", ".", "append", "(", "mask", "%", "(", "\"Script name\"", ",", "\"Purpose\"", ")", ")", "ret", ".", "append", "(", "\"-\"", "*", "(", "py_len", "+", "3", ")", "+", "\"|\"", "+", "\"-\"", "*", "10", ")", "for", "si", "in", "exeinfo", ":", "ret", ".", "append", "(", "mask", "%", "(", "\"`{0!s}`\"", ".", "format", "(", "si", ".", "filename", ")", ",", "si", ".", "description", ")", ")", "elif", "format", "==", "\"text\"", ":", "sbc", "=", "1", "# spaces between columns\r", "for", "si", "in", "exeinfo", ":", "ss", "=", "textwrap", ".", "wrap", "(", "si", ".", "description", ",", "79", "-", "py_len", "-", "sbc", "-", "indlevel", "*", "NIND", ")", "for", "i", ",", "s", "in", "enumerate", "(", "ss", ")", ":", "if", "i", "==", "0", ":", "filecolumn", "=", "si", ".", "filename", "+", "\" \"", "+", "(", "\".\"", "*", "(", "py_len", "-", "len", "(", "si", ".", "filename", ")", ")", ")", "else", ":", "filecolumn", "=", "\" \"", "*", "(", "py_len", "+", "1", ")", "ret", ".", "append", "(", "\"{}{}{}{}\"", ".", "format", "(", "ind", ",", "filecolumn", ",", "\" \"", "*", "sbc", ",", "s", ")", ")", "ret", ".", "append", "(", "\"\"", ")", "return", "ret" ]
Convert Docpie into a JSONlizable dict .
def to_dict ( self ) : # cls, self): config = { 'stdopt' : self . stdopt , 'attachopt' : self . attachopt , 'attachvalue' : self . attachvalue , 'auto2dashes' : self . auto2dashes , 'case_sensitive' : self . case_sensitive , 'namedoptions' : self . namedoptions , 'appearedonly' : self . appeared_only , 'optionsfirst' : self . options_first , 'option_name' : self . option_name , 'usage_name' : self . usage_name , 'name' : self . name , 'help' : self . help , 'version' : self . version } text = { 'doc' : self . doc , 'usage_text' : self . usage_text , 'option_sections' : self . option_sections , } # option = [convert_2_dict(x) for x in self.options] option = { } for title , options in self . options . items ( ) : option [ title ] = [ convert_2_dict ( x ) for x in options ] usage = [ convert_2_dict ( x ) for x in self . usages ] return { '__version__' : self . _version , '__class__' : 'Docpie' , '__config__' : config , '__text__' : text , 'option' : option , 'usage' : usage , 'option_names' : [ list ( x ) for x in self . opt_names ] , 'opt_names_required_max_args' : self . opt_names_required_max_args }
6,029
https://github.com/TylerTemp/docpie/blob/e658454b81b6c79a020d499f12ad73496392c09a/docpie/pie.py#L547-L597
[ "def", "destroy_volume", "(", "agent", ",", "role", ")", ":", "volumes", "=", "[", "]", "agent_id", "=", "agent", "[", "'id'", "]", "reserved_resources_full", "=", "agent", ".", "get", "(", "'reserved_resources_full'", ",", "None", ")", "if", "not", "reserved_resources_full", ":", "# doesn't exist", "return", "True", "reserved_resources", "=", "reserved_resources_full", ".", "get", "(", "role", ",", "None", ")", "if", "not", "reserved_resources", ":", "# doesn't exist", "return", "True", "for", "reserved_resource", "in", "reserved_resources", ":", "name", "=", "reserved_resource", ".", "get", "(", "'name'", ",", "None", ")", "disk", "=", "reserved_resource", ".", "get", "(", "'disk'", ",", "None", ")", "if", "name", "==", "'disk'", "and", "disk", "is", "not", "None", "and", "'persistence'", "in", "disk", ":", "volumes", ".", "append", "(", "reserved_resource", ")", "req_url", "=", "urljoin", "(", "master_url", "(", ")", ",", "'destroy-volumes'", ")", "data", "=", "{", "'slaveId'", ":", "agent_id", ",", "'volumes'", ":", "json", ".", "dumps", "(", "volumes", ")", "}", "success", "=", "False", "try", ":", "response", "=", "http", ".", "post", "(", "req_url", ",", "data", "=", "data", ")", "success", "=", "200", "<=", "response", ".", "status_code", "<", "300", "if", "response", ".", "status_code", "==", "409", ":", "# thoughts on what to do here? throw exception", "# i would rather not print", "print", "(", "'''###\\nIs a framework using these resources still installed?\\n###'''", ")", "except", "DCOSHTTPException", "as", "e", ":", "print", "(", "\"HTTP {}: Unabled to delete volume based on: {}\"", ".", "format", "(", "e", ".", "response", ".", "status_code", ",", "e", ".", "response", ".", "text", ")", ")", "return", "success" ]
Convert dict generated by convert_2_dict into Docpie instance
def from_dict ( cls , dic ) : if '__version__' not in dic : raise ValueError ( 'Not support old docpie data' ) data_version = int ( dic [ '__version__' ] . replace ( '.' , '' ) ) this_version = int ( cls . _version . replace ( '.' , '' ) ) logger . debug ( 'this: %s, old: %s' , this_version , data_version ) if data_version < this_version : raise ValueError ( 'Not support old docpie data' ) assert dic [ '__class__' ] == 'Docpie' config = dic [ '__config__' ] help = config . pop ( 'help' ) version = config . pop ( 'version' ) option_name = config . pop ( 'option_name' ) usage_name = config . pop ( 'usage_name' ) self = cls ( None , * * config ) self . option_name = option_name self . usage_name = usage_name text = dic [ '__text__' ] self . doc = text [ 'doc' ] self . usage_text = text [ 'usage_text' ] self . option_sections = text [ 'option_sections' ] self . opt_names = [ set ( x ) for x in dic [ 'option_names' ] ] self . opt_names_required_max_args = dic [ 'opt_names_required_max_args' ] self . set_config ( help = help , version = version ) self . options = o = { } for title , options in dic [ 'option' ] . items ( ) : opt_ins = [ convert_2_object ( x , { } , self . namedoptions ) for x in options ] o [ title ] = opt_ins self . usages = [ convert_2_object ( x , self . options , self . namedoptions ) for x in dic [ 'usage' ] ] return self
6,030
https://github.com/TylerTemp/docpie/blob/e658454b81b6c79a020d499f12ad73496392c09a/docpie/pie.py#L602-L651
[ "def", "acquire", "(", "self", ",", "blocking", "=", "True", ",", "timeout", "=", "None", ")", ":", "if", "timeout", "is", "None", ":", "return", "self", ".", "__lock", ".", "acquire", "(", "blocking", ")", "else", ":", "# Simulated timeout using progressively longer sleeps.", "# This is the same timeout scheme used in the stdlib Condition", "# class. If there's lots of contention on the lock then there's", "# a good chance you won't get it; but then again, Python doesn't", "# guarantee fairness anyway. We hope that platform-specific", "# extensions can provide a better mechanism.", "endtime", "=", "_time", "(", ")", "+", "timeout", "delay", "=", "0.0005", "while", "not", "self", ".", "__lock", ".", "acquire", "(", "False", ")", ":", "remaining", "=", "endtime", "-", "_time", "(", ")", "if", "remaining", "<=", "0", ":", "return", "False", "delay", "=", "min", "(", "delay", "*", "2", ",", "remaining", ",", "0.05", ")", "_sleep", "(", "delay", ")", "return", "True" ]
Shadow all the current config .
def set_config ( self , * * config ) : reinit = False if 'stdopt' in config : stdopt = config . pop ( 'stdopt' ) reinit = ( stdopt != self . stdopt ) self . stdopt = stdopt if 'attachopt' in config : attachopt = config . pop ( 'attachopt' ) reinit = reinit or ( attachopt != self . attachopt ) self . attachopt = attachopt if 'attachvalue' in config : attachvalue = config . pop ( 'attachvalue' ) reinit = reinit or ( attachvalue != self . attachvalue ) self . attachvalue = attachvalue if 'auto2dashes' in config : self . auto2dashes = config . pop ( 'auto2dashes' ) if 'name' in config : name = config . pop ( 'name' ) reinit = reinit or ( name != self . name ) self . name = name if 'help' in config : self . help = config . pop ( 'help' ) self . _set_or_remove_extra_handler ( self . help , ( '--help' , '-h' ) , self . help_handler ) if 'version' in config : self . version = config . pop ( 'version' ) self . _set_or_remove_extra_handler ( self . version is not None , ( '--version' , '-v' ) , self . version_handler ) if 'case_sensitive' in config : case_sensitive = config . pop ( 'case_sensitive' ) reinit = reinit or ( case_sensitive != self . case_sensitive ) self . case_sensitive = case_sensitive if 'optionsfirst' in config : self . options_first = config . pop ( 'optionsfirst' ) if 'appearedonly' in config : self . appeared_only = config . pop ( 'appearedonly' ) if 'namedoptions' in config : namedoptions = config . pop ( 'namedoptions' ) reinit = reinit or ( namedoptions != self . namedoptions ) self . namedoptions = namedoptions if 'extra' in config : self . extra . update ( self . _formal_extra ( config . pop ( 'extra' ) ) ) if config : # should be empty raise ValueError ( '`%s` %s not accepted key argument%s' % ( '`, `' . join ( config ) , 'is' if len ( config ) == 1 else 'are' , '' if len ( config ) == 1 else 's' ) ) if self . doc is not None and reinit : logger . warning ( 'You changed the config that requires re-initialized' ' `Docpie` object. 
Create a new one instead' ) self . _init ( )
6,031
https://github.com/TylerTemp/docpie/blob/e658454b81b6c79a020d499f12ad73496392c09a/docpie/pie.py#L655-L714
[ "def", "revoke_session", "(", "self", ",", "sid", "=", "''", ",", "token", "=", "''", ")", ":", "if", "not", "sid", ":", "if", "token", ":", "sid", "=", "self", ".", "handler", ".", "sid", "(", "token", ")", "else", ":", "raise", "ValueError", "(", "'Need one of \"sid\" or \"token\"'", ")", "for", "typ", "in", "[", "'access_token'", ",", "'refresh_token'", ",", "'code'", "]", ":", "try", ":", "self", ".", "revoke_token", "(", "self", "[", "sid", "]", "[", "typ", "]", ",", "typ", ")", "except", "KeyError", ":", "# If no such token has been issued", "pass", "self", ".", "update", "(", "sid", ",", "revoked", "=", "True", ")" ]
Return alias set of a flag ; return None if flag is not defined in Options .
def find_flag_alias ( self , flag ) : for each in self . opt_names : if flag in each : result = set ( each ) # a copy result . remove ( flag ) return result return None
6,032
https://github.com/TylerTemp/docpie/blob/e658454b81b6c79a020d499f12ad73496392c09a/docpie/pie.py#L749-L758
[ "def", "join_struct_arrays", "(", "arrays", ")", ":", "# taken from http://stackoverflow.com/questions/5355744/numpy-joining-structured-arrays", "sizes", "=", "np", ".", "array", "(", "[", "a", ".", "itemsize", "for", "a", "in", "arrays", "]", ")", "offsets", "=", "np", ".", "r_", "[", "0", ",", "sizes", ".", "cumsum", "(", ")", "]", "shape", "=", "arrays", "[", "0", "]", ".", "shape", "joint", "=", "np", ".", "empty", "(", "shape", "+", "(", "offsets", "[", "-", "1", "]", ",", ")", ",", "dtype", "=", "np", ".", "uint8", ")", "for", "a", ",", "size", ",", "offset", "in", "zip", "(", "arrays", ",", "sizes", ",", "offsets", ")", ":", "joint", "[", "...", ",", "offset", ":", "offset", "+", "size", "]", "=", "np", ".", "atleast_1d", "(", "a", ")", ".", "view", "(", "np", ".", "uint8", ")", ".", "reshape", "(", "shape", "+", "(", "size", ",", ")", ")", "dtype", "=", "sum", "(", "(", "a", ".", "dtype", ".", "descr", "for", "a", "in", "arrays", ")", ",", "[", "]", ")", "return", "joint", ".", "ravel", "(", ")", ".", "view", "(", "dtype", ")" ]
Set pre - auto - handler for a flag .
def set_auto_handler ( self , flag , handler ) : assert flag . startswith ( '-' ) and flag not in ( '-' , '--' ) alias = self . find_flag_alias ( flag ) or [ ] self . extra [ flag ] = handler for each in alias : self . extra [ each ] = handler
6,033
https://github.com/TylerTemp/docpie/blob/e658454b81b6c79a020d499f12ad73496392c09a/docpie/pie.py#L760-L775
[ "def", "_labeled_uniform_sample", "(", "self", ",", "sample_size", ")", ":", "labeled_entries", "=", "self", ".", "dataset", ".", "get_labeled_entries", "(", ")", "samples", "=", "[", "labeled_entries", "[", "self", ".", "random_state_", ".", "randint", "(", "0", ",", "len", "(", "labeled_entries", ")", ")", "]", "for", "_", "in", "range", "(", "sample_size", ")", "]", "return", "Dataset", "(", "*", "zip", "(", "*", "samples", ")", ")" ]
A quick preview of docpie . Print all the parsed object
def preview ( self , stream = sys . stdout ) : write = stream . write write ( ( '[Quick preview of Docpie %s]' % self . _version ) . center ( 80 , '=' ) ) write ( '\n' ) write ( ' sections ' . center ( 80 , '-' ) ) write ( '\n' ) write ( self . usage_text ) write ( '\n' ) option_sections = self . option_sections if option_sections : write ( '\n' ) write ( '\n' . join ( option_sections . values ( ) ) ) write ( '\n' ) write ( ' str ' . center ( 80 , '-' ) ) write ( '\n[%s]\n' % self . usage_name ) for each in self . usages : write ( ' %s\n' % each ) write ( '\n[Options:]\n\n' ) for title , sections in self . options . items ( ) : if title : full_title = '%s %s' % ( title , self . option_name ) else : full_title = self . option_name write ( full_title ) write ( '\n' ) for each in sections : write ( ' %s\n' % each ) write ( '\n' ) write ( ' repr ' . center ( 80 , '-' ) ) write ( '\n[%s]\n' % self . usage_name ) for each in self . usages : write ( ' %r\n' % each ) write ( '\n[Options:]\n\n' ) for title , sections in self . options . items ( ) : if title : full_title = '%s %s' % ( title , self . option_name ) else : full_title = self . option_name write ( full_title ) write ( '\n' ) for each in sections : write ( ' %r\n' % each ) write ( '\n' ) write ( ' auto handlers ' . center ( 80 , '-' ) ) write ( '\n' ) for key , value in self . extra . items ( ) : write ( '%s %s\n' % ( key , value ) )
6,034
https://github.com/TylerTemp/docpie/blob/e658454b81b6c79a020d499f12ad73496392c09a/docpie/pie.py#L777-L837
[ "def", "type", "(", "self", ")", ":", "if", "self", ".", "_type", ".", "lower", "(", ")", "==", "'reg'", ":", "return", "REGULAR_SEASON", "if", "self", ".", "_type", ".", "lower", "(", ")", "==", "'ctourn'", ":", "return", "CONFERENCE_TOURNAMENT", "if", "self", ".", "_type", ".", "lower", "(", ")", "==", "'ncaa'", ":", "return", "NCAA_TOURNAMENT", "if", "self", ".", "_type", ".", "lower", "(", ")", "==", "'nit'", ":", "return", "NIT_TOURNAMENT", "if", "self", ".", "_type", ".", "lower", "(", ")", "==", "'cbi'", ":", "return", "CBI_TOURNAMENT", "if", "self", ".", "_type", ".", "lower", "(", ")", "==", "'cit'", ":", "return", "CIT_TOURNAMENT" ]
Query device for all attributes that exist regardless of power state .
def refresh_core ( self ) : self . log . info ( 'Sending out mass query for all attributes' ) for key in ATTR_CORE : self . query ( key )
6,035
https://github.com/nugget/python-anthemav/blob/c3cee38f2d452c1ab1335d9885e0769ec24d5f90/anthemav/protocol.py#L109-L120
[ "def", "_update_request", "(", "self", ",", "data", ",", "options", ")", ":", "data", "=", "json", ".", "dumps", "(", "data", ")", "if", "'headers'", "not", "in", "options", ":", "options", "[", "'headers'", "]", "=", "{", "}", "options", "[", "'headers'", "]", ".", "update", "(", "{", "'Content-type'", ":", "'application/json'", "}", ")", "return", "data", ",", "options" ]
Keep requesting all attributes until it works .
def poweron_refresh ( self ) : if self . _poweron_refresh_successful : return else : self . refresh_all ( ) self . _loop . call_later ( 2 , self . poweron_refresh )
6,036
https://github.com/nugget/python-anthemav/blob/c3cee38f2d452c1ab1335d9885e0769ec24d5f90/anthemav/protocol.py#L122-L136
[ "def", "create_doc_jar", "(", "self", ",", "target", ",", "open_jar", ",", "version", ")", ":", "javadoc", "=", "self", ".", "_java_doc", "(", "target", ")", "scaladoc", "=", "self", ".", "_scala_doc", "(", "target", ")", "if", "javadoc", "or", "scaladoc", ":", "jar_path", "=", "self", ".", "artifact_path", "(", "open_jar", ",", "version", ",", "suffix", "=", "'-javadoc'", ")", "with", "self", ".", "open_jar", "(", "jar_path", ",", "overwrite", "=", "True", ",", "compressed", "=", "True", ")", "as", "open_jar", ":", "def", "add_docs", "(", "docs", ")", ":", "if", "docs", ":", "for", "basedir", ",", "doc_files", "in", "docs", ".", "items", "(", ")", ":", "for", "doc_file", "in", "doc_files", ":", "open_jar", ".", "write", "(", "os", ".", "path", ".", "join", "(", "basedir", ",", "doc_file", ")", ",", "doc_file", ")", "add_docs", "(", "javadoc", ")", "add_docs", "(", "scaladoc", ")", "return", "jar_path", "else", ":", "return", "None" ]
Query device for all attributes that are known .
def refresh_all ( self ) : self . log . info ( 'refresh_all' ) for key in LOOKUP : self . query ( key )
6,037
https://github.com/nugget/python-anthemav/blob/c3cee38f2d452c1ab1335d9885e0769ec24d5f90/anthemav/protocol.py#L139-L150
[ "def", "upload", "(", "self", ",", "params", "=", "{", "}", ")", ":", "if", "self", ".", "upload_token", "is", "not", "None", ":", "# resume upload", "status", "=", "self", ".", "check", "(", ")", "if", "status", "[", "'status'", "]", "!=", "4", ":", "return", "self", ".", "commit", "(", ")", "else", ":", "self", ".", "new_slice", "(", ")", "while", "self", ".", "slice_task_id", "!=", "0", ":", "self", ".", "upload_slice", "(", ")", "return", "self", ".", "commit", "(", ")", "else", ":", "# new upload", "self", ".", "create", "(", "self", ".", "prepare_video_params", "(", "*", "*", "params", ")", ")", "self", ".", "create_file", "(", ")", "self", ".", "new_slice", "(", ")", "while", "self", ".", "slice_task_id", "!=", "0", ":", "self", ".", "upload_slice", "(", ")", "return", "self", ".", "commit", "(", ")" ]
Called when asyncio . Protocol establishes the network connection .
def connection_made ( self , transport ) : self . log . info ( 'Connection established to AVR' ) self . transport = transport #self.transport.set_write_buffer_limits(0) limit_low , limit_high = self . transport . get_write_buffer_limits ( ) self . log . debug ( 'Write buffer limits %d to %d' , limit_low , limit_high ) self . command ( 'ECH1' ) self . refresh_core ( )
6,038
https://github.com/nugget/python-anthemav/blob/c3cee38f2d452c1ab1335d9885e0769ec24d5f90/anthemav/protocol.py#L157-L167
[ "def", "delete_unit", "(", "unit_id", ",", "*", "*", "kwargs", ")", ":", "try", ":", "db_unit", "=", "db", ".", "DBSession", ".", "query", "(", "Unit", ")", ".", "filter", "(", "Unit", ".", "id", "==", "unit_id", ")", ".", "one", "(", ")", "db", ".", "DBSession", ".", "delete", "(", "db_unit", ")", "db", ".", "DBSession", ".", "flush", "(", ")", "return", "True", "except", "NoResultFound", ":", "raise", "ResourceNotFoundError", "(", "\"Unit (ID=%s) does not exist\"", "%", "(", "unit_id", ")", ")" ]
Called when asyncio . Protocol detects received data from network .
def data_received ( self , data ) : self . buffer += data . decode ( ) self . log . debug ( 'Received %d bytes from AVR: %s' , len ( self . buffer ) , self . buffer ) self . _assemble_buffer ( )
6,039
https://github.com/nugget/python-anthemav/blob/c3cee38f2d452c1ab1335d9885e0769ec24d5f90/anthemav/protocol.py#L169-L173
[ "def", "delete_unit", "(", "unit_id", ",", "*", "*", "kwargs", ")", ":", "try", ":", "db_unit", "=", "db", ".", "DBSession", ".", "query", "(", "Unit", ")", ".", "filter", "(", "Unit", ".", "id", "==", "unit_id", ")", ".", "one", "(", ")", "db", ".", "DBSession", ".", "delete", "(", "db_unit", ")", "db", ".", "DBSession", ".", "flush", "(", ")", "return", "True", "except", "NoResultFound", ":", "raise", "ResourceNotFoundError", "(", "\"Unit (ID=%s) does not exist\"", "%", "(", "unit_id", ")", ")" ]
Called when asyncio . Protocol loses the network connection .
def connection_lost ( self , exc ) : if exc is None : self . log . warning ( 'eof from receiver?' ) else : self . log . warning ( 'Lost connection to receiver: %s' , exc ) self . transport = None if self . _connection_lost_callback : self . _loop . call_soon ( self . _connection_lost_callback )
6,040
https://github.com/nugget/python-anthemav/blob/c3cee38f2d452c1ab1335d9885e0769ec24d5f90/anthemav/protocol.py#L175-L185
[ "def", "_CountStoredAttributeContainers", "(", "self", ",", "container_type", ")", ":", "if", "not", "container_type", "in", "self", ".", "_CONTAINER_TYPES", ":", "raise", "ValueError", "(", "'Attribute container type {0:s} is not supported'", ".", "format", "(", "container_type", ")", ")", "if", "not", "self", ".", "_HasTable", "(", "container_type", ")", ":", "return", "0", "# Note that this is SQLite specific, and will give inaccurate results if", "# there are DELETE commands run on the table. The Plaso SQLite storage", "# implementation does not run any DELETE commands.", "query", "=", "'SELECT MAX(_ROWID_) FROM {0:s} LIMIT 1'", ".", "format", "(", "container_type", ")", "self", ".", "_cursor", ".", "execute", "(", "query", ")", "row", "=", "self", ".", "_cursor", ".", "fetchone", "(", ")", "if", "not", "row", ":", "return", "0", "return", "row", "[", "0", "]", "or", "0" ]
Split up received data from device into individual commands .
def _assemble_buffer ( self ) : self . transport . pause_reading ( ) for message in self . buffer . split ( ';' ) : if message != '' : self . log . debug ( 'assembled message ' + message ) self . _parse_message ( message ) self . buffer = "" self . transport . resume_reading ( ) return
6,041
https://github.com/nugget/python-anthemav/blob/c3cee38f2d452c1ab1335d9885e0769ec24d5f90/anthemav/protocol.py#L187-L206
[ "def", "_GetPathSegmentIndexForOccurrenceWeights", "(", "self", ",", "occurrence_weights", ",", "value_weights", ")", ":", "largest_weight", "=", "occurrence_weights", ".", "GetLargestWeight", "(", ")", "if", "largest_weight", ">", "0", ":", "occurrence_weight_indexes", "=", "occurrence_weights", ".", "GetIndexesForWeight", "(", "largest_weight", ")", "number_of_occurrence_indexes", "=", "len", "(", "occurrence_weight_indexes", ")", "else", ":", "number_of_occurrence_indexes", "=", "0", "path_segment_index", "=", "None", "if", "number_of_occurrence_indexes", "==", "0", ":", "path_segment_index", "=", "self", ".", "_GetPathSegmentIndexForValueWeights", "(", "value_weights", ")", "elif", "number_of_occurrence_indexes", "==", "1", ":", "path_segment_index", "=", "occurrence_weight_indexes", "[", "0", "]", "else", ":", "largest_weight", "=", "0", "for", "occurrence_index", "in", "occurrence_weight_indexes", ":", "value_weight", "=", "value_weights", ".", "GetWeightForIndex", "(", "occurrence_index", ")", "if", "not", "path_segment_index", "or", "largest_weight", "<", "value_weight", ":", "largest_weight", "=", "value_weight", "path_segment_index", "=", "occurrence_index", "return", "path_segment_index" ]
Request the names for all active configured inputs on the device .
def _populate_inputs ( self , total ) : total = total + 1 for input_number in range ( 1 , total ) : self . query ( 'ISN' + str ( input_number ) . zfill ( 2 ) )
6,042
https://github.com/nugget/python-anthemav/blob/c3cee38f2d452c1ab1335d9885e0769ec24d5f90/anthemav/protocol.py#L208-L216
[ "def", "_publish_date", "(", "self", ",", "item", ")", ":", "url", "=", "item", "[", "'url'", "]", "html", "=", "deepcopy", "(", "item", "[", "'spider_response'", "]", ".", "body", ")", "publish_date", "=", "None", "try", ":", "if", "html", "is", "None", ":", "request", "=", "urllib2", ".", "Request", "(", "url", ")", "# Using a browser user agent, decreases the change of sites blocking this request - just a suggestion", "# request.add_header('User-Agent', 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko)", "# Chrome/41.0.2228.0 Safari/537.36')", "html", "=", "urllib2", ".", "build_opener", "(", ")", ".", "open", "(", "request", ")", ".", "read", "(", ")", "html", "=", "BeautifulSoup", "(", "html", ",", "\"lxml\"", ")", "publish_date", "=", "self", ".", "_extract_from_json", "(", "html", ")", "if", "publish_date", "is", "None", ":", "publish_date", "=", "self", ".", "_extract_from_meta", "(", "html", ")", "if", "publish_date", "is", "None", ":", "publish_date", "=", "self", ".", "_extract_from_html_tag", "(", "html", ")", "if", "publish_date", "is", "None", ":", "publish_date", "=", "self", ".", "_extract_from_url", "(", "url", ")", "except", "Exception", "as", "e", ":", "# print(e.message, e.args)", "pass", "return", "publish_date" ]
Issue a raw formatted command to the device .
def formatted_command ( self , command ) : command = command command = command . encode ( ) self . log . debug ( '> %s' , command ) try : self . transport . write ( command ) time . sleep ( 0.01 ) except : self . log . warning ( 'No transport found, unable to send command' )
6,043
https://github.com/nugget/python-anthemav/blob/c3cee38f2d452c1ab1335d9885e0769ec24d5f90/anthemav/protocol.py#L354-L378
[ "def", "delete_container_instance_group", "(", "access_token", ",", "subscription_id", ",", "resource_group", ",", "container_group_name", ")", ":", "endpoint", "=", "''", ".", "join", "(", "[", "get_rm_endpoint", "(", ")", ",", "'/subscriptions/'", ",", "subscription_id", ",", "'/resourcegroups/'", ",", "resource_group", ",", "'/providers/Microsoft.ContainerInstance/ContainerGroups/'", ",", "container_group_name", ",", "'?api-version='", ",", "CONTAINER_API", "]", ")", "return", "do_delete", "(", "endpoint", ",", "access_token", ")" ]
Return contents of transport object for debugging forensics .
def dump_rawdata ( self ) : if hasattr ( self , 'transport' ) : attrs = vars ( self . transport ) return ', ' . join ( "%s: %s" % item for item in attrs . items ( ) )
6,044
https://github.com/nugget/python-anthemav/blob/c3cee38f2d452c1ab1335d9885e0769ec24d5f90/anthemav/protocol.py#L852-L856
[ "def", "cancel", "(", "self", ",", "at_period_end", "=", "djstripe_settings", ".", "CANCELLATION_AT_PERIOD_END", ")", ":", "# If plan has trial days and customer cancels before", "# trial period ends, then end subscription now,", "# i.e. at_period_end=False", "if", "self", ".", "trial_end", "and", "self", ".", "trial_end", ">", "timezone", ".", "now", "(", ")", ":", "at_period_end", "=", "False", "if", "at_period_end", ":", "stripe_subscription", "=", "self", ".", "api_retrieve", "(", ")", "stripe_subscription", ".", "cancel_at_period_end", "=", "True", "stripe_subscription", ".", "save", "(", ")", "else", ":", "try", ":", "stripe_subscription", "=", "self", ".", "_api_delete", "(", ")", "except", "InvalidRequestError", "as", "exc", ":", "if", "\"No such subscription:\"", "in", "str", "(", "exc", ")", ":", "# cancel() works by deleting the subscription. The object still", "# exists in Stripe however, and can still be retrieved.", "# If the subscription was already canceled (status=canceled),", "# that api_retrieve() call will fail with \"No such subscription\".", "# However, this may also happen if the subscription legitimately", "# does not exist, in which case the following line will re-raise.", "stripe_subscription", "=", "self", ".", "api_retrieve", "(", ")", "else", ":", "raise", "return", "Subscription", ".", "sync_from_stripe_data", "(", "stripe_subscription", ")" ]
Add a tag or populator to the batch by value and criteria
def add_upsert ( self , value , criteria ) : value = value . strip ( ) v = value . lower ( ) self . lower_val_to_val [ v ] = value criteria_array = self . upserts . get ( v ) if criteria_array is None : criteria_array = [ ] # start with # '{"value": "some_value", "criteria": []}, ' self . upserts_size [ v ] = 31 + len ( value ) criteria_array . append ( criteria . to_dict ( ) ) self . upserts [ v ] = criteria_array self . upserts_size [ v ] += criteria . json_size ( )
6,045
https://github.com/kentik/kentikapi-py/blob/aa94c0b7eaf88409818b97967d7293e309e11bab/kentikapi/v5/tagging.py#L25-L38
[ "def", "list_all", "(", "self", ")", ":", "if", "isinstance", "(", "self", ".", "minion", ".", "opts", "[", "'reactor'", "]", ",", "six", ".", "string_types", ")", ":", "log", ".", "debug", "(", "'Reading reactors from yaml %s'", ",", "self", ".", "opts", "[", "'reactor'", "]", ")", "try", ":", "with", "salt", ".", "utils", ".", "files", ".", "fopen", "(", "self", ".", "opts", "[", "'reactor'", "]", ")", "as", "fp_", ":", "react_map", "=", "salt", ".", "utils", ".", "yaml", ".", "safe_load", "(", "fp_", ")", "except", "(", "OSError", ",", "IOError", ")", ":", "log", ".", "error", "(", "'Failed to read reactor map: \"%s\"'", ",", "self", ".", "opts", "[", "'reactor'", "]", ")", "except", "Exception", ":", "log", ".", "error", "(", "'Failed to parse YAML in reactor map: \"%s\"'", ",", "self", ".", "opts", "[", "'reactor'", "]", ")", "else", ":", "log", ".", "debug", "(", "'Not reading reactors from yaml'", ")", "react_map", "=", "self", ".", "minion", ".", "opts", "[", "'reactor'", "]", "return", "react_map" ]
Delete a tag or populator by value - these are processed before upserts
def add_delete ( self , value ) : value = value . strip ( ) v = value . lower ( ) self . lower_val_to_val [ v ] = value if len ( v ) == 0 : raise ValueError ( "Invalid value for delete. Value is empty." ) self . deletes . add ( v )
6,046
https://github.com/kentik/kentikapi-py/blob/aa94c0b7eaf88409818b97967d7293e309e11bab/kentikapi/v5/tagging.py#L40-L49
[ "def", "start", "(", "self", ")", ":", "try", ":", "cmd", "=", "[", "self", ".", "path", "]", "cmd", ".", "extend", "(", "self", ".", "command_line_args", "(", ")", ")", "self", ".", "process", "=", "subprocess", ".", "Popen", "(", "cmd", ",", "env", "=", "self", ".", "env", ",", "close_fds", "=", "platform", ".", "system", "(", ")", "!=", "'Windows'", ",", "stdout", "=", "self", ".", "log_file", ",", "stderr", "=", "self", ".", "log_file", ",", "stdin", "=", "PIPE", ")", "except", "TypeError", ":", "raise", "except", "OSError", "as", "err", ":", "if", "err", ".", "errno", "==", "errno", ".", "ENOENT", ":", "raise", "WebDriverException", "(", "\"'%s' executable needs to be in PATH. %s\"", "%", "(", "os", ".", "path", ".", "basename", "(", "self", ".", "path", ")", ",", "self", ".", "start_error_message", ")", ")", "elif", "err", ".", "errno", "==", "errno", ".", "EACCES", ":", "raise", "WebDriverException", "(", "\"'%s' executable may have wrong permissions. %s\"", "%", "(", "os", ".", "path", ".", "basename", "(", "self", ".", "path", ")", ",", "self", ".", "start_error_message", ")", ")", "else", ":", "raise", "except", "Exception", "as", "e", ":", "raise", "WebDriverException", "(", "\"The executable %s needs to be available in the path. %s\\n%s\"", "%", "(", "os", ".", "path", ".", "basename", "(", "self", ".", "path", ")", ",", "self", ".", "start_error_message", ",", "str", "(", "e", ")", ")", ")", "count", "=", "0", "while", "True", ":", "self", ".", "assert_process_still_running", "(", ")", "if", "self", ".", "is_connectable", "(", ")", ":", "break", "count", "+=", "1", "time", ".", "sleep", "(", "1", ")", "if", "count", "==", "30", ":", "raise", "WebDriverException", "(", "\"Can not connect to the Service %s\"", "%", "self", ".", "path", ")" ]
Return an array of batch parts to submit
def parts ( self ) : parts = [ ] upserts = dict ( ) deletes = [ ] # we keep track of the batch size as we go (pretty close approximation!) so we can chunk it small enough # to limit the HTTP posts to under 700KB - server limits to 750KB, so play it safe max_upload_size = 700000 # loop upserts first - fit the deletes in afterward # '{"replace_all": true, "complete": false, "guid": "6659fbfc-3f08-42ee-998c-9109f650f4b7", "upserts": [], "deletes": []}' base_part_size = 118 if not self . replace_all : base_part_size += 1 # yeah, this is totally overkill :) part_size = base_part_size for value in self . upserts : if ( part_size + self . upserts_size [ value ] ) >= max_upload_size : # this record would put us over the limit - close out the batch part and start a new one parts . append ( BatchPart ( self . replace_all , upserts , deletes ) ) upserts = dict ( ) deletes = [ ] part_size = base_part_size # for the new upserts dict, drop the lower-casing of value upserts [ self . lower_val_to_val [ value ] ] = self . upserts [ value ] part_size += self . upserts_size [ value ] # updating the approximate size of the batch for value in self . deletes : # delete adds length of string plus quotes, comma and space if ( part_size + len ( value ) + 4 ) >= max_upload_size : parts . append ( BatchPart ( self . replace_all , upserts , deletes ) ) upserts = dict ( ) deletes = [ ] part_size = base_part_size # for the new deletes set, drop the lower-casing of value deletes . append ( { 'value' : self . lower_val_to_val [ value ] } ) part_size += len ( value ) + 4 if len ( upserts ) + len ( deletes ) > 0 : # finish the batch parts . append ( BatchPart ( self . replace_all , upserts , deletes ) ) if len ( parts ) == 0 : if not self . replace_all : raise ValueError ( "Batch has no data, and 'replace_all' is False" ) parts . append ( BatchPart ( self . replace_all , dict ( ) , [ ] ) ) # last part finishes the batch parts [ - 1 ] . set_last_part ( ) return parts
6,047
https://github.com/kentik/kentikapi-py/blob/aa94c0b7eaf88409818b97967d7293e309e11bab/kentikapi/v5/tagging.py#L51-L105
[ "def", "send_message", "(", "self", ",", "message", ")", ":", "try", ":", "if", "_message_test_port", "is", "not", "None", ":", "_message_test_port", ".", "sent", ".", "append", "(", "message", ")", "yield", "message", ".", "send", "(", "self", ")", "except", "(", "WebSocketClosedError", ",", "StreamClosedError", ")", ":", "# Tornado 4.x may raise StreamClosedError", "# on_close() is / will be called anyway", "log", ".", "warning", "(", "\"Failed sending message as connection was closed\"", ")", "raise", "gen", ".", "Return", "(", "None", ")" ]
Build JSON with the input guid
def build_json ( self , guid ) : upserts = [ ] for value in self . upserts : upserts . append ( { "value" : value , "criteria" : self . upserts [ value ] } ) return json . dumps ( { 'replace_all' : self . replace_all , 'guid' : guid , 'complete' : self . complete , 'upserts' : upserts , 'deletes' : self . deletes } )
6,048
https://github.com/kentik/kentikapi-py/blob/aa94c0b7eaf88409818b97967d7293e309e11bab/kentikapi/v5/tagging.py#L124-L130
[ "def", "cleanup", "(", "self", ")", ":", "shutil", ".", "rmtree", "(", "self", ".", "temp_tagdir", ")", "# checkout_from_tag might operate on a subdirectory (mostly", "# 'gitclone'), so cleanup the parent dir as well", "parentdir", "=", "os", ".", "path", ".", "dirname", "(", "self", ".", "temp_tagdir", ")", "# ensure we don't remove anything important", "if", "os", ".", "path", ".", "basename", "(", "parentdir", ")", ".", "startswith", "(", "self", ".", "package", ")", ":", "os", ".", "rmdir", "(", "parentdir", ")", "os", ".", "chdir", "(", "self", ".", "start_directory", ")" ]
Ensure a non - array field
def _ensure_field ( self , key ) : if self . _has_field : self . _size += 2 # comma, space self . _has_field = True self . _size += len ( key ) + 4
6,049
https://github.com/kentik/kentikapi-py/blob/aa94c0b7eaf88409818b97967d7293e309e11bab/kentikapi/v5/tagging.py#L156-L162
[ "def", "read_rcfile", "(", ")", ":", "files", "=", "[", "'{}/.millipederc'", ".", "format", "(", "os", ".", "environ", ".", "get", "(", "'HOME'", ")", ")", ",", "'/usr/local/etc/millipederc'", ",", "'/etc/millipederc'", ",", "]", "for", "filepath", "in", "files", ":", "if", "os", ".", "path", ".", "isfile", "(", "filepath", ")", ":", "with", "open", "(", "filepath", ")", "as", "rcfile", ":", "return", "parse_rcfile", "(", "rcfile", ")", "return", "{", "}" ]
Ensure an array field
def _ensure_array ( self , key , value ) : if key not in self . _json_dict : self . _json_dict [ key ] = [ ] self . _size += 2 # brackets self . _ensure_field ( key ) if len ( self . _json_dict [ key ] ) > 0 : # this array already has an entry, so add comma and space self . _size += 2 if isinstance ( value , str ) : self . _size += 2 # quotes self . _size += len ( str ( value ) ) self . _json_dict [ key ] . append ( value )
6,050
https://github.com/kentik/kentikapi-py/blob/aa94c0b7eaf88409818b97967d7293e309e11bab/kentikapi/v5/tagging.py#L164-L180
[ "def", "parseReaderConfig", "(", "self", ",", "confdict", ")", ":", "logger", ".", "debug", "(", "'parseReaderConfig input: %s'", ",", "confdict", ")", "conf", "=", "{", "}", "for", "k", ",", "v", "in", "confdict", ".", "items", "(", ")", ":", "if", "not", "k", ".", "startswith", "(", "'Parameter'", ")", ":", "continue", "ty", "=", "v", "[", "'Type'", "]", "data", "=", "v", "[", "'Data'", "]", "vendor", "=", "None", "subtype", "=", "None", "try", ":", "vendor", ",", "subtype", "=", "v", "[", "'Vendor'", "]", ",", "v", "[", "'Subtype'", "]", "except", "KeyError", ":", "pass", "if", "ty", "==", "1023", ":", "if", "vendor", "==", "25882", "and", "subtype", "==", "37", ":", "tempc", "=", "struct", ".", "unpack", "(", "'!H'", ",", "data", ")", "[", "0", "]", "conf", ".", "update", "(", "temperature", "=", "tempc", ")", "else", ":", "conf", "[", "ty", "]", "=", "data", "return", "conf" ]
Add a single TCP flag - will be OR d into the existing bitmask
def add_tcp_flag ( self , tcp_flag ) : if tcp_flag not in [ 1 , 2 , 4 , 8 , 16 , 32 , 64 , 128 ] : raise ValueError ( "Invalid TCP flag. Valid: [1, 2, 4, 8, 16,32, 64, 128]" ) prev_size = 0 if self . _json_dict . get ( 'tcp_flags' ) is None : self . _json_dict [ 'tcp_flags' ] = 0 else : prev_size = len ( str ( self . _json_dict [ 'tcp_flags' ] ) ) + len ( 'tcp_flags' ) + 3 # str, key, key quotes, colon self . _json_dict [ 'tcp_flags' ] |= tcp_flag # update size new_size = len ( str ( self . _json_dict [ 'tcp_flags' ] ) ) + len ( 'tcp_flags' ) + 3 # str, key, key quotes, colon self . _size += new_size - prev_size if prev_size == 0 and self . _has_field : # add the comma and space self . _size += 2 self . _has_field = True
6,051
https://github.com/kentik/kentikapi-py/blob/aa94c0b7eaf88409818b97967d7293e309e11bab/kentikapi/v5/tagging.py#L283-L304
[ "def", "volumes_delete", "(", "storage_pool", ",", "logger", ")", ":", "try", ":", "for", "vol_name", "in", "storage_pool", ".", "listVolumes", "(", ")", ":", "try", ":", "vol", "=", "storage_pool", ".", "storageVolLookupByName", "(", "vol_name", ")", "vol", ".", "delete", "(", "0", ")", "except", "libvirt", ".", "libvirtError", ":", "logger", ".", "exception", "(", "\"Unable to delete storage volume %s.\"", ",", "vol_name", ")", "except", "libvirt", ".", "libvirtError", ":", "logger", ".", "exception", "(", "\"Unable to delete storage volumes.\"", ")" ]
Set the complete tcp flag bitmask
def set_tcp_flags ( self , tcp_flags ) : if tcp_flags < 0 or tcp_flags > 255 : raise ValueError ( "Invalid tcp_flags. Valid: 0-255." ) prev_size = 0 if self . _json_dict . get ( 'tcp_flags' ) is not None : prev_size = len ( str ( self . _json_dict [ 'tcp_flags' ] ) ) + len ( 'tcp_flags' ) + 3 # str, key, key quotes, colon self . _json_dict [ 'tcp_flags' ] = tcp_flags # update size new_size = len ( str ( self . _json_dict [ 'tcp_flags' ] ) ) + len ( 'tcp_flags' ) + 3 # str, key, key quotes, colon self . _size += new_size - prev_size if prev_size == 0 and self . _has_field : # add the comma and space self . _size += 2 self . _has_field = True
6,052
https://github.com/kentik/kentikapi-py/blob/aa94c0b7eaf88409818b97967d7293e309e11bab/kentikapi/v5/tagging.py#L306-L325
[ "def", "shapes", "(", "self", ")", ":", "shapes", "=", "[", "]", "curRes", "=", "self", "while", "curRes", "is", "not", "None", ":", "shapes", ".", "append", "(", "curRes", ".", "shape", ")", "curRes", "=", "curRes", ".", "nReservation", "return", "shapes" ]
Submit the batch returning the JSON - > dict from the last HTTP response
def _submit_batch ( self , url , batch ) : # TODO: validate column_name batch_parts = batch . parts ( ) guid = "" headers = { 'User-Agent' : 'kentik-python-api/0.1' , 'Content-Type' : 'application/json' , 'X-CH-Auth-Email' : self . api_email , 'X-CH-Auth-API-Token' : self . api_token } # submit each part last_part = dict ( ) for batch_part in batch_parts : # submit resp = requests . post ( url , headers = headers , data = batch_part . build_json ( guid ) ) # print the HTTP response to help debug print ( resp . text ) # break out at first sign of trouble resp . raise_for_status ( ) last_part = resp . json ( ) guid = last_part [ 'guid' ] if guid is None or len ( guid ) == 0 : raise RuntimeError ( 'guid not found in batch response' ) return last_part
6,053
https://github.com/kentik/kentikapi-py/blob/aa94c0b7eaf88409818b97967d7293e309e11bab/kentikapi/v5/tagging.py#L400-L429
[ "def", "credentials_match", "(", "self", ",", "user_detail", ",", "key", ")", ":", "if", "user_detail", ":", "creds", "=", "user_detail", ".", "get", "(", "'auth'", ")", "try", ":", "auth_encoder", ",", "creds_dict", "=", "swauth", ".", "authtypes", ".", "validate_creds", "(", "creds", ")", "except", "ValueError", "as", "e", ":", "self", ".", "logger", ".", "error", "(", "'%s'", "%", "e", ".", "args", "[", "0", "]", ")", "return", "False", "return", "user_detail", "and", "auth_encoder", ".", "match", "(", "key", ",", "creds", ",", "*", "*", "creds_dict", ")" ]
Submit a populator batch
def submit_populator_batch ( self , column_name , batch ) : if not set ( column_name ) . issubset ( _allowedCustomDimensionChars ) : raise ValueError ( 'Invalid custom dimension name "%s": must only contain letters, digits, and underscores' % column_name ) if len ( column_name ) < 3 or len ( column_name ) > 20 : raise ValueError ( 'Invalid value "%s": must be between 3-20 characters' % column_name ) url = '%s/api/v5/batch/customdimensions/%s/populators' % ( self . base_url , column_name ) resp_json_dict = self . _submit_batch ( url , batch ) if resp_json_dict . get ( 'error' ) is not None : raise RuntimeError ( 'Error received from server: %s' % resp_json_dict [ 'error' ] ) return resp_json_dict [ 'guid' ]
6,054
https://github.com/kentik/kentikapi-py/blob/aa94c0b7eaf88409818b97967d7293e309e11bab/kentikapi/v5/tagging.py#L431-L446
[ "def", "search", "(", "name", ")", ":", "name", "=", "six", ".", "text_type", "(", "name", ")", "all_ports", "=", "list_all", "(", ")", "if", "'/'", "in", "name", ":", "if", "name", ".", "count", "(", "'/'", ")", ">", "1", ":", "raise", "SaltInvocationError", "(", "'Invalid search string \\'{0}\\'. Port names cannot have more '", "'than one slash'", ")", "else", ":", "return", "fnmatch", ".", "filter", "(", "all_ports", ",", "name", ")", "else", ":", "ret", "=", "[", "]", "for", "port", "in", "all_ports", ":", "if", "fnmatch", ".", "fnmatch", "(", "port", ".", "rsplit", "(", "'/'", ")", "[", "-", "1", "]", ",", "name", ")", ":", "ret", ".", "append", "(", "port", ")", "return", "ret" ]
Submit a tag batch
def submit_tag_batch ( self , batch ) : url = '%s/api/v5/batch/tags' % self . base_url self . _submit_batch ( url , batch )
6,055
https://github.com/kentik/kentikapi-py/blob/aa94c0b7eaf88409818b97967d7293e309e11bab/kentikapi/v5/tagging.py#L448-L451
[ "def", "recv", "(", "self", ",", "timeout", "=", "None", ")", ":", "if", "timeout", ":", "try", ":", "testsock", "=", "self", ".", "_zmq", ".", "select", "(", "[", "self", ".", "socket", "]", ",", "[", "]", ",", "[", "]", ",", "timeout", ")", "[", "0", "]", "except", "zmq", ".", "ZMQError", "as", "e", ":", "if", "e", ".", "errno", "==", "errno", ".", "EINTR", ":", "testsock", "=", "None", "else", ":", "raise", "if", "not", "testsock", ":", "return", "rv", "=", "self", ".", "socket", ".", "recv", "(", "self", ".", "_zmq", ".", "NOBLOCK", ")", "return", "LogRecord", ".", "from_dict", "(", "json", ".", "loads", "(", "rv", ")", ")", "else", ":", "return", "super", "(", "ZeroMQPullSubscriber", ",", "self", ")", ".", "recv", "(", "timeout", ")" ]
Fetch the status of a batch given the guid
def fetch_batch_status ( self , guid ) : url = '%s/api/v5/batch/%s/status' % ( self . base_url , guid ) headers = { 'User-Agent' : 'kentik-python-api/0.1' , 'Content-Type' : 'application/json' , 'X-CH-Auth-Email' : self . api_email , 'X-CH-Auth-API-Token' : self . api_token } resp = requests . get ( url , headers = headers ) # break out at first sign of trouble resp . raise_for_status ( ) return BatchResponse ( guid , resp . json ( ) )
6,056
https://github.com/kentik/kentikapi-py/blob/aa94c0b7eaf88409818b97967d7293e309e11bab/kentikapi/v5/tagging.py#L453-L467
[ "def", "delete_table", "(", "self", ",", "schema", ",", "*", "*", "kwargs", ")", ":", "with", "self", ".", "connection", "(", "*", "*", "kwargs", ")", "as", "connection", ":", "kwargs", "[", "'connection'", "]", "=", "connection", "if", "not", "self", ".", "has_table", "(", "str", "(", "schema", ")", ",", "*", "*", "kwargs", ")", ":", "return", "True", "with", "self", ".", "transaction", "(", "*", "*", "kwargs", ")", ":", "self", ".", "_delete_table", "(", "schema", ",", "*", "*", "kwargs", ")", "return", "True" ]
reads files off disk resizes them and then predicts them files should be a list or itrerable of file paths that lead to images they are then loaded with opencv resized and predicted
def predict_files ( self , files ) : imgs = [ 0 ] * len ( files ) for i , file in enumerate ( files ) : img = cv2 . imread ( file ) . astype ( 'float64' ) img = cv2 . resize ( img , ( 224 , 224 ) ) img = preprocess_input ( img ) if img is None : print ( 'failed to open: {}, continuing...' . format ( file ) ) imgs [ i ] = img return self . model . predict ( np . array ( imgs ) )
6,057
https://github.com/SMAPPNYU/pysmap/blob/eb871992f40c53125129535e871525d5623c8c2d/pysmap/mltools/crowd_model.py#L44-L60
[ "def", "exception", "(", "self", ",", "url", ",", "exception", ")", ":", "return", "(", "time", ".", "time", "(", ")", "+", "self", ".", "ttl", ",", "self", ".", "factory", "(", "url", ")", ")" ]
Rename genome and slugify headers
def rename_genome ( genome_in , genome_out = None ) : if genome_out is None : genome_out = "{}_renamed.fa" . format ( genome_in . split ( "." ) [ 0 ] ) with open ( genome_out , "w" ) as output_handle : for record in SeqIO . parse ( genome_in , "fasta" ) : # Replace hyphens, tabs and whitespace with underscores new_record_id = record . id . replace ( " " , "_" ) new_record_id = new_record_id . replace ( "-" , "_" ) new_record_id = new_record_id . replace ( "\t" , "_" ) # Remove anything that's weird, i.e. not alphanumeric # or an underscore new_record_id = re . sub ( "[^_A-Za-z0-9]+" , "" , new_record_id ) header = ">{}\n" . format ( new_record_id ) output_handle . write ( header ) output_handle . write ( "{}\n" . format ( str ( record . seq ) ) )
6,058
https://github.com/koszullab/metaTOR/blob/0c1203d1dffedfa5ea380c0335b4baa9cfb7e89a/metator/scripts/fasta_utils.py#L18-L50
[ "def", "_create_download_failed_message", "(", "exception", ",", "url", ")", ":", "message", "=", "'Failed to download from:\\n{}\\nwith {}:\\n{}'", ".", "format", "(", "url", ",", "exception", ".", "__class__", ".", "__name__", ",", "exception", ")", "if", "_is_temporal_problem", "(", "exception", ")", ":", "if", "isinstance", "(", "exception", ",", "requests", ".", "ConnectionError", ")", ":", "message", "+=", "'\\nPlease check your internet connection and try again.'", "else", ":", "message", "+=", "'\\nThere might be a problem in connection or the server failed to process '", "'your request. Please try again.'", "elif", "isinstance", "(", "exception", ",", "requests", ".", "HTTPError", ")", ":", "try", ":", "server_message", "=", "''", "for", "elem", "in", "decode_data", "(", "exception", ".", "response", ".", "content", ",", "MimeType", ".", "XML", ")", ":", "if", "'ServiceException'", "in", "elem", ".", "tag", "or", "'Message'", "in", "elem", ".", "tag", ":", "server_message", "+=", "elem", ".", "text", ".", "strip", "(", "'\\n\\t '", ")", "except", "ElementTree", ".", "ParseError", ":", "server_message", "=", "exception", ".", "response", ".", "text", "message", "+=", "'\\nServer response: \"{}\"'", ".", "format", "(", "server_message", ")", "return", "message" ]
Filter fasta file according to various parameters .
def filter_genome ( genome_in , threshold = 500 , list_records = None ) : if list_records is None : def truth ( * args ) : del args return True is_a_record_to_keep = truth else : try : with open ( list_records ) as records_handle : records_to_keep = records_handle . readlines ( ) except OSError : if not hasattr ( list_records , "__contains__" ) : raise else : records_to_keep = list_records is_a_record_to_keep = records_to_keep . __contains__ records_to_write = ( record for record in SeqIO . parse ( genome_in , "fasta" ) if ( len ( record . seq ) >= threshold and is_a_record_to_keep ( record . id ) ) ) return records_to_write
6,059
https://github.com/koszullab/metaTOR/blob/0c1203d1dffedfa5ea380c0335b4baa9cfb7e89a/metator/scripts/fasta_utils.py#L53-L103
[ "def", "enter_alternate_screen", "(", "self", ")", ":", "if", "not", "self", ".", "_in_alternate_screen", ":", "GENERIC_READ", "=", "0x80000000", "GENERIC_WRITE", "=", "0x40000000", "# Create a new console buffer and activate that one.", "handle", "=", "self", ".", "_winapi", "(", "windll", ".", "kernel32", ".", "CreateConsoleScreenBuffer", ",", "GENERIC_READ", "|", "GENERIC_WRITE", ",", "DWORD", "(", "0", ")", ",", "None", ",", "DWORD", "(", "1", ")", ",", "None", ")", "self", ".", "_winapi", "(", "windll", ".", "kernel32", ".", "SetConsoleActiveScreenBuffer", ",", "handle", ")", "self", ".", "hconsole", "=", "handle", "self", ".", "_in_alternate_screen", "=", "True" ]
Rename prodigal output files
def rename_proteins ( prot_in , prot_out = None , chunk_size = DEFAULT_CHUNK_SIZE ) : if prot_out is None : prot_out = "{}_renamed.fa" . format ( prot_in . split ( "." ) [ 0 ] ) with open ( prot_out , "w" ) as prot_out_handle : for record in SeqIO . parse ( prot_in , "fasta" ) : header = record . description name , pos_start , _ , _ , _ = header . split ( "#" ) chunk_start = int ( pos_start ) // chunk_size name_split = name . split ( "_" ) contig_name = "_" . join ( name_split [ : - 1 ] ) gene_id = name_split [ - 1 ] new_record_id = "{}_{}__gene{}" . format ( contig_name , chunk_start , gene_id ) prot_out_handle . write ( ">{}\n" . format ( new_record_id ) ) prot_out_handle . write ( "{}\n" . format ( str ( record . seq ) ) )
6,060
https://github.com/koszullab/metaTOR/blob/0c1203d1dffedfa5ea380c0335b4baa9cfb7e89a/metator/scripts/fasta_utils.py#L106-L144
[ "def", "features_keep_using_features", "(", "obj", ",", "bounds", ")", ":", "# Build an R-tree index of bound features and their shapes.", "bounds_shapes", "=", "[", "(", "feature", ",", "shapely", ".", "geometry", ".", "shape", "(", "feature", "[", "'geometry'", "]", ")", ")", "for", "feature", "in", "tqdm", "(", "bounds", "[", "'features'", "]", ")", "if", "feature", "[", "'geometry'", "]", "is", "not", "None", "]", "index", "=", "rtree", ".", "index", ".", "Index", "(", ")", "for", "i", "in", "tqdm", "(", "range", "(", "len", "(", "bounds_shapes", ")", ")", ")", ":", "(", "feature", ",", "shape", ")", "=", "bounds_shapes", "[", "i", "]", "index", ".", "insert", "(", "i", ",", "shape", ".", "bounds", ")", "features_keep", "=", "[", "]", "for", "feature", "in", "tqdm", "(", "obj", "[", "'features'", "]", ")", ":", "if", "'geometry'", "in", "feature", "and", "'coordinates'", "in", "feature", "[", "'geometry'", "]", ":", "coordinates", "=", "feature", "[", "'geometry'", "]", "[", "'coordinates'", "]", "if", "any", "(", "[", "shape", ".", "contains", "(", "shapely", ".", "geometry", ".", "Point", "(", "lon", ",", "lat", ")", ")", "for", "(", "lon", ",", "lat", ")", "in", "coordinates", "for", "(", "feature", ",", "shape", ")", "in", "[", "bounds_shapes", "[", "i", "]", "for", "i", "in", "index", ".", "nearest", "(", "(", "lon", ",", "lat", ",", "lon", ",", "lat", ")", ",", "1", ")", "]", "]", ")", ":", "features_keep", ".", "append", "(", "feature", ")", "continue", "obj", "[", "'features'", "]", "=", "features_keep", "return", "obj" ]
Write FASTA records
def write_records ( records , output_file , split = False ) : if split : for record in records : with open ( "{}{}.fa" . format ( output_file , record . id ) , "w" ) as record_handle : SeqIO . write ( record , record_handle , "fasta" ) else : SeqIO . write ( records , output_file , "fasta" )
6,061
https://github.com/koszullab/metaTOR/blob/0c1203d1dffedfa5ea380c0335b4baa9cfb7e89a/metator/scripts/fasta_utils.py#L147-L171
[ "def", "get_stats_display_width", "(", "self", ",", "curse_msg", ",", "without_option", "=", "False", ")", ":", "try", ":", "if", "without_option", ":", "# Size without options", "c", "=", "len", "(", "max", "(", "''", ".", "join", "(", "[", "(", "u", "(", "u", "(", "nativestr", "(", "i", "[", "'msg'", "]", ")", ")", ".", "encode", "(", "'ascii'", ",", "'replace'", ")", ")", "if", "not", "i", "[", "'optional'", "]", "else", "\"\"", ")", "for", "i", "in", "curse_msg", "[", "'msgdict'", "]", "]", ")", ".", "split", "(", "'\\n'", ")", ",", "key", "=", "len", ")", ")", "else", ":", "# Size with all options", "c", "=", "len", "(", "max", "(", "''", ".", "join", "(", "[", "u", "(", "u", "(", "nativestr", "(", "i", "[", "'msg'", "]", ")", ")", ".", "encode", "(", "'ascii'", ",", "'replace'", ")", ")", "for", "i", "in", "curse_msg", "[", "'msgdict'", "]", "]", ")", ".", "split", "(", "'\\n'", ")", ",", "key", "=", "len", ")", ")", "except", "Exception", "as", "e", ":", "logger", ".", "debug", "(", "'ERROR: Can not compute plugin width ({})'", ".", "format", "(", "e", ")", ")", "return", "0", "else", ":", "return", "c" ]
Add a sample to this series .
def add_sample ( self , * * data ) : missing_dimensions = set ( data ) . difference ( self . dimensions ) if missing_dimensions : raise KeyError ( 'Dimensions not defined in this series: %s' % ', ' . join ( missing_dimensions ) ) for dim in self . dimensions : getattr ( self , dim ) . append ( data . get ( dim ) )
6,062
https://github.com/eclipse/unide.python/blob/b82e6a0bf7cc44a463c5d7cdb3d2199f8320c493/src/unide/process.py#L158-L167
[ "def", "cublasStrmm", "(", "handle", ",", "side", ",", "uplo", ",", "trans", ",", "diag", ",", "m", ",", "n", ",", "alpha", ",", "A", ",", "lda", ",", "B", ",", "ldb", ",", "C", ",", "ldc", ")", ":", "status", "=", "_libcublas", ".", "cublasStrmm_v2", "(", "handle", ",", "_CUBLAS_SIDE_MODE", "[", "side", "]", ",", "_CUBLAS_FILL_MODE", "[", "uplo", "]", ",", "_CUBLAS_OP", "[", "trans", "]", ",", "_CUBLAS_DIAG", "[", "diag", "]", ",", "m", ",", "n", ",", "ctypes", ".", "byref", "(", "ctypes", ".", "c_float", "(", "alpha", ")", ")", ",", "int", "(", "A", ")", ",", "lda", ",", "int", "(", "B", ")", ",", "ldb", ",", "int", "(", "C", ")", ",", "ldc", ")", "cublasCheckStatus", "(", "status", ")" ]
Yield the samples as dicts keyed by dimensions .
def samples ( self ) : names = self . series . dimensions for values in zip ( * ( getattr ( self . series , name ) for name in names ) ) : yield dict ( zip ( names , values ) )
6,063
https://github.com/eclipse/unide.python/blob/b82e6a0bf7cc44a463c5d7cdb3d2199f8320c493/src/unide/process.py#L298-L302
[ "def", "_is_requirement", "(", "line", ")", ":", "line", "=", "line", ".", "strip", "(", ")", "return", "line", "and", "not", "(", "line", ".", "startswith", "(", "\"-r\"", ")", "or", "line", ".", "startswith", "(", "\"#\"", ")", ")" ]
Create path to filename and saves binary data
def write_binary ( filename , data ) : dir = os . path . dirname ( filename ) if not os . path . exists ( dir ) : os . makedirs ( dir ) with open ( filename , 'wb' ) as f : f . write ( data )
6,064
https://github.com/vasilcovsky/pytinypng/blob/ac633e4aa41122c49a806f411e43a76d8f73058e/pytinypng/utils.py#L22-L28
[ "def", "_retry", "(", "function", ")", ":", "def", "inner", "(", "self", ",", "*", "*", "kwargs", ")", ":", "last_exception", "=", "None", "#for host in self.router.get_hosts(**kwargs):", "for", "host", "in", "self", ".", "host", ":", "try", ":", "return", "function", "(", "self", ",", "host", ",", "*", "*", "kwargs", ")", "except", "SolrError", "as", "e", ":", "self", ".", "logger", ".", "exception", "(", "e", ")", "raise", "except", "ConnectionError", "as", "e", ":", "self", ".", "logger", ".", "exception", "(", "\"Tried connecting to Solr, but couldn't because of the following exception.\"", ")", "if", "'401'", "in", "e", ".", "__str__", "(", ")", ":", "raise", "last_exception", "=", "e", "# raise the last exception after contacting all hosts instead of returning None", "if", "last_exception", "is", "not", "None", ":", "raise", "last_exception", "return", "inner" ]
Returns generator that contains filenames from root directory and ends with suffix
def files_with_exts ( root = '.' , suffix = '' ) : return ( os . path . join ( rootdir , filename ) for rootdir , dirnames , filenames in os . walk ( root ) for filename in filenames if filename . endswith ( suffix ) )
6,065
https://github.com/vasilcovsky/pytinypng/blob/ac633e4aa41122c49a806f411e43a76d8f73058e/pytinypng/utils.py#L37-L44
[ "def", "groupReadOnlyViews", "(", "self", ",", "person", ")", ":", "# this is a slightly awkward, specific API, but at the time of", "# writing, read-only views are the thing that the only caller cares", "# about. we need the contact type to get a read-only view for a", "# contact item. there is no way to get from a contact item to a", "# contact type, so this method can't be \"groupContactItems\" (which", "# seems to make more sense), unless it returned some weird data", "# structure which managed to associate contact items and contact", "# types.", "grouped", "=", "{", "}", "for", "contactType", "in", "self", ".", "getContactTypes", "(", ")", ":", "for", "contactItem", "in", "contactType", ".", "getContactItems", "(", "person", ")", ":", "contactGroup", "=", "contactType", ".", "getContactGroup", "(", "contactItem", ")", "if", "contactGroup", "is", "not", "None", ":", "contactGroup", "=", "contactGroup", ".", "groupName", "if", "contactGroup", "not", "in", "grouped", ":", "grouped", "[", "contactGroup", "]", "=", "[", "]", "grouped", "[", "contactGroup", "]", ".", "append", "(", "contactType", ".", "getReadOnlyView", "(", "contactItem", ")", ")", "return", "grouped" ]
Finds TinyPNG API key
def find_apikey ( ) : env_keys = [ 'TINYPNG_APIKEY' , 'TINYPNG_API_KEY' ] paths = [ ] paths . append ( os . path . join ( os . path . abspath ( "." ) , "tinypng.key" ) ) # local directory paths . append ( os . path . expanduser ( "~/.tinypng.key" ) ) # home directory for env_key in env_keys : if os . environ . get ( env_key ) : return os . environ . get ( env_key ) for path in paths : if os . path . exists ( path ) : return open ( path , 'rt' ) . read ( ) . strip ( ) return None
6,066
https://github.com/vasilcovsky/pytinypng/blob/ac633e4aa41122c49a806f411e43a76d8f73058e/pytinypng/utils.py#L67-L91
[ "def", "devices", "(", "self", ",", "timeout", "=", "None", ")", ":", "# b313b945 device usb:1-7 product:d2vzw model:SCH_I535 device:d2vzw", "# from Android system/core/adb/transport.c statename()", "re_device_info", "=", "re", ".", "compile", "(", "r'([^\\s]+)\\s+(offline|bootloader|device|host|recovery|sideload|no permissions|unauthorized|unknown)'", ")", "devices", "=", "[", "]", "lines", "=", "self", ".", "command_output", "(", "[", "\"devices\"", ",", "\"-l\"", "]", ",", "timeout", "=", "timeout", ")", ".", "split", "(", "'\\n'", ")", "for", "line", "in", "lines", ":", "if", "line", "==", "'List of devices attached '", ":", "continue", "match", "=", "re_device_info", ".", "match", "(", "line", ")", "if", "match", ":", "device", "=", "{", "'device_serial'", ":", "match", ".", "group", "(", "1", ")", ",", "'state'", ":", "match", ".", "group", "(", "2", ")", "}", "remainder", "=", "line", "[", "match", ".", "end", "(", "2", ")", ":", "]", ".", "strip", "(", ")", "if", "remainder", ":", "try", ":", "device", ".", "update", "(", "dict", "(", "[", "j", ".", "split", "(", "':'", ")", "for", "j", "in", "remainder", ".", "split", "(", "' '", ")", "]", ")", ")", "except", "ValueError", ":", "self", ".", "_logger", ".", "warning", "(", "'devices: Unable to parse '", "'remainder for device %s'", "%", "line", ")", "devices", ".", "append", "(", "device", ")", "return", "devices" ]
Compare two RPM strings to determine which is newer
def compare_packages ( rpm_str_a , rpm_str_b , arch_provided = True ) : logger . debug ( 'resolve_versions(%s, %s)' , rpm_str_a , rpm_str_b ) evr_a = parse_package ( rpm_str_a , arch_provided ) [ 'EVR' ] evr_b = parse_package ( rpm_str_b , arch_provided ) [ 'EVR' ] return labelCompare ( evr_a , evr_b )
6,067
https://github.com/ihiji/version_utils/blob/7f63d80faca8e76274b6e8dff7637cc7cb8d848c/version_utils/rpm.py#L41-L65
[ "def", "get_experiment_in_group", "(", "self", ",", "group", ",", "bucketing_id", ")", ":", "experiment_id", "=", "self", ".", "bucketer", ".", "find_bucket", "(", "bucketing_id", ",", "group", ".", "id", ",", "group", ".", "trafficAllocation", ")", "if", "experiment_id", ":", "experiment", "=", "self", ".", "config", ".", "get_experiment_from_id", "(", "experiment_id", ")", "if", "experiment", ":", "self", ".", "logger", ".", "info", "(", "'User with bucketing ID \"%s\" is in experiment %s of group %s.'", "%", "(", "bucketing_id", ",", "experiment", ".", "key", ",", "group", ".", "id", ")", ")", "return", "experiment", "self", ".", "logger", ".", "info", "(", "'User with bucketing ID \"%s\" is not in any experiments of group %s.'", "%", "(", "bucketing_id", ",", "group", ".", "id", ")", ")", "return", "None" ]
Compare two EVR tuples to determine which is newer
def compare_evrs ( evr_a , evr_b ) : a_epoch , a_ver , a_rel = evr_a b_epoch , b_ver , b_rel = evr_b if a_epoch != b_epoch : return a_newer if a_epoch > b_epoch else b_newer ver_comp = compare_versions ( a_ver , b_ver ) if ver_comp != a_eq_b : return ver_comp rel_comp = compare_versions ( a_rel , b_rel ) return rel_comp
6,068
https://github.com/ihiji/version_utils/blob/7f63d80faca8e76274b6e8dff7637cc7cb8d848c/version_utils/rpm.py#L68-L90
[ "async", "def", "_unsubscribe", "(", "self", ",", "channels", ",", "is_mask", ")", ":", "vanished", "=", "[", "]", "if", "channels", ":", "for", "channel", "in", "channels", ":", "key", "=", "channel", ",", "is_mask", "self", ".", "_channels", ".", "remove", "(", "key", ")", "self", ".", "_plugin", ".", "_subscriptions", "[", "key", "]", ".", "remove", "(", "self", ".", "_queue", ")", "if", "not", "self", ".", "_plugin", ".", "_subscriptions", "[", "key", "]", ":", "# we were last sub?", "vanished", ".", "append", "(", "channel", ")", "del", "self", ".", "_plugin", ".", "_subscriptions", "[", "key", "]", "else", ":", "while", "self", ".", "_channels", ":", "channel", ",", "is_mask", "=", "key", "=", "self", ".", "_channels", ".", "pop", "(", ")", "self", ".", "_plugin", ".", "_subscriptions", "[", "key", "]", ".", "remove", "(", "self", ".", "_queue", ")", "if", "not", "self", ".", "_plugin", ".", "_subscriptions", "[", "key", "]", ":", "vanished", ".", "append", "(", "channel", ")", "del", "self", ".", "_plugin", ".", "_subscriptions", "[", "key", "]", "if", "vanished", ":", "await", "getattr", "(", "self", ".", "_sub", ",", "'punsubscribe'", "if", "is_mask", "else", "'unsubscribe'", ")", "(", "vanished", ")" ]
Compare two RPM version strings
def compare_versions ( version_a , version_b ) : logger . debug ( 'compare_versions(%s, %s)' , version_a , version_b ) if version_a == version_b : return a_eq_b try : chars_a , chars_b = list ( version_a ) , list ( version_b ) except TypeError : raise RpmError ( 'Could not compare {0} to ' '{1}' . format ( version_a , version_b ) ) while len ( chars_a ) != 0 and len ( chars_b ) != 0 : logger . debug ( 'starting loop comparing %s ' 'to %s' , chars_a , chars_b ) _check_leading ( chars_a , chars_b ) if chars_a [ 0 ] == '~' and chars_b [ 0 ] == '~' : map ( lambda x : x . pop ( 0 ) , ( chars_a , chars_b ) ) elif chars_a [ 0 ] == '~' : return b_newer elif chars_b [ 0 ] == '~' : return a_newer if len ( chars_a ) == 0 or len ( chars_b ) == 0 : break block_res = _get_block_result ( chars_a , chars_b ) if block_res != a_eq_b : return block_res if len ( chars_a ) == len ( chars_b ) : logger . debug ( 'versions are equal' ) return a_eq_b else : logger . debug ( 'versions not equal' ) return a_newer if len ( chars_a ) > len ( chars_b ) else b_newer
6,069
https://github.com/ihiji/version_utils/blob/7f63d80faca8e76274b6e8dff7637cc7cb8d848c/version_utils/rpm.py#L115-L185
[ "def", "generate_timeline", "(", "usnjrnl", ",", "filesystem_content", ")", ":", "journal_content", "=", "defaultdict", "(", "list", ")", "for", "event", "in", "usnjrnl", ":", "journal_content", "[", "event", ".", "inode", "]", ".", "append", "(", "event", ")", "for", "event", "in", "usnjrnl", ":", "try", ":", "dirent", "=", "lookup_dirent", "(", "event", ",", "filesystem_content", ",", "journal_content", ")", "yield", "UsnJrnlEvent", "(", "dirent", ".", "inode", ",", "dirent", ".", "path", ",", "dirent", ".", "size", ",", "dirent", ".", "allocated", ",", "event", ".", "timestamp", ",", "event", ".", "changes", ",", "event", ".", "attributes", ")", "except", "LookupError", "as", "error", ":", "LOGGER", ".", "debug", "(", "error", ")" ]
Parse an RPM version string
def package ( package_string , arch_included = True ) : logger . debug ( 'package(%s, %s)' , package_string , arch_included ) pkg_info = parse_package ( package_string , arch_included ) pkg = Package ( pkg_info [ 'name' ] , pkg_info [ 'EVR' ] [ 0 ] , pkg_info [ 'EVR' ] [ 1 ] , pkg_info [ 'EVR' ] [ 2 ] , pkg_info [ 'arch' ] , package_str = package_string ) return pkg
6,070
https://github.com/ihiji/version_utils/blob/7f63d80faca8e76274b6e8dff7637cc7cb8d848c/version_utils/rpm.py#L188-L209
[ "def", "generate_timeline", "(", "usnjrnl", ",", "filesystem_content", ")", ":", "journal_content", "=", "defaultdict", "(", "list", ")", "for", "event", "in", "usnjrnl", ":", "journal_content", "[", "event", ".", "inode", "]", ".", "append", "(", "event", ")", "for", "event", "in", "usnjrnl", ":", "try", ":", "dirent", "=", "lookup_dirent", "(", "event", ",", "filesystem_content", ",", "journal_content", ")", "yield", "UsnJrnlEvent", "(", "dirent", ".", "inode", ",", "dirent", ".", "path", ",", "dirent", ".", "size", ",", "dirent", ".", "allocated", ",", "event", ".", "timestamp", ",", "event", ".", "changes", ",", "event", ".", "attributes", ")", "except", "LookupError", "as", "error", ":", "LOGGER", ".", "debug", "(", "error", ")" ]
Parse an RPM version string to get name version and arch
def parse_package ( package_string , arch_included = True ) : # Yum sets epoch values to 0 if they are not specified logger . debug ( 'parse_package(%s, %s)' , package_string , arch_included ) default_epoch = '0' arch = None if arch_included : char_list = list ( package_string ) arch = _pop_arch ( char_list ) package_string = '' . join ( char_list ) logger . debug ( 'updated version_string: %s' , package_string ) try : name , epoch , version , release = _rpm_re . match ( package_string ) . groups ( ) except AttributeError : raise RpmError ( 'Could not parse package string: %s' % package_string ) if epoch == '' or epoch is None : epoch = default_epoch info = { 'name' : name , 'EVR' : ( epoch , version , release ) , 'arch' : arch } logger . debug ( 'parsed information: %s' , info ) return info
6,071
https://github.com/ihiji/version_utils/blob/7f63d80faca8e76274b6e8dff7637cc7cb8d848c/version_utils/rpm.py#L212-L252
[ "def", "set_guest_access", "(", "self", ",", "allow_guests", ")", ":", "guest_access", "=", "\"can_join\"", "if", "allow_guests", "else", "\"forbidden\"", "try", ":", "self", ".", "client", ".", "api", ".", "set_guest_access", "(", "self", ".", "room_id", ",", "guest_access", ")", "self", ".", "guest_access", "=", "allow_guests", "return", "True", "except", "MatrixRequestError", ":", "return", "False" ]
Pop the architecture from a version string and return it
def _pop_arch ( char_list ) : logger . debug ( '_pop_arch(%s)' , char_list ) arch_list = [ ] char = char_list . pop ( ) while char != '.' : arch_list . insert ( 0 , char ) try : char = char_list . pop ( ) except IndexError : # Raised for a string with no periods raise RpmError ( 'Could not parse an architecture. Did you mean to ' 'set the arch_included flag to False?' ) logger . debug ( 'arch chars: %s' , arch_list ) return '' . join ( arch_list )
6,072
https://github.com/ihiji/version_utils/blob/7f63d80faca8e76274b6e8dff7637cc7cb8d848c/version_utils/rpm.py#L255-L276
[ "def", "metricCompute", "(", "self", ",", "sensorToBody", ",", "bodyToSpecificObject", ")", ":", "overlaps", "=", "self", ".", "metricConnections", ".", "computeActivity", "(", "{", "\"bodyToSpecificObject\"", ":", "bodyToSpecificObject", ",", "\"sensorToBody\"", ":", "sensorToBody", ",", "}", ")", "self", ".", "activeMetricSegments", "=", "np", ".", "where", "(", "overlaps", ">=", "2", ")", "[", "0", "]", "self", ".", "activeCells", "=", "np", ".", "unique", "(", "self", ".", "metricConnections", ".", "mapSegmentsToCells", "(", "self", ".", "activeMetricSegments", ")", ")" ]
Remove any non - alphanumeric or non - ~ leading characters
def _check_leading ( * char_lists ) : logger . debug ( '_check_leading(%s)' , char_lists ) for char_list in char_lists : while ( len ( char_list ) != 0 and not char_list [ 0 ] . isalnum ( ) and not char_list [ 0 ] == '~' ) : char_list . pop ( 0 ) logger . debug ( 'updated list: %s' , char_list )
6,073
https://github.com/ihiji/version_utils/blob/7f63d80faca8e76274b6e8dff7637cc7cb8d848c/version_utils/rpm.py#L279-L295
[ "def", "wait_turrets", "(", "self", ",", "wait_for", ")", ":", "print", "(", "\"Waiting for %d turrets\"", "%", "(", "wait_for", "-", "len", "(", "self", ".", "turrets_manager", ".", "turrets", ")", ")", ")", "while", "len", "(", "self", ".", "turrets_manager", ".", "turrets", ")", "<", "wait_for", ":", "self", ".", "turrets_manager", ".", "status_request", "(", ")", "socks", "=", "dict", "(", "self", ".", "poller", ".", "poll", "(", "2000", ")", ")", "if", "self", ".", "result_collector", "in", "socks", ":", "data", "=", "self", ".", "result_collector", ".", "recv_json", "(", ")", "self", ".", "turrets_manager", ".", "process_message", "(", "data", ")", "print", "(", "\"Waiting for %d turrets\"", "%", "(", "wait_for", "-", "len", "(", "self", ".", "turrets_manager", ".", "turrets", ")", ")", ")" ]
Trim any zeros from provided character lists
def _trim_zeros ( * char_lists ) : logger . debug ( '_trim_zeros(%s)' , char_lists ) for char_list in char_lists : while len ( char_list ) != 0 and char_list [ 0 ] == '0' : char_list . pop ( 0 ) logger . debug ( 'updated block: %s' , char_list )
6,074
https://github.com/ihiji/version_utils/blob/7f63d80faca8e76274b6e8dff7637cc7cb8d848c/version_utils/rpm.py#L298-L313
[ "def", "getAssociation", "(", "self", ",", "server_url", ",", "handle", "=", "None", ")", ":", "if", "handle", "is", "None", ":", "handle", "=", "''", "# The filename with the empty handle is a prefix of all other", "# associations for the given server URL.", "filename", "=", "self", ".", "getAssociationFilename", "(", "server_url", ",", "handle", ")", "if", "handle", ":", "return", "self", ".", "_getAssociation", "(", "filename", ")", "else", ":", "association_files", "=", "os", ".", "listdir", "(", "self", ".", "association_dir", ")", "matching_files", "=", "[", "]", "# strip off the path to do the comparison", "name", "=", "os", ".", "path", ".", "basename", "(", "filename", ")", "for", "association_file", "in", "association_files", ":", "if", "association_file", ".", "startswith", "(", "name", ")", ":", "matching_files", ".", "append", "(", "association_file", ")", "matching_associations", "=", "[", "]", "# read the matching files and sort by time issued", "for", "name", "in", "matching_files", ":", "full_name", "=", "os", ".", "path", ".", "join", "(", "self", ".", "association_dir", ",", "name", ")", "association", "=", "self", ".", "_getAssociation", "(", "full_name", ")", "if", "association", "is", "not", "None", ":", "matching_associations", ".", "append", "(", "(", "association", ".", "issued", ",", "association", ")", ")", "matching_associations", ".", "sort", "(", ")", "# return the most recently issued one.", "if", "matching_associations", ":", "(", "_", ",", "assoc", ")", "=", "matching_associations", "[", "-", "1", "]", "return", "assoc", "else", ":", "return", "None" ]
Pop consecutive digits from the front of list and return them
def _pop_digits ( char_list ) : logger . debug ( '_pop_digits(%s)' , char_list ) digits = [ ] while len ( char_list ) != 0 and char_list [ 0 ] . isdigit ( ) : digits . append ( char_list . pop ( 0 ) ) logger . debug ( 'got digits: %s' , digits ) logger . debug ( 'updated char list: %s' , char_list ) return digits
6,075
https://github.com/ihiji/version_utils/blob/7f63d80faca8e76274b6e8dff7637cc7cb8d848c/version_utils/rpm.py#L316-L333
[ "def", "_link_dimensioned_streams", "(", "self", ")", ":", "streams", "=", "[", "s", "for", "s", "in", "self", ".", "streams", "if", "any", "(", "k", "in", "self", ".", "dimensions", "for", "k", "in", "s", ".", "contents", ")", "]", "for", "s", "in", "streams", ":", "s", ".", "add_subscriber", "(", "self", ".", "_stream_update", ",", "1", ")" ]
Pop consecutive letters from the front of a list and return them
def _pop_letters ( char_list ) : logger . debug ( '_pop_letters(%s)' , char_list ) letters = [ ] while len ( char_list ) != 0 and char_list [ 0 ] . isalpha ( ) : letters . append ( char_list . pop ( 0 ) ) logger . debug ( 'got letters: %s' , letters ) logger . debug ( 'updated char list: %s' , char_list ) return letters
6,076
https://github.com/ihiji/version_utils/blob/7f63d80faca8e76274b6e8dff7637cc7cb8d848c/version_utils/rpm.py#L336-L353
[ "def", "connect", "(", "cls", ",", "dbname", ")", ":", "test_times_schema", "=", "\"\"\"\n CREATE TABLE IF NOT EXISTS test_times (\n file text,\n module text,\n class text,\n func text,\n elapsed float\n )\n \"\"\"", "setup_times_schema", "=", "\"\"\"\n CREATE TABLE IF NOT EXISTS setup_times (\n file text,\n module text,\n class text,\n func text,\n elapsed float\n )\n \"\"\"", "schemas", "=", "[", "test_times_schema", ",", "setup_times_schema", "]", "db_file", "=", "'{}.db'", ".", "format", "(", "dbname", ")", "cls", ".", "connection", "=", "sqlite3", ".", "connect", "(", "db_file", ")", "for", "s", "in", "schemas", ":", "cls", ".", "connection", ".", "execute", "(", "s", ")" ]
Compare two blocks of characters
def _compare_blocks ( block_a , block_b ) : logger . debug ( '_compare_blocks(%s, %s)' , block_a , block_b ) if block_a [ 0 ] . isdigit ( ) : _trim_zeros ( block_a , block_b ) if len ( block_a ) != len ( block_b ) : logger . debug ( 'block lengths are not equal' ) return a_newer if len ( block_a ) > len ( block_b ) else b_newer if block_a == block_b : logger . debug ( 'blocks are equal' ) return a_eq_b else : logger . debug ( 'blocks are not equal' ) return a_newer if block_a > block_b else b_newer
6,077
https://github.com/ihiji/version_utils/blob/7f63d80faca8e76274b6e8dff7637cc7cb8d848c/version_utils/rpm.py#L356-L393
[ "def", "_adapt_WSDateTime", "(", "dt", ")", ":", "try", ":", "ts", "=", "int", "(", "(", "dt", ".", "replace", "(", "tzinfo", "=", "pytz", ".", "utc", ")", "-", "datetime", "(", "1970", ",", "1", ",", "1", ",", "tzinfo", "=", "pytz", ".", "utc", ")", ")", ".", "total_seconds", "(", ")", ")", "except", "(", "OverflowError", ",", "OSError", ")", ":", "if", "dt", "<", "datetime", ".", "now", "(", ")", ":", "ts", "=", "0", "else", ":", "ts", "=", "2", "**", "63", "-", "1", "return", "ts" ]
Get the first block from two character lists and compare
def _get_block_result ( chars_a , chars_b ) : logger . debug ( '_get_block_result(%s, %s)' , chars_a , chars_b ) first_is_digit = chars_a [ 0 ] . isdigit ( ) pop_func = _pop_digits if first_is_digit else _pop_letters return_if_no_b = a_newer if first_is_digit else b_newer block_a , block_b = pop_func ( chars_a ) , pop_func ( chars_b ) if len ( block_b ) == 0 : logger . debug ( 'blocks are equal' ) return return_if_no_b return _compare_blocks ( block_a , block_b )
6,078
https://github.com/ihiji/version_utils/blob/7f63d80faca8e76274b6e8dff7637cc7cb8d848c/version_utils/rpm.py#L396-L430
[ "def", "catalogFactory", "(", "name", ",", "*", "*", "kwargs", ")", ":", "fn", "=", "lambda", "member", ":", "inspect", ".", "isclass", "(", "member", ")", "and", "member", ".", "__module__", "==", "__name__", "catalogs", "=", "odict", "(", "inspect", ".", "getmembers", "(", "sys", ".", "modules", "[", "__name__", "]", ",", "fn", ")", ")", "if", "name", "not", "in", "list", "(", "catalogs", ".", "keys", "(", ")", ")", ":", "msg", "=", "\"%s not found in catalogs:\\n %s\"", "%", "(", "name", ",", "list", "(", "kernels", ".", "keys", "(", ")", ")", ")", "logger", ".", "error", "(", "msg", ")", "msg", "=", "\"Unrecognized catalog: %s\"", "%", "name", "raise", "Exception", "(", "msg", ")", "return", "catalogs", "[", "name", "]", "(", "*", "*", "kwargs", ")" ]
list all channels
def list_ ( * , cursor : str = None , exclude_archived : bool = None , exclude_members : bool = None , limit : int = None ) -> snug . Query [ Page [ t . List [ Channel ] ] ] : kwargs = { 'exclude_archived' : exclude_archived , 'exclude_members' : exclude_members , 'limit' : limit } response = yield { 'cursor' : cursor , * * kwargs } try : next_cursor = response [ 'response_metadata' ] [ 'next_cursor' ] except KeyError : next_query = None else : next_query = list_ ( * * kwargs , cursor = next_cursor ) return Page ( load_channel_list ( response [ 'channels' ] ) , next_query = next_query , )
6,079
https://github.com/ariebovenberg/snug/blob/4f5cd30e6b7b2c3f0ad3cc10be865bd8900b38ef/examples/slack/channels.py#L14-L34
[ "def", "run_latex", "(", "self", ",", "cmd_wo_infile", ",", "path_outfile", ")", ":", "# Generate path variables", "text_template", "=", "self", ".", "get_text_template", "(", ")", "path_template_random", "=", "random_name_filepath", "(", "self", ".", "path_template", ")", "path_template_dir", "=", "os", ".", "path", ".", "dirname", "(", "path_template_random", ")", "path_template_random_no_ext", "=", "os", ".", "path", ".", "splitext", "(", "path_template_random", ")", "[", "0", "]", "path_template_random_aux", "=", "path_template_random_no_ext", "+", "\".aux\"", "ext_outfile", "=", "os", ".", "path", ".", "splitext", "(", "path_outfile", ")", "[", "-", "1", "]", "path_outfile_initial", "=", "\"{}{}\"", ".", "format", "(", "path_template_random_no_ext", ",", "ext_outfile", ")", "# Handle special case of MS Word", "if", "cmd_wo_infile", "[", "0", "]", "==", "'latex2rtf'", "and", "len", "(", "cmd_wo_infile", ")", "==", "1", ":", "cmd_docx", "=", "cmd_wo_infile", "+", "[", "'-o'", ",", "path_outfile_initial", "]", "# Need to run pdf2latex to generate aux file", "cmd_wo_infile", "=", "[", "'pdflatex'", "]", "else", ":", "cmd_docx", "=", "None", "try", ":", "# Write template variable to a temporary file", "with", "open", "(", "path_template_random", ",", "'w'", ")", "as", "temp_file", ":", "temp_file", ".", "write", "(", "text_template", ")", "cmd", "=", "cmd_wo_infile", "+", "[", "path_template_random", "]", "old_aux", ",", "new_aux", "=", "random_str_uuid", "(", "1", ")", ",", "random_str_uuid", "(", "2", ")", "while", "old_aux", "!=", "new_aux", ":", "# Run the relevant Latex command until old aux != new aux", "stdout", "=", "check_output_cwd", "(", "cmd", ",", "path_template_dir", ")", "LOGGER", ".", "debug", "(", "'\\n'", ".", "join", "(", "stdout", ")", ")", "old_aux", ",", "new_aux", "=", "new_aux", ",", "read_file", "(", "path_template_random_aux", ")", "# Handle special case of MS Word", "if", "cmd_docx", ":", "cmd_word", "=", "cmd_docx", "+", "[", 
"path_template_random", "]", "stdout", "=", "check_output_cwd", "(", "cmd_word", ",", "path_template_dir", ")", "LOGGER", ".", "debug", "(", "'\\n'", ".", "join", "(", "stdout", ")", ")", "shutil", ".", "move", "(", "path_outfile_initial", ",", "path_outfile", ")", "LOGGER", ".", "info", "(", "\"Built {} from {}\"", ".", "format", "(", "path_outfile", ",", "self", ".", "path_template", ")", ")", "except", "Exception", ":", "LOGGER", ".", "exception", "(", "\"Failed during latex build\"", ")", "finally", ":", "# Clean up all temporary files associated with the", "# random file identifier for the process files", "path_gen", "=", "list_filepathes_with_predicate", "(", "path_template_dir", ",", "path_template_random_no_ext", ")", "for", "path_gen_file", "in", "path_gen", ":", "os", ".", "remove", "(", "path_gen_file", ")", "return", "text_template" ]
create a new channel
def create ( name : str , * , validate : bool = None ) -> snug . Query [ Channel ] : return { 'name' : name , 'validate' : validate }
6,080
https://github.com/ariebovenberg/snug/blob/4f5cd30e6b7b2c3f0ad3cc10be865bd8900b38ef/examples/slack/channels.py#L38-L41
[ "def", "_search", "(", "prefix", "=", "\"latest/\"", ")", ":", "ret", "=", "{", "}", "linedata", "=", "http", ".", "query", "(", "os", ".", "path", ".", "join", "(", "HOST", ",", "prefix", ")", ",", "headers", "=", "True", ")", "if", "'body'", "not", "in", "linedata", ":", "return", "ret", "body", "=", "salt", ".", "utils", ".", "stringutils", ".", "to_unicode", "(", "linedata", "[", "'body'", "]", ")", "if", "linedata", "[", "'headers'", "]", ".", "get", "(", "'Content-Type'", ",", "'text/plain'", ")", "==", "'application/octet-stream'", ":", "return", "body", "for", "line", "in", "body", ".", "split", "(", "'\\n'", ")", ":", "if", "line", ".", "endswith", "(", "'/'", ")", ":", "ret", "[", "line", "[", ":", "-", "1", "]", "]", "=", "_search", "(", "prefix", "=", "os", ".", "path", ".", "join", "(", "prefix", ",", "line", ")", ")", "elif", "prefix", "==", "'latest/'", ":", "# (gtmanfred) The first level should have a forward slash since", "# they have stuff underneath. This will not be doubled up though,", "# because lines ending with a slash are checked first.", "ret", "[", "line", "]", "=", "_search", "(", "prefix", "=", "os", ".", "path", ".", "join", "(", "prefix", ",", "line", "+", "'/'", ")", ")", "elif", "line", ".", "endswith", "(", "(", "'dynamic'", ",", "'meta-data'", ")", ")", ":", "ret", "[", "line", "]", "=", "_search", "(", "prefix", "=", "os", ".", "path", ".", "join", "(", "prefix", ",", "line", ")", ")", "elif", "'='", "in", "line", ":", "key", ",", "value", "=", "line", ".", "split", "(", "'='", ")", "ret", "[", "value", "]", "=", "_search", "(", "prefix", "=", "os", ".", "path", ".", "join", "(", "prefix", ",", "key", ")", ")", "else", ":", "retdata", "=", "http", ".", "query", "(", "os", ".", "path", ".", "join", "(", "HOST", ",", "prefix", ",", "line", ")", ")", ".", "get", "(", "'body'", ",", "None", ")", "# (gtmanfred) This try except block is slightly faster than", "# checking if the string starts with a curly brace", "if", "isinstance", 
"(", "retdata", ",", "six", ".", "binary_type", ")", ":", "try", ":", "ret", "[", "line", "]", "=", "salt", ".", "utils", ".", "json", ".", "loads", "(", "salt", ".", "utils", ".", "stringutils", ".", "to_unicode", "(", "retdata", ")", ")", "except", "ValueError", ":", "ret", "[", "line", "]", "=", "salt", ".", "utils", ".", "stringutils", ".", "to_unicode", "(", "retdata", ")", "else", ":", "ret", "[", "line", "]", "=", "retdata", "return", "salt", ".", "utils", ".", "data", ".", "decode", "(", "ret", ")" ]
Returns tube by its name
def tube ( self , name ) : if name in self . _tubes : return self . _tubes [ name ] assert name , 'Tube name must be specified' t = self . _tube_cls ( self , name ) self . _tubes [ name ] = t return t
6,081
https://github.com/igorcoding/asynctnt-queue/blob/75719b2dd27e8314ae924aea6a7a85be8f48ecc5/asynctnt_queue/queue.py#L58-L72
[ "def", "update", "(", "request", ")", ":", "session", "=", "yield", "from", "app", ".", "ps", ".", "session", ".", "load", "(", "request", ")", "session", "[", "'random'", "]", "=", "random", ".", "random", "(", ")", "return", "session" ]
Returns a JSON MeasurementPayload ready to be send through a transport .
def device_measurement ( device , ts = None , part = None , result = None , code = None , * * kwargs ) : if ts is None : ts = local_now ( ) payload = MeasurementPayload ( device = device , part = part ) m = Measurement ( ts , result , code , list ( kwargs ) ) payload . measurements . append ( m ) m . add_sample ( ts , * * kwargs ) return dumps ( payload )
6,082
https://github.com/eclipse/unide.python/blob/b82e6a0bf7cc44a463c5d7cdb3d2199f8320c493/src/unide/measurement.py#L272-L303
[ "def", "on_redis_error", "(", "self", ",", "fname", ",", "exc_type", ",", "exc_value", ")", ":", "if", "self", ".", "shared_client", ":", "Storage", ".", "storage", "=", "None", "else", ":", "self", ".", "storage", "=", "None", "if", "self", ".", "context", ".", "config", ".", "REDIS_STORAGE_IGNORE_ERRORS", "is", "True", ":", "logger", ".", "error", "(", "\"[REDIS_STORAGE] %s\"", "%", "exc_value", ")", "if", "fname", "==", "'_exists'", ":", "return", "False", "return", "None", "else", ":", "raise", "exc_value" ]
Add a sample to this measurements .
def add_sample ( self , ts , * * kwargs ) : if not self . series . offsets : self . ts = ts offset = 0 else : dt = ts - self . ts offset = ( dt . days * 24 * 60 * 60 * 1000 + dt . seconds * 1000 + dt . microseconds // 1000 ) self . series . add_sample ( offset , * * kwargs )
6,083
https://github.com/eclipse/unide.python/blob/b82e6a0bf7cc44a463c5d7cdb3d2199f8320c493/src/unide/measurement.py#L203-L212
[ "def", "share_file_with_link", "(", "self", ",", "path", ",", "*", "*", "kwargs", ")", ":", "perms", "=", "kwargs", ".", "get", "(", "'perms'", ",", "None", ")", "public_upload", "=", "kwargs", ".", "get", "(", "'public_upload'", ",", "'false'", ")", "password", "=", "kwargs", ".", "get", "(", "'password'", ",", "None", ")", "path", "=", "self", ".", "_normalize_path", "(", "path", ")", "post_data", "=", "{", "'shareType'", ":", "self", ".", "OCS_SHARE_TYPE_LINK", ",", "'path'", ":", "self", ".", "_encode_string", "(", "path", ")", ",", "}", "if", "(", "public_upload", "is", "not", "None", ")", "and", "(", "isinstance", "(", "public_upload", ",", "bool", ")", ")", ":", "post_data", "[", "'publicUpload'", "]", "=", "str", "(", "public_upload", ")", ".", "lower", "(", ")", "if", "isinstance", "(", "password", ",", "six", ".", "string_types", ")", ":", "post_data", "[", "'password'", "]", "=", "password", "if", "perms", ":", "post_data", "[", "'permissions'", "]", "=", "perms", "res", "=", "self", ".", "_make_ocs_request", "(", "'POST'", ",", "self", ".", "OCS_SERVICE_SHARE", ",", "'shares'", ",", "data", "=", "post_data", ")", "if", "res", ".", "status_code", "==", "200", ":", "tree", "=", "ET", ".", "fromstring", "(", "res", ".", "content", ")", "self", ".", "_check_ocs_status", "(", "tree", ")", "data_el", "=", "tree", ".", "find", "(", "'data'", ")", "return", "ShareInfo", "(", "{", "'id'", ":", "data_el", ".", "find", "(", "'id'", ")", ".", "text", ",", "'path'", ":", "path", ",", "'url'", ":", "data_el", ".", "find", "(", "'url'", ")", ".", "text", ",", "'token'", ":", "data_el", ".", "find", "(", "'token'", ")", ".", "text", "}", ")", "raise", "HTTPResponseError", "(", "res", ")" ]
Yield samples as dictionaries keyed by dimensions .
def samples ( self ) : names = self . series . dimensions for n , offset in enumerate ( self . series . offsets ) : dt = datetime . timedelta ( microseconds = offset * 1000 ) d = { "ts" : self . ts + dt } for name in names : d [ name ] = getattr ( self . series , name ) [ n ] yield d
6,084
https://github.com/eclipse/unide.python/blob/b82e6a0bf7cc44a463c5d7cdb3d2199f8320c493/src/unide/measurement.py#L214-L222
[ "def", "csrf_protect_all_post_and_cross_origin_requests", "(", ")", ":", "success", "=", "None", "if", "is_cross_origin", "(", "request", ")", ":", "logger", ".", "warning", "(", "\"Received cross origin request. Aborting\"", ")", "abort", "(", "403", ")", "if", "request", ".", "method", "in", "[", "\"POST\"", ",", "\"PUT\"", "]", ":", "token", "=", "session", ".", "get", "(", "\"csrf_token\"", ")", "if", "token", "==", "request", ".", "form", ".", "get", "(", "\"csrf_token\"", ")", ":", "return", "success", "elif", "token", "==", "request", ".", "environ", ".", "get", "(", "\"HTTP_X_CSRFTOKEN\"", ")", ":", "return", "success", "else", ":", "logger", ".", "warning", "(", "\"Received invalid csrf token. Aborting\"", ")", "abort", "(", "403", ")" ]
Determine the format of an audio file .
def determine_format ( data , extension = None ) : if isinstance ( data , ( os . PathLike , str ) ) : data = open ( data , 'rb' ) data_reader = DataReader ( data ) data_reader . seek ( 0 , os . SEEK_SET ) d = data_reader . read ( 4 ) if d . startswith ( ( b'ID3' , b'\xFF\xFB' ) ) : # TODO: Catch all MP3 possibilities. if extension is None or extension . endswith ( '.mp3' ) : return MP3 if d . startswith ( ( b'fLaC' , b'ID3' ) ) : if extension is None or extension . endswith ( '.flac' ) : return FLAC if d . startswith ( b'RIFF' ) : if extension is None or extension . endswith ( '.wav' ) : return WAV return None
6,085
https://github.com/thebigmunch/audio-metadata/blob/d17bdbdb71db79c1568d54438d42dcd940b76074/src/audio_metadata/api.py#L14-L45
[ "def", "configure_url", "(", "url", ")", ":", "app", ".", "config", "[", "'COIL_URL'", "]", "=", "_site", ".", "config", "[", "'SITE_URL'", "]", "=", "_site", ".", "config", "[", "'BASE_URL'", "]", "=", "_site", ".", "GLOBAL_CONTEXT", "[", "'blog_url'", "]", "=", "site", ".", "config", "[", "'SITE_URL'", "]", "=", "site", ".", "config", "[", "'BASE_URL'", "]", "=", "url" ]
Load audio metadata from filepath or file - like object .
def load ( f ) : if isinstance ( f , ( os . PathLike , str ) ) : fileobj = open ( f , 'rb' ) else : try : f . read ( 0 ) except AttributeError : raise ValueError ( "Not a valid file-like object." ) except Exception : raise ValueError ( "Can't read from file-like object." ) fileobj = f parser_cls = determine_format ( fileobj , os . path . splitext ( fileobj . name ) [ 1 ] ) if parser_cls is None : raise UnsupportedFormat ( "Supported format signature not found." ) else : fileobj . seek ( 0 , os . SEEK_SET ) return parser_cls . load ( fileobj )
6,086
https://github.com/thebigmunch/audio-metadata/blob/d17bdbdb71db79c1568d54438d42dcd940b76074/src/audio_metadata/api.py#L48-L82
[ "def", "get_worker", "(", "*", "queue_names", ",", "*", "*", "kwargs", ")", ":", "job_class", "=", "get_job_class", "(", "kwargs", ".", "pop", "(", "'job_class'", ",", "None", ")", ")", "queue_class", "=", "kwargs", ".", "pop", "(", "'queue_class'", ",", "None", ")", "queues", "=", "get_queues", "(", "*", "queue_names", ",", "*", "*", "{", "'job_class'", ":", "job_class", ",", "'queue_class'", ":", "queue_class", "}", ")", "# normalize queue_class to what get_queues returns", "queue_class", "=", "queues", "[", "0", "]", ".", "__class__", "worker_class", "=", "get_worker_class", "(", "kwargs", ".", "pop", "(", "'worker_class'", ",", "None", ")", ")", "return", "worker_class", "(", "queues", ",", "connection", "=", "queues", "[", "0", "]", ".", "connection", ",", "exception_handlers", "=", "get_exception_handlers", "(", ")", "or", "None", ",", "job_class", "=", "job_class", ",", "queue_class", "=", "queue_class", ",", "*", "*", "kwargs", ")" ]
Load audio metadata from a bytes - like object .
def loads ( b ) : parser_cls = determine_format ( b ) if parser_cls is None : raise UnsupportedFormat ( "Supported format signature not found." ) return parser_cls . load ( b )
6,087
https://github.com/thebigmunch/audio-metadata/blob/d17bdbdb71db79c1568d54438d42dcd940b76074/src/audio_metadata/api.py#L85-L103
[ "def", "_get_required_fn", "(", "fn", ",", "root_path", ")", ":", "if", "not", "fn", ".", "startswith", "(", "root_path", ")", ":", "raise", "ValueError", "(", "\"Both paths have to be absolute or local!\"", ")", "replacer", "=", "\"/\"", "if", "root_path", ".", "endswith", "(", "\"/\"", ")", "else", "\"\"", "return", "fn", ".", "replace", "(", "root_path", ",", "replacer", ",", "1", ")" ]
method for finding specific types of notation from nodes . will currently return the first one it encounters because this method s only really intended for some types of notation for which the exact value doesn t really matter .
def Find ( self , node_type , item_type ) : if node_type == OtherNodes . DirectionNode : child = self . GetChild ( len ( self . children ) - 1 ) while child is not None and not isinstance ( child . GetItem ( ) , item_type ) : if child . GetItem ( ) . __class__ . __name__ == item_type . __name__ : return True child = child . GetChild ( 0 ) if node_type == OtherNodes . ExpressionNode : child = self . GetChild ( len ( self . children ) - 2 ) while child is not None and not isinstance ( child . GetItem ( ) , item_type ) : if child . GetItem ( ) . __class__ . __name__ == item_type . __name__ : return True child = child . GetChild ( 0 )
6,088
https://github.com/Godley/MuseParse/blob/23cecafa1fdc0f2d6a87760553572b459f3c9904/MuseParse/classes/ObjectHierarchy/TreeClasses/NoteNode.py#L41-L71
[ "def", "copy_resume", "(", "src_uri", ",", "dest_base_uri", ",", "config_path", "=", "None", ",", "progressbar", "=", "None", ")", ":", "dataset", "=", "DataSet", ".", "from_uri", "(", "src_uri", ")", "# Generate the URI of the destination proto dataset.", "dest_uri", "=", "_generate_uri", "(", "dataset", ".", "_admin_metadata", ",", "dest_base_uri", ")", "proto_dataset", "=", "ProtoDataSet", ".", "from_uri", "(", "dest_uri", ")", "_copy_content", "(", "dataset", ",", "proto_dataset", ",", "progressbar", ")", "proto_dataset", ".", "freeze", "(", "progressbar", "=", "progressbar", ")", "return", "proto_dataset", ".", "uri" ]
Count how many lines in a pure text file .
def count_lines ( abspath ) : with open ( abspath , "rb" ) as f : i = 0 for line in f : i += 1 pass return i
6,089
https://github.com/MacHu-GWU/single_file_module-project/blob/01f7a6b250853bebfd73de275895bf274325cfc1/sfm/lines_count.py#L21-L29
[ "def", "get_logins", "(", "self", ",", "user_id", ",", "start_date", "=", "None", ")", ":", "if", "start_date", "is", "None", ":", "date_object", "=", "datetime", ".", "datetime", ".", "today", "(", ")", "-", "datetime", ".", "timedelta", "(", "days", "=", "30", ")", "start_date", "=", "date_object", ".", "strftime", "(", "\"%m/%d/%Y 0:0:0\"", ")", "date_filter", "=", "{", "'loginAttempts'", ":", "{", "'createDate'", ":", "{", "'operation'", ":", "'greaterThanDate'", ",", "'options'", ":", "[", "{", "'name'", ":", "'date'", ",", "'value'", ":", "[", "start_date", "]", "}", "]", "}", "}", "}", "login_log", "=", "self", ".", "user_service", ".", "getLoginAttempts", "(", "id", "=", "user_id", ",", "filter", "=", "date_filter", ")", "return", "login_log" ]
Lines count of selected files under a directory .
def lines_stats ( dir_path , file_filter ) : n_files = 0 n_lines = 0 for p in Path ( dir_path ) . select_file ( file_filter ) : n_files += 1 n_lines += count_lines ( p . abspath ) return n_files , n_lines
6,090
https://github.com/MacHu-GWU/single_file_module-project/blob/01f7a6b250853bebfd73de275895bf274325cfc1/sfm/lines_count.py#L32-L43
[ "def", "_zm_request", "(", "self", ",", "method", ",", "api_url", ",", "data", "=", "None", ",", "timeout", "=", "DEFAULT_TIMEOUT", ")", "->", "dict", ":", "try", ":", "# Since the API uses sessions that expire, sometimes we need to", "# re-auth if the call fails.", "for", "_", "in", "range", "(", "ZoneMinder", ".", "LOGIN_RETRIES", ")", ":", "req", "=", "requests", ".", "request", "(", "method", ",", "urljoin", "(", "self", ".", "_server_url", ",", "api_url", ")", ",", "data", "=", "data", ",", "cookies", "=", "self", ".", "_cookies", ",", "timeout", "=", "timeout", ",", "verify", "=", "self", ".", "_verify_ssl", ")", "if", "not", "req", ".", "ok", ":", "self", ".", "login", "(", ")", "else", ":", "break", "else", ":", "_LOGGER", ".", "error", "(", "'Unable to get API response from ZoneMinder'", ")", "try", ":", "return", "req", ".", "json", "(", ")", "except", "ValueError", ":", "_LOGGER", ".", "exception", "(", "'JSON decode exception caught while'", "'attempting to decode \"%s\"'", ",", "req", ".", "text", ")", "return", "{", "}", "except", "requests", ".", "exceptions", ".", "ConnectionError", ":", "_LOGGER", ".", "exception", "(", "'Unable to connect to ZoneMinder'", ")", "return", "{", "}" ]
get Usage section and set to raw_content formal_content of no title and empty - line version
def parse_content ( self , text ) : match = re . search ( self . usage_re_str . format ( self . usage_name ) , text , flags = ( re . DOTALL if self . case_sensitive else ( re . DOTALL | re . IGNORECASE ) ) ) if match is None : return dic = match . groupdict ( ) logger . debug ( dic ) self . raw_content = dic [ 'raw' ] if dic [ 'sep' ] in ( '\n' , '\r\n' ) : self . formal_content = dic [ 'section' ] return reallen = len ( dic [ 'name' ] ) replace = '' . ljust ( reallen ) drop_name = match . expand ( '%s\\g<sep>\\g<section>' % replace ) self . formal_content = self . drop_started_empty_lines ( drop_name ) . rstrip ( )
6,091
https://github.com/TylerTemp/docpie/blob/e658454b81b6c79a020d499f12ad73496392c09a/docpie/parser.py#L772-L795
[ "def", "show_progress", "(", "total_duration", ")", ":", "with", "tqdm", "(", "total", "=", "round", "(", "total_duration", ",", "2", ")", ")", "as", "bar", ":", "def", "handler", "(", "key", ",", "value", ")", ":", "if", "key", "==", "'out_time_ms'", ":", "time", "=", "round", "(", "float", "(", "value", ")", "/", "1000000.", ",", "2", ")", "bar", ".", "update", "(", "time", "-", "bar", ".", "n", ")", "elif", "key", "==", "'progress'", "and", "value", "==", "'end'", ":", "bar", ".", "update", "(", "bar", ".", "total", "-", "bar", ".", "n", ")", "with", "_watch_progress", "(", "handler", ")", "as", "socket_filename", ":", "yield", "socket_filename" ]
Draw a pretty plot from an array
def spaceless_pdf_plot_maker ( array , filename , vmax = None , dpi = DEFAULT_DPI ) : if vmax is None : vmax = np . percentile ( array , DEFAULT_SATURATION_THRESHOLD ) plt . gca ( ) . set_axis_off ( ) plt . subplots_adjust ( top = 1 , bottom = 0 , right = 1 , left = 0 , hspace = 0 , wspace = 0 ) plt . margins ( 0 , 0 ) plt . gca ( ) . xaxis . set_major_locator ( plt . NullLocator ( ) ) plt . gca ( ) . yaxis . set_major_locator ( plt . NullLocator ( ) ) plt . figure ( ) if SEABORN : sns . heatmap ( array , vmax = vmax , cmap = "Reds" ) else : plt . imshow ( array , vmax = vmax , cmap = "Reds" , interpolation = "none" ) plt . colorbar ( ) plt . savefig ( filename , bbox_inches = "tight" , pad_inches = 0.0 , dpi = dpi ) plt . close ( )
6,092
https://github.com/koszullab/metaTOR/blob/0c1203d1dffedfa5ea380c0335b4baa9cfb7e89a/metator/scripts/figures.py#L38-L72
[ "def", "restart", "(", "self", ",", "timeout", "=", "None", ")", ":", "msg", "=", "{", "\"value\"", ":", "\"Restart requested by \"", "+", "self", ".", "username", "+", "\"via the Splunk SDK for Python\"", "}", "# This message will be deleted once the server actually restarts.", "self", ".", "messages", ".", "create", "(", "name", "=", "\"restart_required\"", ",", "*", "*", "msg", ")", "result", "=", "self", ".", "post", "(", "\"server/control/restart\"", ")", "if", "timeout", "is", "None", ":", "return", "result", "start", "=", "datetime", ".", "now", "(", ")", "diff", "=", "timedelta", "(", "seconds", "=", "timeout", ")", "while", "datetime", ".", "now", "(", ")", "-", "start", "<", "diff", ":", "try", ":", "self", ".", "login", "(", ")", "if", "not", "self", ".", "restart_required", ":", "return", "result", "except", "Exception", "as", "e", ":", "sleep", "(", "1", ")", "raise", "Exception", "(", "\"Operation time out.\"", ")" ]
Draw a quick preview of a sparse matrix with automated binning and normalization .
def draw_sparse_matrix ( array_filename , output_image , vmax = DEFAULT_SATURATION_THRESHOLD , max_size_matrix = DEFAULT_MAX_SIZE_MATRIX , ) : matrix = np . loadtxt ( array_filename , dtype = np . int32 , skiprows = 1 ) try : row , col , data = matrix . T except ValueError : row , col , data = matrix size = max ( np . amax ( row ) , np . amax ( col ) ) + 1 S = sparse . coo_matrix ( ( data , ( row , col ) ) , shape = ( size , size ) ) if max_size_matrix <= 0 : binning = 1 else : binning = ( size // max_size_matrix ) + 1 binned_S = hcs . bin_sparse ( S , subsampling_factor = binning ) dense_S = binned_S . todense ( ) dense_S = dense_S + dense_S . T - np . diag ( np . diag ( dense_S ) ) normed_S = hcs . normalize_dense ( dense_S ) spaceless_pdf_plot_maker ( normed_S , output_image , vmax = vmax )
6,093
https://github.com/koszullab/metaTOR/blob/0c1203d1dffedfa5ea380c0335b4baa9cfb7e89a/metator/scripts/figures.py#L75-L100
[ "def", "make_or_pipe", "(", "pipe", ")", ":", "p1", "=", "OrPipe", "(", "pipe", ")", "p2", "=", "OrPipe", "(", "pipe", ")", "p1", ".", "_partner", "=", "p2", "p2", ".", "_partner", "=", "p1", "return", "p1", ",", "p2" ]
Returns the nth item or a default value .
def nth ( iterable , n , default = None ) : return next ( itertools . islice ( iterable , n , None ) , default )
6,094
https://github.com/MacHu-GWU/single_file_module-project/blob/01f7a6b250853bebfd73de275895bf274325cfc1/sfm/iterable.py#L65-L82
[ "def", "delete_types_s", "(", "s", ",", "types", ")", ":", "patt", "=", "'(?s)'", "+", "'|'", ".", "join", "(", "'(?<=\\n)'", "+", "s", "+", "'\\n.+?\\n(?=\\S+|$)'", "for", "s", "in", "types", ")", "return", "re", ".", "sub", "(", "patt", ",", "''", ",", "'\\n'", "+", "s", ".", "strip", "(", ")", "+", "'\\n'", ",", ")", ".", "strip", "(", ")" ]
Return last n items of the iterable as a list .
def pull ( iterable , n ) : fifo = collections . deque ( maxlen = n ) for i in iterable : fifo . append ( i ) return list ( fifo )
6,095
https://github.com/MacHu-GWU/single_file_module-project/blob/01f7a6b250853bebfd73de275895bf274325cfc1/sfm/iterable.py#L102-L119
[ "def", "send_velocity_world_setpoint", "(", "self", ",", "vx", ",", "vy", ",", "vz", ",", "yawrate", ")", ":", "pk", "=", "CRTPPacket", "(", ")", "pk", ".", "port", "=", "CRTPPort", ".", "COMMANDER_GENERIC", "pk", ".", "data", "=", "struct", ".", "pack", "(", "'<Bffff'", ",", "TYPE_VELOCITY_WORLD", ",", "vx", ",", "vy", ",", "vz", ",", "yawrate", ")", "self", ".", "_cf", ".", "send_packet", "(", "pk", ")" ]
Generate n - size running window .
def running_window ( iterable , size ) : if size > len ( iterable ) : raise ValueError ( "size can not be greater than length of iterable." ) fifo = collections . deque ( maxlen = size ) for i in iterable : fifo . append ( i ) if len ( fifo ) == size : yield list ( fifo )
6,096
https://github.com/MacHu-GWU/single_file_module-project/blob/01f7a6b250853bebfd73de275895bf274325cfc1/sfm/iterable.py#L215-L237
[ "def", "fetch_additional_posts", "(", "self", ",", "uid", ")", ":", "cats", "=", "MPost2Catalog", ".", "query_by_entity_uid", "(", "uid", ",", "kind", "=", "self", ".", "kind", ")", "cat_uid_arr", "=", "[", "]", "for", "cat_rec", "in", "cats", ":", "cat_uid", "=", "cat_rec", ".", "tag_id", "cat_uid_arr", ".", "append", "(", "cat_uid", ")", "logger", ".", "info", "(", "'info category: {0}'", ".", "format", "(", "cat_uid_arr", ")", ")", "rel_recs", "=", "MRelation", ".", "get_app_relations", "(", "uid", ",", "8", ",", "kind", "=", "self", ".", "kind", ")", ".", "objects", "(", ")", "logger", ".", "info", "(", "'rel_recs count: {0}'", ".", "format", "(", "rel_recs", ".", "count", "(", ")", ")", ")", "if", "cat_uid_arr", ":", "rand_recs", "=", "MPost", ".", "query_cat_random", "(", "cat_uid_arr", "[", "0", "]", ",", "limit", "=", "4", "-", "rel_recs", ".", "count", "(", ")", "+", "4", ")", "else", ":", "rand_recs", "=", "MPost", ".", "query_random", "(", "num", "=", "4", "-", "rel_recs", ".", "count", "(", ")", "+", "4", ",", "kind", "=", "self", ".", "kind", ")", "return", "rand_recs", ",", "rel_recs" ]
Generate n - size cycle running window .
def cycle_running_window ( iterable , size ) : if size > len ( iterable ) : raise ValueError ( "size can not be greater than length of iterable." ) fifo = collections . deque ( maxlen = size ) cycle = itertools . cycle ( iterable ) counter = itertools . count ( 1 ) length = len ( iterable ) for i in cycle : fifo . append ( i ) if len ( fifo ) == size : yield list ( fifo ) if next ( counter ) == length : break
6,097
https://github.com/MacHu-GWU/single_file_module-project/blob/01f7a6b250853bebfd73de275895bf274325cfc1/sfm/iterable.py#L240-L269
[ "def", "update_group_alias", "(", "self", ",", "entity_id", ",", "name", ",", "mount_accessor", "=", "\"\"", ",", "canonical_id", "=", "\"\"", ",", "mount_point", "=", "DEFAULT_MOUNT_POINT", ")", ":", "params", "=", "{", "'name'", ":", "name", ",", "'mount_accessor'", ":", "mount_accessor", ",", "'canonical_id'", ":", "canonical_id", ",", "}", "api_path", "=", "'/v1/{mount_point}/group-alias/id/{id}'", ".", "format", "(", "mount_point", "=", "mount_point", ",", "id", "=", "entity_id", ",", ")", "return", "self", ".", "_adapter", ".", "post", "(", "url", "=", "api_path", ",", "json", "=", "params", ",", ")" ]
Shift and trim unneeded item .
def shift_and_trim ( array , dist ) : length = len ( array ) if length == 0 : return [ ] if ( dist >= length ) or ( dist <= - length ) : return [ ] elif dist < 0 : return array [ - dist : ] elif dist > 0 : return array [ : - dist ] else : return list ( array )
6,098
https://github.com/MacHu-GWU/single_file_module-project/blob/01f7a6b250853bebfd73de275895bf274325cfc1/sfm/iterable.py#L351-L386
[ "def", "tag_pos_volume", "(", "line", ")", ":", "def", "tagger", "(", "match", ")", ":", "groups", "=", "match", ".", "groupdict", "(", ")", "try", ":", "year", "=", "match", ".", "group", "(", "'year'", ")", "except", "IndexError", ":", "# Extract year from volume name", "# which should always include the year", "g", "=", "re", ".", "search", "(", "re_pos_year_num", ",", "match", ".", "group", "(", "'volume_num'", ")", ",", "re", ".", "UNICODE", ")", "year", "=", "g", ".", "group", "(", "0", ")", "if", "year", ":", "groups", "[", "'year'", "]", "=", "' <cds.YR>(%s)</cds.YR>'", "%", "year", ".", "strip", "(", ")", ".", "strip", "(", "'()'", ")", "else", ":", "groups", "[", "'year'", "]", "=", "''", "return", "'<cds.JOURNAL>PoS</cds.JOURNAL>'", "' <cds.VOL>%(volume_name)s%(volume_num)s</cds.VOL>'", "'%(year)s'", "' <cds.PG>%(page)s</cds.PG>'", "%", "groups", "for", "p", "in", "re_pos", ":", "line", "=", "p", ".", "sub", "(", "tagger", ",", "line", ")", "return", "line" ]
Shift and pad with item .
def shift_and_pad ( array , dist , pad = "__null__" ) : length = len ( array ) if length == 0 : return [ ] if pad == "__null__" : if dist > 0 : padding_item = array [ 0 ] elif dist < 0 : padding_item = array [ - 1 ] else : padding_item = None else : padding_item = pad if abs ( dist ) >= length : return length * [ padding_item , ] elif dist == 0 : return list ( array ) elif dist > 0 : return [ padding_item , ] * dist + array [ : - dist ] elif dist < 0 : return array [ - dist : ] + [ padding_item , ] * - dist else : # Never get in this logic raise Exception
6,099
https://github.com/MacHu-GWU/single_file_module-project/blob/01f7a6b250853bebfd73de275895bf274325cfc1/sfm/iterable.py#L389-L440
[ "def", "resolve_remote", "(", "self", ",", "uri", ")", ":", "if", "uri", ".", "startswith", "(", "'file://'", ")", ":", "try", ":", "path", "=", "uri", "[", "7", ":", "]", "with", "open", "(", "path", ",", "'r'", ")", "as", "schema_file", ":", "result", "=", "yaml", ".", "load", "(", "schema_file", ")", "if", "self", ".", "cache_remote", ":", "self", ".", "store", "[", "uri", "]", "=", "result", "return", "result", "except", "yaml", ".", "parser", ".", "ParserError", "as", "e", ":", "logging", ".", "debug", "(", "'Error parsing {!r} as YAML: {}'", ".", "format", "(", "uri", ",", "e", ")", ")", "return", "super", "(", "SchemaRefResolver", ",", "self", ")", ".", "resolve_remote", "(", "uri", ")" ]