query
stringlengths
5
1.23k
positive
stringlengths
53
15.2k
id_
int64
0
252k
task_name
stringlengths
87
242
negative
listlengths
20
553
Make docutils nodes containing a cross - reference to a Python object .
def make_python_xref_nodes(py_typestr, state, hide_namespace=False):
    """Make docutils nodes containing a cross-reference to a Python object.

    Parameters
    ----------
    py_typestr : str
        Dotted path of the Python object to reference.
    state
        Docutils state object from the calling directive/role.
    hide_namespace : bool, optional
        If True, only the last component of the namespace is displayed
        (prefixes the target with ``~``).

    Returns
    -------
    Parsed docutils nodes produced by ``parse_rst_content``.
    """
    marker = '~' if hide_namespace else ''
    rst_source = ':py:obj:`{}{}`\n'.format(marker, py_typestr)
    return parse_rst_content(rst_source, state)
11,700
https://github.com/lsst-sqre/documenteer/blob/75f02901a80042b28d074df1cc1dca32eb8e38c8/documenteer/sphinxext/utils.py#L44-L83
[ "def", "Find", "(", "cls", ",", "setting_matcher", ",", "port_path", "=", "None", ",", "serial", "=", "None", ",", "timeout_ms", "=", "None", ")", ":", "if", "port_path", ":", "device_matcher", "=", "cls", ".", "PortPathMatcher", "(", "port_path", ")", "usb_info", "=", "port_path", "elif", "serial", ":", "device_matcher", "=", "cls", ".", "SerialMatcher", "(", "serial", ")", "usb_info", "=", "serial", "else", ":", "device_matcher", "=", "None", "usb_info", "=", "'first'", "return", "cls", ".", "FindFirst", "(", "setting_matcher", ",", "device_matcher", ",", "usb_info", "=", "usb_info", ",", "timeout_ms", "=", "timeout_ms", ")" ]
Make docutils nodes containing a cross - reference to a Python object given the object s type .
def make_python_xref_nodes_for_type(py_type, state, hide_namespace=False):
    """Make docutils nodes containing a cross-reference to a Python object,
    given the object's type.

    Parameters
    ----------
    py_type : type
        The Python type to reference.
    state
        Docutils state object from the calling directive/role.
    hide_namespace : bool, optional
        If True, only the last component of the namespace is displayed.

    Returns
    -------
    Nodes produced by ``make_python_xref_nodes``.
    """
    # Builtins are referenced by bare name; everything else gets its module path.
    if py_type.__module__ == 'builtins':
        dotted_name = py_type.__name__
    else:
        dotted_name = '{}.{}'.format(py_type.__module__, py_type.__name__)
    return make_python_xref_nodes(dotted_name, state, hide_namespace=hide_namespace)
11,701
https://github.com/lsst-sqre/documenteer/blob/75f02901a80042b28d074df1cc1dca32eb8e38c8/documenteer/sphinxext/utils.py#L86-L126
[ "def", "Find", "(", "cls", ",", "setting_matcher", ",", "port_path", "=", "None", ",", "serial", "=", "None", ",", "timeout_ms", "=", "None", ")", ":", "if", "port_path", ":", "device_matcher", "=", "cls", ".", "PortPathMatcher", "(", "port_path", ")", "usb_info", "=", "port_path", "elif", "serial", ":", "device_matcher", "=", "cls", ".", "SerialMatcher", "(", "serial", ")", "usb_info", "=", "serial", "else", ":", "device_matcher", "=", "None", "usb_info", "=", "'first'", "return", "cls", ".", "FindFirst", "(", "setting_matcher", ",", "device_matcher", ",", "usb_info", "=", "usb_info", ",", "timeout_ms", "=", "timeout_ms", ")" ]
Make a docutils section node .
def make_section(section_id=None, contents=None):
    """Make a docutils section node.

    Parameters
    ----------
    section_id : str, optional
        Identifier used both as the section id (normalized) and its name.
    contents : list, optional
        Child nodes appended into the section, if provided.

    Returns
    -------
    ``docutils.nodes.section``
    """
    section_node = nodes.section()
    # Register both the normalized id and the raw name for cross-referencing.
    section_node['ids'].append(nodes.make_id(section_id))
    section_node['names'].append(section_id)
    if contents is not None:
        section_node.extend(contents)
    return section_node
11,702
https://github.com/lsst-sqre/documenteer/blob/75f02901a80042b28d074df1cc1dca32eb8e38c8/documenteer/sphinxext/utils.py#L129-L150
[ "def", "_HashRow", "(", "cls", ",", "row", ")", ":", "values", "=", "[", "]", "for", "value", "in", "row", ":", "try", ":", "value", "=", "'{0!s}'", ".", "format", "(", "value", ")", "except", "UnicodeDecodeError", ":", "# In Python 2, blobs are \"read-write buffer\" and will cause a", "# UnicodeDecodeError exception if we try format it as a string.", "# Since Python 3 does not support the buffer type we cannot check", "# the type of value.", "value", "=", "repr", "(", "value", ")", "values", ".", "append", "(", "value", ")", "return", "hash", "(", "' '", ".", "join", "(", "values", ")", ")" ]
Split the rawsource of a role into standard components .
def split_role_content(role_rawsource):
    """Split the rawsource of a role into standard components.

    Parameters
    ----------
    role_rawsource : str
        The raw text passed to the role.

    Returns
    -------
    dict
        Keys: ``last_component`` (bool, True when a leading ``~`` requests
        showing only the last namespace part), ``display`` (suggested display
        text or None), ``ref`` (the reference target).
    """
    text = role_rawsource
    last_component = text.startswith('~')
    if last_component:
        # Drop the leading marker(s); only the last part of the namespace
        # should be shown.
        text = text.lstrip('~')
    display = None
    match = ROLE_DISPLAY_PATTERN.match(text)
    if match:
        display = match.group('display').strip()
        ref = match.group('reference').strip()
    else:
        # No suggested display text was provided.
        ref = text.strip()
    return {'last_component': last_component, 'display': display, 'ref': ref}
11,703
https://github.com/lsst-sqre/documenteer/blob/75f02901a80042b28d074df1cc1dca32eb8e38c8/documenteer/sphinxext/utils.py#L156-L212
[ "def", "check_boundary_lines_similar", "(", "l_1", ",", "l_2", ")", ":", "num_matches", "=", "0", "if", "(", "type", "(", "l_1", ")", "!=", "list", ")", "or", "(", "type", "(", "l_2", ")", "!=", "list", ")", "or", "(", "len", "(", "l_1", ")", "!=", "len", "(", "l_2", ")", ")", ":", "# these 'boundaries' are not similar", "return", "0", "num_elements", "=", "len", "(", "l_1", ")", "for", "i", "in", "xrange", "(", "0", ",", "num_elements", ")", ":", "if", "l_1", "[", "i", "]", ".", "isdigit", "(", ")", "and", "l_2", "[", "i", "]", ".", "isdigit", "(", ")", ":", "# both lines are integers", "num_matches", "+=", "1", "else", ":", "l1_str", "=", "l_1", "[", "i", "]", ".", "lower", "(", ")", "l2_str", "=", "l_2", "[", "i", "]", ".", "lower", "(", ")", "if", "(", "l1_str", "[", "0", "]", "==", "l2_str", "[", "0", "]", ")", "and", "(", "l1_str", "[", "len", "(", "l1_str", ")", "-", "1", "]", "==", "l2_str", "[", "len", "(", "l2_str", ")", "-", "1", "]", ")", ":", "num_matches", "=", "num_matches", "+", "1", "if", "(", "len", "(", "l_1", ")", "==", "0", ")", "or", "(", "float", "(", "num_matches", ")", "/", "float", "(", "len", "(", "l_1", ")", ")", "<", "0.9", ")", ":", "return", "0", "else", ":", "return", "1" ]
Return a molecule which has largest graph in the compound Passing single molecule object will results as same as molutil . clone
def largest_graph(mol):
    """Return a molecule which keeps only the largest graph in the compound.

    Passing a single-fragment molecule gives the same result as
    ``molutil.clone``.

    Parameters
    ----------
    mol
        Molecule object; must provide "Valence" and "Topology" descriptors.

    Returns
    -------
    A cloned molecule with all isolated fragments' atoms removed.
    """
    mol.require("Valence")
    mol.require("Topology")
    # Work on a copy so the caller's molecule is never modified.
    copied = clone(mol)
    if copied.isolated:
        for atom_key in itertools.chain.from_iterable(copied.isolated):
            copied.remove_atom(atom_key)
    return copied
11,704
https://github.com/mojaie/chorus/blob/fc7fe23a0272554c67671645ab07830b315eeb1b/chorus/molutil.py#L64-L74
[ "def", "access", "(", "self", ",", "accessor", ",", "timeout", "=", "None", ")", ":", "if", "self", ".", "loop", ".", "is_running", "(", ")", ":", "raise", "RuntimeError", "(", "\"Loop is already running\"", ")", "coro", "=", "asyncio", ".", "wait_for", "(", "accessor", ",", "timeout", ",", "loop", "=", "self", ".", "loop", ")", "return", "self", ".", "loop", ".", "run_until_complete", "(", "coro", ")" ]
Hydrogen bond donor count
def H_donor_count(mol):
    """Hydrogen bond donor count.

    Parameters
    ----------
    mol
        Molecule object; must provide the "Valence" descriptor.

    Returns
    -------
    int
        Number of atoms flagged as hydrogen bond donors.
    """
    mol.require("Valence")
    count = 0
    for _, atom in mol.atoms_iter():
        if atom.H_donor:
            count += 1
    return count
11,705
https://github.com/mojaie/chorus/blob/fc7fe23a0272554c67671645ab07830b315eeb1b/chorus/molutil.py#L105-L108
[ "def", "_sendStatCmd", "(", "self", ",", "cmd", ")", ":", "try", ":", "self", ".", "_conn", ".", "write", "(", "\"%s\\r\\n\"", "%", "cmd", ")", "regex", "=", "re", ".", "compile", "(", "'^(END|ERROR)\\r\\n'", ",", "re", ".", "MULTILINE", ")", "(", "idx", ",", "mobj", ",", "text", ")", "=", "self", ".", "_conn", ".", "expect", "(", "[", "regex", ",", "]", ",", "self", ".", "_timeout", ")", "#@UnusedVariable", "except", ":", "raise", "Exception", "(", "\"Communication with %s failed\"", "%", "self", ".", "_instanceName", ")", "if", "mobj", "is", "not", "None", ":", "if", "mobj", ".", "group", "(", "1", ")", "==", "'END'", ":", "return", "text", ".", "splitlines", "(", ")", "[", ":", "-", "1", "]", "elif", "mobj", ".", "group", "(", "1", ")", "==", "'ERROR'", ":", "raise", "Exception", "(", "\"Protocol error in communication with %s.\"", "%", "self", ".", "_instanceName", ")", "else", ":", "raise", "Exception", "(", "\"Connection with %s timed out.\"", "%", "self", ".", "_instanceName", ")" ]
Hydrogen bond acceptor count
def H_acceptor_count(mol):
    """Hydrogen bond acceptor count.

    Parameters
    ----------
    mol
        Molecule object; must provide the "Valence" descriptor.

    Returns
    -------
    int
        Number of atoms flagged as hydrogen bond acceptors.
    """
    mol.require("Valence")
    count = 0
    for _, atom in mol.atoms_iter():
        if atom.H_acceptor:
            count += 1
    return count
11,706
https://github.com/mojaie/chorus/blob/fc7fe23a0272554c67671645ab07830b315eeb1b/chorus/molutil.py#L111-L114
[ "def", "_PrintSessionsOverview", "(", "self", ",", "storage_reader", ")", ":", "table_view", "=", "views", ".", "ViewsFactory", ".", "GetTableView", "(", "self", ".", "_views_format_type", ",", "title", "=", "'Sessions'", ")", "for", "session", "in", "storage_reader", ".", "GetSessions", "(", ")", ":", "start_time", "=", "timelib", ".", "Timestamp", ".", "CopyToIsoFormat", "(", "session", ".", "start_time", ")", "session_identifier", "=", "uuid", ".", "UUID", "(", "hex", "=", "session", ".", "identifier", ")", "session_identifier", "=", "'{0!s}'", ".", "format", "(", "session_identifier", ")", "table_view", ".", "AddRow", "(", "[", "session_identifier", ",", "start_time", "]", ")", "table_view", ".", "Write", "(", "self", ".", "_output_writer", ")" ]
Rotatable bond count
def rotatable_count(mol):
    """Rotatable bond count.

    Parameters
    ----------
    mol
        Molecule object; must provide the "Rotatable" descriptor.

    Returns
    -------
    int
        Number of bonds flagged as rotatable.
    """
    mol.require("Rotatable")
    count = 0
    for _, _, bond in mol.bonds_iter():
        if bond.rotatable:
            count += 1
    return count
11,707
https://github.com/mojaie/chorus/blob/fc7fe23a0272554c67671645ab07830b315eeb1b/chorus/molutil.py#L117-L120
[ "def", "parse_from_dict", "(", "json_dict", ")", ":", "history_columns", "=", "json_dict", "[", "'columns'", "]", "history_list", "=", "MarketHistoryList", "(", "upload_keys", "=", "json_dict", "[", "'uploadKeys'", "]", ",", "history_generator", "=", "json_dict", "[", "'generator'", "]", ",", ")", "for", "rowset", "in", "json_dict", "[", "'rowsets'", "]", ":", "generated_at", "=", "parse_datetime", "(", "rowset", "[", "'generatedAt'", "]", ")", "region_id", "=", "rowset", "[", "'regionID'", "]", "type_id", "=", "rowset", "[", "'typeID'", "]", "history_list", ".", "set_empty_region", "(", "region_id", ",", "type_id", ",", "generated_at", ")", "for", "row", "in", "rowset", "[", "'rows'", "]", ":", "history_kwargs", "=", "_columns_to_kwargs", "(", "SPEC_TO_KWARG_CONVERSION", ",", "history_columns", ",", "row", ")", "historical_date", "=", "parse_datetime", "(", "history_kwargs", "[", "'historical_date'", "]", ")", "history_kwargs", ".", "update", "(", "{", "'type_id'", ":", "type_id", ",", "'region_id'", ":", "region_id", ",", "'historical_date'", ":", "historical_date", ",", "'generated_at'", ":", "generated_at", ",", "}", ")", "history_list", ".", "add_entry", "(", "MarketHistoryEntry", "(", "*", "*", "history_kwargs", ")", ")", "return", "history_list" ]
Lipinski s rule of five violation count
def rule_of_five_violation(mol):
    """Lipinski's rule of five violation count.

    Parameters
    ----------
    mol
        Molecule object.

    Returns
    -------
    int
        Number of violated criteria (0-4): MW > 500, H donors > 5,
        H acceptors > 10, LogP > 5 (or LogP not computable).
    """
    failed = (
        mw(mol) > 500,
        H_donor_count(mol) > 5,
        H_acceptor_count(mol) > 10,
    )
    violations = sum(1 for flag in failed if flag)
    try:
        if wclogp.wclogp(mol) > 5:
            violations += 1
    except TypeError:
        # LogP is not available for this molecule; count it as a violation.
        violations += 1
    return violations
11,708
https://github.com/mojaie/chorus/blob/fc7fe23a0272554c67671645ab07830b315eeb1b/chorus/molutil.py#L123-L137
[ "def", "CanonicalPathToLocalPath", "(", "path", ")", ":", "# Account for raw devices", "path", "=", "path", ".", "replace", "(", "\"/\\\\\"", ",", "\"\\\\\"", ")", "path", "=", "path", ".", "replace", "(", "\"/\"", ",", "\"\\\\\"", ")", "m", "=", "re", ".", "match", "(", "r\"\\\\([a-zA-Z]):(.*)$\"", ",", "path", ")", "if", "m", ":", "path", "=", "\"%s:\\\\%s\"", "%", "(", "m", ".", "group", "(", "1", ")", ",", "m", ".", "group", "(", "2", ")", ".", "lstrip", "(", "\"\\\\\"", ")", ")", "return", "path" ]
Chemical formula . Atoms should be arranged in order of C H and other atoms . Molecules should be arranged in order of length of formula text .
def formula(mol):
    """Chemical formula.

    Atoms are arranged in order of C, H, and then other elements by atomic
    number (Hill-like ordering). Fragment formulas are arranged in order of
    the length of their formula text, and identical fragments are collapsed
    with a multiplier prefix (e.g. ``2H2O``).

    Parameters
    ----------
    mol
        Molecule object; must provide "Valence" and "Topology" descriptors.

    Returns
    -------
    str
        Dot-separated formula text for all fragments.
    """
    mol.require("Valence")
    mol.require("Topology")

    def fragment_text(atom_keys):
        # Sum elemental composition over the fragment's atoms.
        composition = Counter()
        for key in atom_keys:
            composition += mol.atom(key).composition()
        parts = []
        # Carbon then hydrogen come first; count suffix only when > 1.
        for symbol in ("C", "H"):
            n = composition.pop(symbol, 0)
            if n:
                parts.append(symbol)
                if n > 1:
                    parts.append(str(n))
        # Remaining elements ordered by atomic number.
        for symbol, n in sorted(composition.items(),
                                key=lambda item: atom_number(item[0])):
            parts.append(symbol)
            if n > 1:
                parts.append(str(n))
        return "".join(parts)

    fragment_counts = Counter()
    for fragment in sorted(mols_iter(mol), key=len, reverse=True):
        fragment_counts[fragment_text(fragment)] += 1
    ordered = sorted(fragment_counts.items(),
                     key=lambda item: len(item[0]), reverse=True)
    return ".".join(
        "{}{}".format(count, text) if count > 1 else text
        for text, count in ordered
    )
11,709
https://github.com/mojaie/chorus/blob/fc7fe23a0272554c67671645ab07830b315eeb1b/chorus/molutil.py#L164-L200
[ "def", "serverinfo", "(", "url", "=", "'http://localhost:8080/manager'", ",", "timeout", "=", "180", ")", ":", "data", "=", "_wget", "(", "'serverinfo'", ",", "{", "}", ",", "url", ",", "timeout", "=", "timeout", ")", "if", "data", "[", "'res'", "]", "is", "False", ":", "return", "{", "'error'", ":", "data", "[", "'msg'", "]", "}", "ret", "=", "{", "}", "data", "[", "'msg'", "]", ".", "pop", "(", "0", ")", "for", "line", "in", "data", "[", "'msg'", "]", ":", "tmp", "=", "line", ".", "split", "(", "':'", ")", "ret", "[", "tmp", "[", "0", "]", ".", "strip", "(", ")", "]", "=", "tmp", "[", "1", "]", ".", "strip", "(", ")", "return", "ret" ]
Show debugging information .
def debug(*args, **attrs):
    """Show debugging information."""
    # Default to a flag-style option with no explicit default value.
    for key, value in (("is_flag", True), ("default", None)):
        attrs.setdefault(key, value)
    return option(debug, *args, **attrs)
11,710
https://github.com/zsimic/runez/blob/14363b719a1aae1528859a501a22d075ce0abfcc/src/runez/click.py#L49-L53
[ "def", "validate_arguments", "(", "log", ",", "whitelisted_args", ",", "args", ")", ":", "valid_patterns", "=", "{", "re", ".", "compile", "(", "p", ")", ":", "v", "for", "p", ",", "v", "in", "whitelisted_args", ".", "items", "(", ")", "}", "def", "validate", "(", "idx", ")", ":", "arg", "=", "args", "[", "idx", "]", "for", "pattern", ",", "has_argument", "in", "valid_patterns", ".", "items", "(", ")", ":", "if", "pattern", ".", "match", "(", "arg", ")", ":", "return", "2", "if", "has_argument", "else", "1", "log", ".", "warn", "(", "\"Zinc argument '{}' is not supported, and is subject to change/removal!\"", ".", "format", "(", "arg", ")", ")", "return", "1", "arg_index", "=", "0", "while", "arg_index", "<", "len", "(", "args", ")", ":", "arg_index", "+=", "validate", "(", "arg_index", ")" ]
Perform a dryrun .
def dryrun(*args, **attrs):
    """Perform a dryrun."""
    # Default to a flag-style option with no explicit default value.
    for key, value in (("is_flag", True), ("default", None)):
        attrs.setdefault(key, value)
    return option(dryrun, *args, **attrs)
11,711
https://github.com/zsimic/runez/blob/14363b719a1aae1528859a501a22d075ce0abfcc/src/runez/click.py#L56-L60
[ "def", "serialise", "(", "self", ",", "default_endianness", "=", "None", ")", ":", "# Figure out an endianness.", "endianness", "=", "(", "default_endianness", "or", "DEFAULT_ENDIANNESS", ")", "if", "hasattr", "(", "self", ",", "'_Meta'", ")", ":", "endianness", "=", "self", ".", "_Meta", ".", "get", "(", "'endianness'", ",", "endianness", ")", "inferred_fields", "=", "set", "(", ")", "for", "k", ",", "v", "in", "iteritems", "(", "self", ".", "_type_mapping", ")", ":", "inferred_fields", "|=", "{", "x", ".", "_name", "for", "x", "in", "v", ".", "dependent_fields", "(", ")", "}", "for", "field", "in", "inferred_fields", ":", "setattr", "(", "self", ",", "field", ",", "None", ")", "# Some fields want to manipulate other fields that appear before them (e.g. Unions)", "for", "k", ",", "v", "in", "iteritems", "(", "self", ".", "_type_mapping", ")", ":", "v", ".", "prepare", "(", "self", ",", "getattr", "(", "self", ",", "k", ")", ")", "message", "=", "b''", "for", "k", ",", "v", "in", "iteritems", "(", "self", ".", "_type_mapping", ")", ":", "message", "+=", "v", ".", "value_to_bytes", "(", "self", ",", "getattr", "(", "self", ",", "k", ")", ",", "default_endianness", "=", "endianness", ")", "return", "message" ]
Override log file location .
def log(*args, **attrs):
    """Override log file location."""
    # Show PATH as the metavar and hide the default in help output.
    for key, value in (("metavar", "PATH"), ("show_default", False)):
        attrs.setdefault(key, value)
    return option(log, *args, **attrs)
11,712
https://github.com/zsimic/runez/blob/14363b719a1aae1528859a501a22d075ce0abfcc/src/runez/click.py#L63-L67
[ "def", "get_records", "(", "self", ")", ":", "form", "=", "self", ".", "request", ".", "form", "ar_count", "=", "self", ".", "get_ar_count", "(", ")", "records", "=", "[", "]", "# Group belonging AR fields together", "for", "arnum", "in", "range", "(", "ar_count", ")", ":", "record", "=", "{", "}", "s1", "=", "\"-{}\"", ".", "format", "(", "arnum", ")", "keys", "=", "filter", "(", "lambda", "key", ":", "s1", "in", "key", ",", "form", ".", "keys", "(", ")", ")", "for", "key", "in", "keys", ":", "new_key", "=", "key", ".", "replace", "(", "s1", ",", "\"\"", ")", "value", "=", "form", ".", "get", "(", "key", ")", "record", "[", "new_key", "]", "=", "value", "records", ".", "append", "(", "record", ")", "return", "records" ]
Show the version and exit .
def version(*args, **attrs):
    """Show the version and exit.

    Accepts a ``package`` keyword (or infers the caller's package via
    ``sys._getframe``) and resolves its version for ``click.version_option``.

    Fix: ``package`` is now popped from ``attrs`` unconditionally. The
    original only popped it inside the ``hasattr(sys, "_getframe")`` guard,
    so on interpreters without ``sys._getframe`` an explicit ``package=``
    kwarg leaked through to ``click.version_option`` and raised TypeError.
    """
    package = attrs.pop("package", None)
    if package is None and hasattr(sys, "_getframe"):
        # Infer the caller's package from its frame globals (CPython only).
        package = sys._getframe(1).f_globals.get("__package__")
    if package:
        attrs.setdefault("version", get_version(package))
    return click.version_option(*args, **attrs)
11,713
https://github.com/zsimic/runez/blob/14363b719a1aae1528859a501a22d075ce0abfcc/src/runez/click.py#L70-L76
[ "def", "is_domain_equal", "(", "self", ",", "other", ")", ":", "domain", "=", "self", ".", "get_domain", "(", ")", "other_domain", "=", "other", ".", "get_domain", "(", ")", "# if they share the same instance of memory for the domain", "if", "domain", "==", "other_domain", ":", "return", "True", "else", ":", "return", "False" ]
Convert molecule to RDMol
def to_rdmol(mol):
    """Convert molecule to RDMol.

    Parameters
    ----------
    mol
        Molecule object providing ``atoms_iter`` and ``bonds_iter``.

    Returns
    -------
    ``rdkit.Chem.Mol``
        RDKit molecule with atoms, a 3D conformer, and bonds copied over.
    """
    rwmol = Chem.RWMol(Chem.MolFromSmiles(''))
    order_to_bondtype = {
        1: Chem.BondType.SINGLE,
        2: Chem.BondType.DOUBLE,
        3: Chem.BondType.TRIPLE,
    }
    conformer = Chem.Conformer(rwmol.GetNumAtoms())
    key_to_idx = {}
    for key, atom in mol.atoms_iter():
        idx = rwmol.AddAtom(Chem.Atom(atom_number(atom.symbol)))
        key_to_idx[key] = idx
        conformer.SetAtomPosition(idx, atom.coords)
    rwmol.AddConformer(conformer)
    for u, v, bond in mol.bonds_iter():
        rwmol.AddBond(key_to_idx[u], key_to_idx[v], order_to_bondtype[bond.order])
    # Ring recognition is required for fingerprint computation.
    Chem.GetSSSR(rwmol)
    rwmol.UpdatePropertyCache(strict=False)
    return rwmol.GetMol()
11,714
https://github.com/mojaie/chorus/blob/fc7fe23a0272554c67671645ab07830b315eeb1b/chorus/rdkit.py#L20-L39
[ "def", "get_available_options", "(", "self", ",", "service_name", ")", ":", "options", "=", "{", "}", "for", "data_dir", "in", "self", ".", "data_dirs", ":", "# Traverse all the directories trying to find the best match.", "service_glob", "=", "\"{0}-*.json\"", ".", "format", "(", "service_name", ")", "path", "=", "os", ".", "path", ".", "join", "(", "data_dir", ",", "service_glob", ")", "found", "=", "glob", ".", "glob", "(", "path", ")", "for", "match", "in", "found", ":", "# Rip apart the path to determine the API version.", "base", "=", "os", ".", "path", ".", "basename", "(", "match", ")", "bits", "=", "os", ".", "path", ".", "splitext", "(", "base", ")", "[", "0", "]", ".", "split", "(", "'-'", ",", "1", ")", "if", "len", "(", "bits", ")", "<", "2", ":", "continue", "api_version", "=", "bits", "[", "1", "]", "options", ".", "setdefault", "(", "api_version", ",", "[", "]", ")", "options", "[", "api_version", "]", ".", "append", "(", "match", ")", "return", "options" ]
Calculate morgan fingerprint similarity by using RDKit radius = 2 roughly equivalent to ECFP4
def morgan_sim(mol1, mol2, radius=2, digit=3):
    """Calculate Morgan fingerprint similarity using RDKit.

    ``radius=2`` is roughly equivalent to ECFP4.

    Parameters
    ----------
    mol1, mol2
        Molecule objects convertible via ``to_rdmol``.
    radius : int, optional
        Morgan fingerprint radius.
    digit : int, optional
        Number of decimal places in the returned similarity.

    Returns
    -------
    float
        Dice similarity of the two fingerprints, rounded to ``digit`` places.
    """
    rd1 = to_rdmol(mol1)
    rd2 = to_rdmol(mol2)
    fp1 = AllChem.GetMorganFingerprint(rd1, radius)
    fp2 = AllChem.GetMorganFingerprint(rd2, radius)
    similarity = DataStructs.DiceSimilarity(fp1, fp2)
    return round(similarity, digit)
11,715
https://github.com/mojaie/chorus/blob/fc7fe23a0272554c67671645ab07830b315eeb1b/chorus/rdkit.py#L63-L71
[ "def", "_get_handler_set", "(", "cls", ",", "request", ",", "fail_enum", ",", "header_proto", "=", "None", ")", ":", "added", "=", "set", "(", ")", "handlers", "=", "[", "]", "for", "controls", "in", "request", ".", "sorting", ":", "control_bytes", "=", "controls", ".", "SerializeToString", "(", ")", "if", "control_bytes", "not", "in", "added", ":", "added", ".", "add", "(", "control_bytes", ")", "handlers", ".", "append", "(", "cls", ".", "_ValueHandler", "(", "controls", ",", "fail_enum", ",", "header_proto", ")", ")", "return", "handlers" ]
Assigns data to this object and builds the Merge Tree
def build(self, X, Y, w=None, edges=None):
    """Assign data to this object and build the Merge Tree.

    Parameters
    ----------
    X
        Input sample locations.
    Y
        Scalar values at each sample.
    w : optional
        Sample weights forwarded to the parent class.
    edges : optional
        Precomputed edges forwarded to the parent class.

    Fix: ``time.clock()`` was deprecated since Python 3.3 and removed in
    Python 3.8; the debug timing now uses ``time.perf_counter()``.
    """
    super(MergeTree, self).build(X, Y, w, edges)
    if self.debug:
        sys.stdout.write("Merge Tree Computation: ")
        start = time.perf_counter()
    self.__tree = MergeTreeFloat(
        vectorFloat(self.Xnorm.flatten()),
        vectorFloat(self.Y),
        str(self.gradient),
        self.graph_rep.full_graph(),
        self.debug,
    )
    self._internal_build()
    if self.debug:
        end = time.perf_counter()
        sys.stdout.write("%f s\n" % (end - start))
11,716
https://github.com/maljovec/topopy/blob/4be598d51c4e4043b73d4ad44beed6d289e2f088/topopy/MergeTree.py#L103-L133
[ "def", "_wait_for_machine_booted", "(", "name", ",", "suffictinet_texts", "=", "None", ")", ":", "# TODO: rewrite it using probes module in utils", "suffictinet_texts", "=", "suffictinet_texts", "or", "[", "\"systemd-logind\"", "]", "# optionally use: \"Unit: machine\"", "for", "foo", "in", "range", "(", "constants", ".", "DEFAULT_RETRYTIMEOUT", ")", ":", "time", ".", "sleep", "(", "constants", ".", "DEFAULT_SLEEP", ")", "out", "=", "run_cmd", "(", "[", "\"machinectl\"", ",", "\"--no-pager\"", ",", "\"status\"", ",", "name", "]", ",", "ignore_status", "=", "True", ",", "return_output", "=", "True", ")", "for", "restr", "in", "suffictinet_texts", ":", "if", "restr", "in", "out", ":", "time", ".", "sleep", "(", "constants", ".", "DEFAULT_SLEEP", ")", "return", "True", "raise", "ConuException", "(", "\"Unable to start machine %s within %d (machinectl status command dos not contain %s)\"", "%", "(", "name", ",", "constants", ".", "DEFAULT_RETRYTIMEOUT", ",", "suffictinet_texts", ")", ")" ]
A helper function that will reduce duplication of data by reusing the parent contour tree s parameters and data
def build_for_contour_tree(self, contour_tree, negate=False):
    """A helper function that reduces duplication of data by reusing the
    parent contour tree's parameters and data.

    Parameters
    ----------
    contour_tree
        The parent contour tree whose normalized data and gradient are reused.
    negate : bool, optional
        If True, build a Split tree on the negated scalar field; otherwise
        build a Join tree.

    Fix: ``time.clock()`` was deprecated since Python 3.3 and removed in
    Python 3.8; the debug timing now uses ``time.perf_counter()``.
    """
    if self.debug:
        tree_type = "Split" if negate else "Join"
        sys.stdout.write("{} Tree Computation: ".format(tree_type))
        start = time.perf_counter()
    Y = contour_tree.Y
    if negate:
        Y = -Y
    self.__tree = MergeTreeFloat(
        vectorFloat(contour_tree.Xnorm.flatten()),
        vectorFloat(Y),
        str(contour_tree.gradient),
        contour_tree.graph_rep.full_graph(),
        self.debug,
    )
    self._internal_build()
    if self.debug:
        end = time.perf_counter()
        sys.stdout.write("%f s\n" % (end - start))
11,717
https://github.com/maljovec/topopy/blob/4be598d51c4e4043b73d4ad44beed6d289e2f088/topopy/MergeTree.py#L135-L160
[ "def", "set_exception", "(", "self", ",", "exception", ")", ":", "assert", "isinstance", "(", "exception", ",", "Exception", ")", ",", "\"%r should be an Exception\"", "%", "exception", "self", ".", "_exception", "=", "exception", "self", ".", "_state", "=", "self", ".", "FINISHED" ]
Convenient wrapper around functions that should exit or raise an exception
def verify_abort(func, *args, **kwargs):
    """Convenient wrapper around functions that should exit or raise an exception.

    Parameters
    ----------
    func : callable
        Function expected to raise.
    *args, **kwargs
        Forwarded to ``func``. The special kwarg ``expected_exception``
        (default: ``runez.system.AbortException``) selects which exception
        type is considered a pass.

    Returns
    -------
    str
        Captured output produced while ``func`` ran, when it raised as
        expected. Fails an assertion when ``func`` returns normally.
    """
    exc_type = kwargs.pop("expected_exception", runez.system.AbortException)
    with CaptureOutput() as logged:
        try:
            result = func(*args, **kwargs)
            # Reaching here means the call returned instead of raising.
            assert False, "%s did not raise, but returned %s" % (func, result)
        except exc_type:
            return str(logged)
11,718
https://github.com/zsimic/runez/blob/14363b719a1aae1528859a501a22d075ce0abfcc/src/runez/context.py#L248-L270
[ "def", "Add", "(", "self", ",", "other", ")", ":", "if", "len", "(", "self", ".", "data", ")", "!=", "len", "(", "other", ".", "data", ")", ":", "raise", "RuntimeError", "(", "\"Can only add series of identical lengths.\"", ")", "for", "i", "in", "range", "(", "len", "(", "self", ".", "data", ")", ")", ":", "if", "self", ".", "data", "[", "i", "]", "[", "1", "]", "!=", "other", ".", "data", "[", "i", "]", "[", "1", "]", ":", "raise", "RuntimeError", "(", "\"Timestamp mismatch.\"", ")", "if", "self", ".", "data", "[", "i", "]", "[", "0", "]", "is", "None", "and", "other", ".", "data", "[", "i", "]", "[", "0", "]", "is", "None", ":", "continue", "self", ".", "data", "[", "i", "]", "[", "0", "]", "=", "(", "self", ".", "data", "[", "i", "]", "[", "0", "]", "or", "0", ")", "+", "(", "other", ".", "data", "[", "i", "]", "[", "0", "]", "or", "0", ")" ]
Current content popped useful for testing
def pop(self, strip=False):
    """Return the current content and clear the buffer; useful for testing.

    Parameters
    ----------
    strip : bool, optional
        When True and content is non-empty, strip surrounding whitespace.

    Returns
    -------
    The content that was held before clearing.
    """
    content = self.contents()
    self.clear()
    if strip and content:
        content = content.strip()
    return content
11,719
https://github.com/zsimic/runez/blob/14363b719a1aae1528859a501a22d075ce0abfcc/src/runez/context.py#L72-L78
[ "def", "list_releases", "(", ")", ":", "response", "=", "requests", ".", "get", "(", "PYPI_URL", ".", "format", "(", "package", "=", "PYPI_PACKAGE_NAME", ")", ")", "if", "response", ":", "data", "=", "response", ".", "json", "(", ")", "releases_dict", "=", "data", ".", "get", "(", "'releases'", ",", "{", "}", ")", "if", "releases_dict", ":", "for", "version", ",", "release", "in", "releases_dict", ".", "items", "(", ")", ":", "release_formats", "=", "[", "]", "published_on_date", "=", "None", "for", "fmt", "in", "release", ":", "release_formats", ".", "append", "(", "fmt", ".", "get", "(", "'packagetype'", ")", ")", "published_on_date", "=", "fmt", ".", "get", "(", "'upload_time'", ")", "release_formats", "=", "' | '", ".", "join", "(", "release_formats", ")", "print", "(", "'{:<10}{:>15}{:>25}'", ".", "format", "(", "version", ",", "published_on_date", ",", "release_formats", ")", ")", "else", ":", "print", "(", "'No releases found for {}'", ".", "format", "(", "PYPI_PACKAGE_NAME", ")", ")", "else", ":", "print", "(", "'Package \"{}\" not found on Pypi.org'", ".", "format", "(", "PYPI_PACKAGE_NAME", ")", ")" ]
Get svg string
def contents(self):
    """Get svg string.

    Builds the ``<svg>`` header (font, size, viewBox), an optional background
    rectangle, the accumulated elements, and the closing tag.

    Returns
    -------
    str
        The complete SVG document text.

    Fix: the background ``<rect>`` was emitted as ``x="..", y=".."`` with a
    stray comma between attributes, which is invalid SVG syntax; the comma
    is removed.
    """
    c = self._header[:]
    c.append(' font-weight="{}"'.format(self.font_weight))
    c.append(' font-family="{}"'.format(self.font_family))
    c.append(' width="{}" height="{}"'.format(*self.screen_size))
    # Scaled drawing size; the viewBox is a square around the longer side
    # plus the margin, centered on the drawing.
    sclw = self.original_size[0] * self.scale_factor
    sclh = self.original_size[1] * self.scale_factor
    longside = max([sclw, sclh])
    width = round(longside + self.margin * 2, 2)
    height = round(longside + self.margin * 2, 2)
    xleft = round(-self.margin - (longside - sclw) / 2, 2)
    ytop = round(-self.margin - (longside - sclh) / 2, 2)
    c.append(' viewBox="{} {} {} {}">\n'.format(xleft, ytop, width, height))
    if self.bgcolor is not None:
        c.append('<rect x="{}" y="{}" width="{}" height="{}" fill="{}" \
/>\n'.format(xleft, ytop, width, height, self.bgcolor))
    c.extend(self._elems)
    c.append("</svg>")
    return "".join(c)
11,720
https://github.com/mojaie/chorus/blob/fc7fe23a0272554c67671645ab07830b315eeb1b/chorus/draw/svg.py#L49-L70
[ "def", "namer", "(", "cls", ",", "imageUrl", ",", "pageUrl", ")", ":", "start", "=", "''", "tsmatch", "=", "compile", "(", "r'/(\\d+)-'", ")", ".", "search", "(", "imageUrl", ")", "if", "tsmatch", ":", "start", "=", "datetime", ".", "utcfromtimestamp", "(", "int", "(", "tsmatch", ".", "group", "(", "1", ")", ")", ")", ".", "strftime", "(", "\"%Y-%m-%d\"", ")", "else", ":", "# There were only chapter 1, page 4 and 5 not matching when writing", "# this...", "start", "=", "'2015-04-11x'", "return", "start", "+", "\"-\"", "+", "pageUrl", ".", "rsplit", "(", "'/'", ",", "1", ")", "[", "-", "1", "]" ]
Get svg in Data URL Scheme format .
def data_url_scheme(self):
    """Get svg in Data URL Scheme format.

    Returns
    -------
    str
        ``data:image/svg+xml;base64,`` followed by the base64-encoded SVG.
    """
    # TODO: move to web.app or make it a function
    payload = base64.b64encode(self.contents().encode())
    return "data:image/svg+xml;base64," + payload.decode()
11,721
https://github.com/mojaie/chorus/blob/fc7fe23a0272554c67671645ab07830b315eeb1b/chorus/draw/svg.py#L72-L78
[ "def", "result", "(", "self", ")", ":", "self", ".", "_reactor_check", "(", ")", "self", ".", "_event", ".", "wait", "(", ")", "if", "self", ".", "_exception", ":", "six", ".", "reraise", "(", "self", ".", "_exception", ".", "__class__", ",", "self", ".", "_exception", ",", "self", ".", "_traceback", ")", "if", "self", ".", "_result", "==", "NONE_RESULT", ":", "return", "None", "else", ":", "return", "self", ".", "_result" ]
For Svg coordinate system reflect over X axis and translate from center to top - left
def _coords_conv ( self , pos ) : px = ( self . original_size [ 0 ] / 2 + pos [ 0 ] ) * self . scale_factor py = ( self . original_size [ 1 ] / 2 - pos [ 1 ] ) * self . scale_factor return round ( px , 2 ) , round ( py , 2 )
11,722
https://github.com/mojaie/chorus/blob/fc7fe23a0272554c67671645ab07830b315eeb1b/chorus/draw/svg.py#L89-L95
[ "async", "def", "async_get_bridgeid", "(", "session", ",", "host", ",", "port", ",", "api_key", ",", "*", "*", "kwargs", ")", ":", "url", "=", "'http://{}:{}/api/{}/config'", ".", "format", "(", "host", ",", "str", "(", "port", ")", ",", "api_key", ")", "response", "=", "await", "async_request", "(", "session", ".", "get", ",", "url", ")", "bridgeid", "=", "response", "[", "'bridgeid'", "]", "_LOGGER", ".", "info", "(", "\"Bridge id: %s\"", ",", "bridgeid", ")", "return", "bridgeid" ]
Returns the standard logger
def get_logger(self):
    """Returns the standard logger.

    Creates and caches a "flowsLogger" with a stream handler at INFO level
    on first use; subsequent calls return the cached instance.

    Returns
    -------
    logging.Logger
    """
    if Global.LOGGER:
        Global.LOGGER.debug('configuring a logger')
    if self._logger_instance is not None:
        # Already configured; reuse the cached logger.
        return self._logger_instance
    logger = logging.getLogger("flowsLogger")
    logger.setLevel(logging.DEBUG)
    formatter = logging.Formatter(
        '%(asctime)s - [%(levelname)s]|%(thread)d\t%(message)s',
        '%Y-%m-%d %H:%M:%S')
    stream_handler = logging.StreamHandler()
    stream_handler.setFormatter(formatter)
    stream_handler.setLevel(logging.INFO)
    logger.addHandler(stream_handler)
    self._logger_instance = logger
    return self._logger_instance
11,723
https://github.com/mastro35/flows/blob/05e488385673a69597b5b39c7728795aa4d5eb18/flows/FlowsLogger.py#L39-L61
[ "def", "handleServerEvents", "(", "self", ",", "msg", ")", ":", "self", ".", "log", ".", "debug", "(", "'MSG %s'", ",", "msg", ")", "self", ".", "handleConnectionState", "(", "msg", ")", "if", "msg", ".", "typeName", "==", "\"error\"", ":", "self", ".", "handleErrorEvents", "(", "msg", ")", "elif", "msg", ".", "typeName", "==", "dataTypes", "[", "\"MSG_CURRENT_TIME\"", "]", ":", "if", "self", ".", "time", "<", "msg", ".", "time", ":", "self", ".", "time", "=", "msg", ".", "time", "elif", "(", "msg", ".", "typeName", "==", "dataTypes", "[", "\"MSG_TYPE_MKT_DEPTH\"", "]", "or", "msg", ".", "typeName", "==", "dataTypes", "[", "\"MSG_TYPE_MKT_DEPTH_L2\"", "]", ")", ":", "self", ".", "handleMarketDepth", "(", "msg", ")", "elif", "msg", ".", "typeName", "==", "dataTypes", "[", "\"MSG_TYPE_TICK_STRING\"", "]", ":", "self", ".", "handleTickString", "(", "msg", ")", "elif", "msg", ".", "typeName", "==", "dataTypes", "[", "\"MSG_TYPE_TICK_PRICE\"", "]", ":", "self", ".", "handleTickPrice", "(", "msg", ")", "elif", "msg", ".", "typeName", "==", "dataTypes", "[", "\"MSG_TYPE_TICK_GENERIC\"", "]", ":", "self", ".", "handleTickGeneric", "(", "msg", ")", "elif", "msg", ".", "typeName", "==", "dataTypes", "[", "\"MSG_TYPE_TICK_SIZE\"", "]", ":", "self", ".", "handleTickSize", "(", "msg", ")", "elif", "msg", ".", "typeName", "==", "dataTypes", "[", "\"MSG_TYPE_TICK_OPTION\"", "]", ":", "self", ".", "handleTickOptionComputation", "(", "msg", ")", "elif", "(", "msg", ".", "typeName", "==", "dataTypes", "[", "\"MSG_TYPE_OPEN_ORDER\"", "]", "or", "msg", ".", "typeName", "==", "dataTypes", "[", "\"MSG_TYPE_OPEN_ORDER_END\"", "]", "or", "msg", ".", "typeName", "==", "dataTypes", "[", "\"MSG_TYPE_ORDER_STATUS\"", "]", ")", ":", "self", ".", "handleOrders", "(", "msg", ")", "elif", "msg", ".", "typeName", "==", "dataTypes", "[", "\"MSG_TYPE_HISTORICAL_DATA\"", "]", ":", "self", ".", "handleHistoricalData", "(", "msg", ")", "elif", "msg", ".", "typeName", "==", "dataTypes", "[", 
"\"MSG_TYPE_ACCOUNT_UPDATES\"", "]", ":", "self", ".", "handleAccount", "(", "msg", ")", "elif", "msg", ".", "typeName", "==", "dataTypes", "[", "\"MSG_TYPE_PORTFOLIO_UPDATES\"", "]", ":", "self", ".", "handlePortfolio", "(", "msg", ")", "elif", "msg", ".", "typeName", "==", "dataTypes", "[", "\"MSG_TYPE_POSITION\"", "]", ":", "self", ".", "handlePosition", "(", "msg", ")", "elif", "msg", ".", "typeName", "==", "dataTypes", "[", "\"MSG_TYPE_NEXT_ORDER_ID\"", "]", ":", "self", ".", "handleNextValidId", "(", "msg", ".", "orderId", ")", "elif", "msg", ".", "typeName", "==", "dataTypes", "[", "\"MSG_CONNECTION_CLOSED\"", "]", ":", "self", ".", "handleConnectionClosed", "(", "msg", ")", "# elif msg.typeName == dataTypes[\"MSG_TYPE_MANAGED_ACCOUNTS\"]:", "# self.accountCode = msg.accountsList", "elif", "msg", ".", "typeName", "==", "dataTypes", "[", "\"MSG_COMMISSION_REPORT\"", "]", ":", "self", ".", "commission", "=", "msg", ".", "commissionReport", ".", "m_commission", "elif", "msg", ".", "typeName", "==", "dataTypes", "[", "\"MSG_CONTRACT_DETAILS\"", "]", ":", "self", ".", "handleContractDetails", "(", "msg", ",", "end", "=", "False", ")", "elif", "msg", ".", "typeName", "==", "dataTypes", "[", "\"MSG_CONTRACT_DETAILS_END\"", "]", ":", "self", ".", "handleContractDetails", "(", "msg", ",", "end", "=", "True", ")", "elif", "msg", ".", "typeName", "==", "dataTypes", "[", "\"MSG_TICK_SNAPSHOT_END\"", "]", ":", "self", ".", "ibCallback", "(", "caller", "=", "\"handleTickSnapshotEnd\"", ",", "msg", "=", "msg", ")", "else", ":", "# log handler msg", "self", ".", "log_msg", "(", "\"server\"", ",", "msg", ")" ]
Returns a new standard logger instance
def reconfigure_log_level ( self ) : if Global . LOGGER : Global . LOGGER . debug ( 'reconfiguring logger level' ) stream_handlers = filter ( lambda x : type ( x ) is logging . StreamHandler , self . _logger_instance . handlers ) for x in stream_handlers : x . level = Global . CONFIG_MANAGER . log_level return self . get_logger ( )
11,724
https://github.com/mastro35/flows/blob/05e488385673a69597b5b39c7728795aa4d5eb18/flows/FlowsLogger.py#L63-L75
[ "def", "parse_torrent_properties", "(", "table_datas", ")", ":", "output", "=", "{", "'category'", ":", "table_datas", "[", "0", "]", ".", "text", ",", "'subcategory'", ":", "None", ",", "'quality'", ":", "None", ",", "'language'", ":", "None", "}", "for", "i", "in", "range", "(", "1", ",", "len", "(", "table_datas", ")", ")", ":", "td", "=", "table_datas", "[", "i", "]", "url", "=", "td", ".", "get", "(", "'href'", ")", "params", "=", "Parser", ".", "get_params", "(", "url", ")", "if", "Parser", ".", "is_subcategory", "(", "params", ")", "and", "not", "output", "[", "'subcategory'", "]", ":", "output", "[", "'subcategory'", "]", "=", "td", ".", "text", "elif", "Parser", ".", "is_quality", "(", "params", ")", "and", "not", "output", "[", "'quality'", "]", ":", "output", "[", "'quality'", "]", "=", "td", ".", "text", "elif", "Parser", ".", "is_language", "(", "params", ")", "and", "not", "output", "[", "'language'", "]", ":", "output", "[", "'language'", "]", "=", "td", ".", "text", "return", "output" ]
Factory for a toctree node .
def _build_toctree_node ( parent = None , entries = None , includefiles = None , caption = None ) : # Add the toctree's node itself subnode = sphinx . addnodes . toctree ( ) subnode [ 'parent' ] = parent subnode [ 'entries' ] = entries subnode [ 'includefiles' ] = includefiles subnode [ 'caption' ] = caption # These values are needed for toctree node types. We don't need/want # these to be configurable for module-toctree. subnode [ 'maxdepth' ] = 1 subnode [ 'hidden' ] = False subnode [ 'glob' ] = None subnode [ 'hidden' ] = False subnode [ 'includehidden' ] = False subnode [ 'numbered' ] = 0 subnode [ 'titlesonly' ] = False return subnode
11,725
https://github.com/lsst-sqre/documenteer/blob/75f02901a80042b28d074df1cc1dca32eb8e38c8/documenteer/sphinxext/packagetoctree.py#L228-L247
[ "def", "alloc_data", "(", "self", ",", "value", ")", ":", "if", "isinstance", "(", "value", ",", "six", ".", "binary_type", ")", ":", "return", "self", ".", "_alloc_data", "(", "value", ")", "elif", "isinstance", "(", "value", ",", "six", ".", "text_type", ")", ":", "return", "self", ".", "_alloc_data", "(", "value", ".", "encode", "(", "'utf-8'", ")", "+", "b'\\0'", ")", "else", ":", "raise", "TypeError", "(", "'No idea how to encode %s'", "%", "repr", "(", "value", ")", ")" ]
Parse the skip option of skipped module names .
def _parse_skip_option ( self ) : try : skip_text = self . options [ 'skip' ] except KeyError : return [ ] modules = [ module . strip ( ) for module in skip_text . split ( ',' ) ] return modules
11,726
https://github.com/lsst-sqre/documenteer/blob/75f02901a80042b28d074df1cc1dca32eb8e38c8/documenteer/sphinxext/packagetoctree.py#L94-L103
[ "def", "verify2", "(", "self", ",", "atv_public_key", ",", "data", ")", ":", "self", ".", "_check_initialized", "(", ")", "log_binary", "(", "_LOGGER", ",", "'Verify'", ",", "PublicSecret", "=", "atv_public_key", ",", "Data", "=", "data", ")", "# Generate a shared secret key", "public", "=", "curve25519", ".", "Public", "(", "atv_public_key", ")", "shared", "=", "self", ".", "_verify_private", ".", "get_shared_key", "(", "public", ",", "hashfunc", "=", "lambda", "x", ":", "x", ")", "# No additional hashing used", "log_binary", "(", "_LOGGER", ",", "'Shared secret'", ",", "Secret", "=", "shared", ")", "# Derive new AES key and IV from shared key", "aes_key", "=", "hash_sha512", "(", "'Pair-Verify-AES-Key'", ",", "shared", ")", "[", "0", ":", "16", "]", "aes_iv", "=", "hash_sha512", "(", "'Pair-Verify-AES-IV'", ",", "shared", ")", "[", "0", ":", "16", "]", "log_binary", "(", "_LOGGER", ",", "'Pair-Verify-AES'", ",", "Key", "=", "aes_key", ",", "IV", "=", "aes_iv", ")", "# Sign public keys and encrypt with AES", "signer", "=", "SigningKey", "(", "self", ".", "_auth_private", ")", "signed", "=", "signer", ".", "sign", "(", "self", ".", "_verify_public", ".", "serialize", "(", ")", "+", "atv_public_key", ")", "signature", ",", "_", "=", "aes_encrypt", "(", "modes", ".", "CTR", ",", "aes_key", ",", "aes_iv", ",", "data", ",", "signed", ")", "log_binary", "(", "_LOGGER", ",", "'Signature'", ",", "Signature", "=", "signature", ")", "# Signature is prepended with 0x00000000 (alignment?)", "return", "b'\\x00\\x00\\x00\\x00'", "+", "signature" ]
Parse the skip option of skipped package names .
def _parse_skip_option ( self ) : try : skip_text = self . options [ 'skip' ] except KeyError : return [ ] packages = [ package . strip ( ) for package in skip_text . split ( ',' ) ] return packages
11,727
https://github.com/lsst-sqre/documenteer/blob/75f02901a80042b28d074df1cc1dca32eb8e38c8/documenteer/sphinxext/packagetoctree.py#L188-L197
[ "def", "verify2", "(", "self", ",", "atv_public_key", ",", "data", ")", ":", "self", ".", "_check_initialized", "(", ")", "log_binary", "(", "_LOGGER", ",", "'Verify'", ",", "PublicSecret", "=", "atv_public_key", ",", "Data", "=", "data", ")", "# Generate a shared secret key", "public", "=", "curve25519", ".", "Public", "(", "atv_public_key", ")", "shared", "=", "self", ".", "_verify_private", ".", "get_shared_key", "(", "public", ",", "hashfunc", "=", "lambda", "x", ":", "x", ")", "# No additional hashing used", "log_binary", "(", "_LOGGER", ",", "'Shared secret'", ",", "Secret", "=", "shared", ")", "# Derive new AES key and IV from shared key", "aes_key", "=", "hash_sha512", "(", "'Pair-Verify-AES-Key'", ",", "shared", ")", "[", "0", ":", "16", "]", "aes_iv", "=", "hash_sha512", "(", "'Pair-Verify-AES-IV'", ",", "shared", ")", "[", "0", ":", "16", "]", "log_binary", "(", "_LOGGER", ",", "'Pair-Verify-AES'", ",", "Key", "=", "aes_key", ",", "IV", "=", "aes_iv", ")", "# Sign public keys and encrypt with AES", "signer", "=", "SigningKey", "(", "self", ".", "_auth_private", ")", "signed", "=", "signer", ".", "sign", "(", "self", ".", "_verify_public", ".", "serialize", "(", ")", "+", "atv_public_key", ")", "signature", ",", "_", "=", "aes_encrypt", "(", "modes", ".", "CTR", ",", "aes_key", ",", "aes_iv", ",", "data", ",", "signed", ")", "log_binary", "(", "_LOGGER", ",", "'Signature'", ",", "Signature", "=", "signature", ")", "# Signature is prepended with 0x00000000 (alignment?)", "return", "b'\\x00\\x00\\x00\\x00'", "+", "signature" ]
Set internal configuration variables according to the input parameters
def _set_command_line_arguments ( self , args ) : Global . LOGGER . debug ( "setting command line arguments" ) if args . VERBOSE : Global . LOGGER . debug ( "verbose mode active" ) Global . CONFIG_MANAGER . log_level = logging . DEBUG Global . LOGGER_INSTANCE . reconfigure_log_level ( ) if args . STATS > 0 : Global . LOGGER . debug ( f"stats requested every {args.STATS} seconds" ) Global . CONFIG_MANAGER . show_stats = True Global . CONFIG_MANAGER . stats_timeout = args . STATS if args . INTERVAL > 0 : Global . LOGGER . debug ( f"setting sleep interval to {args.INTERVAL} milliseconds" ) Global . CONFIG_MANAGER . sleep_interval = float ( args . INTERVAL ) / 1000 if args . TRACE : Global . LOGGER . debug ( "tracing mode active" ) Global . CONFIG_MANAGER . tracing_mode = True Global . CONFIG_MANAGER . log_level = logging . DEBUG Global . LOGGER_INSTANCE . reconfigure_log_level ( ) if args . MESSAGEINTERVAL is not None and args . MESSAGEINTERVAL > 0 : Global . LOGGER . debug ( f"setting message fetcher sleep interval to {args.MESSAGEINTERVAL/10} milliseconds" ) Global . CONFIG_MANAGER . message_fetcher_sleep_interval = float ( args . MESSAGEINTERVAL ) / 10000 Global . CONFIG_MANAGER . fixed_message_fetcher_interval = True Global . LOGGER . debug ( f"recipes to be parsed: {args.FILENAME}" ) Global . CONFIG_MANAGER . recipes = ( args . FILENAME )
11,728
https://github.com/mastro35/flows/blob/05e488385673a69597b5b39c7728795aa4d5eb18/flows/FlowsManager.py#L66-L99
[ "def", "getid", "(", "self", ",", "idtype", ")", ":", "memorable_id", "=", "None", "while", "memorable_id", "in", "self", ".", "_ids", ":", "l", "=", "[", "]", "for", "_", "in", "range", "(", "4", ")", ":", "l", ".", "append", "(", "str", "(", "randint", "(", "0", ",", "19", ")", ")", ")", "memorable_id", "=", "''", ".", "join", "(", "l", ")", "self", ".", "_ids", ".", "append", "(", "memorable_id", ")", "return", "idtype", "+", "'-'", "+", "memorable_id" ]
Start all the processes
def start ( self ) : Global . LOGGER . info ( "starting the flow manager" ) self . _start_actions ( ) self . _start_message_fetcher ( ) Global . LOGGER . debug ( "flow manager started" )
11,729
https://github.com/mastro35/flows/blob/05e488385673a69597b5b39c7728795aa4d5eb18/flows/FlowsManager.py#L102-L109
[ "def", "_adapt_WSDateTime", "(", "dt", ")", ":", "try", ":", "ts", "=", "int", "(", "(", "dt", ".", "replace", "(", "tzinfo", "=", "pytz", ".", "utc", ")", "-", "datetime", "(", "1970", ",", "1", ",", "1", ",", "tzinfo", "=", "pytz", ".", "utc", ")", ")", ".", "total_seconds", "(", ")", ")", "except", "(", "OverflowError", ",", "OSError", ")", ":", "if", "dt", "<", "datetime", ".", "now", "(", ")", ":", "ts", "=", "0", "else", ":", "ts", "=", "2", "**", "63", "-", "1", "return", "ts" ]
Stop all the processes
def stop ( self ) : Global . LOGGER . info ( "stopping the flow manager" ) self . _stop_actions ( ) self . isrunning = False Global . LOGGER . debug ( "flow manager stopped" )
11,730
https://github.com/mastro35/flows/blob/05e488385673a69597b5b39c7728795aa4d5eb18/flows/FlowsManager.py#L111-L118
[ "def", "_adapt_WSDateTime", "(", "dt", ")", ":", "try", ":", "ts", "=", "int", "(", "(", "dt", ".", "replace", "(", "tzinfo", "=", "pytz", ".", "utc", ")", "-", "datetime", "(", "1970", ",", "1", ",", "1", ",", "tzinfo", "=", "pytz", ".", "utc", ")", ")", ".", "total_seconds", "(", ")", ")", "except", "(", "OverflowError", ",", "OSError", ")", ":", "if", "dt", "<", "datetime", ".", "now", "(", ")", ":", "ts", "=", "0", "else", ":", "ts", "=", "2", "**", "63", "-", "1", "return", "ts" ]
Restart all the processes
def restart ( self ) : Global . LOGGER . info ( "restarting the flow manager" ) self . _stop_actions ( ) # stop the old actions self . actions = [ ] # clear the action list self . _start_actions ( ) # start the configured actions Global . LOGGER . debug ( "flow manager restarted" )
11,731
https://github.com/mastro35/flows/blob/05e488385673a69597b5b39c7728795aa4d5eb18/flows/FlowsManager.py#L120-L128
[ "def", "_adapt_WSDateTime", "(", "dt", ")", ":", "try", ":", "ts", "=", "int", "(", "(", "dt", ".", "replace", "(", "tzinfo", "=", "pytz", ".", "utc", ")", "-", "datetime", "(", "1970", ",", "1", ",", "1", ",", "tzinfo", "=", "pytz", ".", "utc", ")", ")", ".", "total_seconds", "(", ")", ")", "except", "(", "OverflowError", ",", "OSError", ")", ":", "if", "dt", "<", "datetime", ".", "now", "(", ")", ":", "ts", "=", "0", "else", ":", "ts", "=", "2", "**", "63", "-", "1", "return", "ts" ]
Start all the actions for the recipes
def _start_actions ( self ) : Global . LOGGER . info ( "starting actions" ) for recipe in Global . CONFIG_MANAGER . recipes : Global . CONFIG_MANAGER . read_recipe ( recipe ) list ( map ( lambda section : self . _start_action_for_section ( section ) , Global . CONFIG_MANAGER . sections ) )
11,732
https://github.com/mastro35/flows/blob/05e488385673a69597b5b39c7728795aa4d5eb18/flows/FlowsManager.py#L130-L140
[ "def", "logEndTime", "(", ")", ":", "logger", ".", "info", "(", "'\\n'", "+", "'#'", "*", "70", ")", "logger", ".", "info", "(", "'Complete'", ")", "logger", ".", "info", "(", "datetime", ".", "today", "(", ")", ".", "strftime", "(", "\"%A, %d %B %Y %I:%M%p\"", ")", ")", "logger", ".", "info", "(", "'#'", "*", "70", "+", "'\\n'", ")" ]
Start all the actions for a particular section
def _start_action_for_section ( self , section ) : if section == "configuration" : return Global . LOGGER . debug ( "starting actions for section " + section ) # read the configuration of the action action_configuration = Global . CONFIG_MANAGER . sections [ section ] if len ( action_configuration ) == 0 : Global . LOGGER . warn ( f"section {section} has no configuration, skipping" ) return action_type = None # action_input = None new_managed_input = [ ] if "type" in action_configuration : action_type = action_configuration [ "type" ] if "input" in action_configuration : action_input = action_configuration [ "input" ] new_managed_input = ( item . strip ( ) for item in action_input . split ( "," ) ) my_action = Action . create_action_for_code ( action_type , section , action_configuration , list ( new_managed_input ) ) if not my_action : Global . LOGGER . warn ( f"can't find a type for action {section}, the action will be skipped" ) return self . actions . append ( my_action ) Global . LOGGER . debug ( "updating the subscriptions table" ) for my_input in my_action . monitored_input : self . subscriptions . setdefault ( my_input , [ ] ) . append ( my_action )
11,733
https://github.com/mastro35/flows/blob/05e488385673a69597b5b39c7728795aa4d5eb18/flows/FlowsManager.py#L142-L185
[ "def", "get_free_gpus", "(", "max_procs", "=", "0", ")", ":", "# Try connect with NVIDIA drivers", "logger", "=", "logging", ".", "getLogger", "(", "__name__", ")", "try", ":", "py3nvml", ".", "nvmlInit", "(", ")", "except", ":", "str_", "=", "\"\"\"Couldn't connect to nvml drivers. Check they are installed correctly.\"\"\"", "warnings", ".", "warn", "(", "str_", ",", "RuntimeWarning", ")", "logger", ".", "warn", "(", "str_", ")", "return", "[", "]", "num_gpus", "=", "py3nvml", ".", "nvmlDeviceGetCount", "(", ")", "gpu_free", "=", "[", "False", "]", "*", "num_gpus", "for", "i", "in", "range", "(", "num_gpus", ")", ":", "try", ":", "h", "=", "py3nvml", ".", "nvmlDeviceGetHandleByIndex", "(", "i", ")", "except", ":", "continue", "procs", "=", "try_get_info", "(", "py3nvml", ".", "nvmlDeviceGetComputeRunningProcesses", ",", "h", ",", "[", "'something'", "]", ")", "if", "len", "(", "procs", ")", "<=", "max_procs", ":", "gpu_free", "[", "i", "]", "=", "True", "py3nvml", ".", "nvmlShutdown", "(", ")", "return", "gpu_free" ]
Stop all the actions
def _stop_actions ( self ) : Global . LOGGER . info ( "stopping actions" ) list ( map ( lambda x : x . stop ( ) , self . actions ) ) Global . LOGGER . info ( "actions stopped" )
11,734
https://github.com/mastro35/flows/blob/05e488385673a69597b5b39c7728795aa4d5eb18/flows/FlowsManager.py#L187-L195
[ "def", "calibrate_data", "(", "params", ",", "raw_data", ",", "calib_data", ")", ":", "start", "=", "calib_data", ".", "before", "(", "datetime", ".", "max", ")", "if", "start", "is", "None", ":", "start", "=", "datetime", ".", "min", "start", "=", "raw_data", ".", "after", "(", "start", "+", "SECOND", ")", "if", "start", "is", "None", ":", "return", "start", "del", "calib_data", "[", "start", ":", "]", "calibrator", "=", "Calib", "(", "params", ",", "raw_data", ")", "def", "calibgen", "(", "inputdata", ")", ":", "\"\"\"Internal generator function\"\"\"", "count", "=", "0", "for", "data", "in", "inputdata", ":", "idx", "=", "data", "[", "'idx'", "]", "count", "+=", "1", "if", "count", "%", "10000", "==", "0", ":", "logger", ".", "info", "(", "\"calib: %s\"", ",", "idx", ".", "isoformat", "(", "' '", ")", ")", "elif", "count", "%", "500", "==", "0", ":", "logger", ".", "debug", "(", "\"calib: %s\"", ",", "idx", ".", "isoformat", "(", "' '", ")", ")", "for", "key", "in", "(", "'rain'", ",", "'abs_pressure'", ",", "'temp_in'", ")", ":", "if", "data", "[", "key", "]", "is", "None", ":", "logger", ".", "error", "(", "'Ignoring invalid data at %s'", ",", "idx", ".", "isoformat", "(", "' '", ")", ")", "break", "else", ":", "yield", "calibrator", ".", "calib", "(", "data", ")", "calib_data", ".", "update", "(", "calibgen", "(", "raw_data", "[", "start", ":", "]", ")", ")", "return", "start" ]
Perform a system check to define if we need to throttle to handle all the incoming messages
def _perform_system_check ( self ) : if Global . CONFIG_MANAGER . tracing_mode : Global . LOGGER . debug ( "performing a system check" ) now = datetime . datetime . now ( ) sent = Global . MESSAGE_DISPATCHER . dispatched received = self . fetched queue_length = sent - received message_sleep_interval = Global . CONFIG_MANAGER . message_fetcher_sleep_interval if Global . CONFIG_MANAGER . show_stats : if ( now - self . last_stats_check_date ) . total_seconds ( ) > Global . CONFIG_MANAGER . stats_timeout : self . last_stats_check_date = now stats_string = f"showing stats\n--- [STATS] ---\nMessage Sent: {sent}\nMessage Received: {received}\nMessage Sleep Interval = {message_sleep_interval}\nQueue length = {queue_length}\n--- [ END ] ---" Global . LOGGER . info ( stats_string ) # if we are accumulating messages, or we have processed at least 5000 messages # since last check, we need to speed up the process messages_limit_reached = sent - self . last_queue_check_count > Global . CONFIG_MANAGER . messages_dispatched_for_system_check queue_limit_reached = queue_length > Global . CONFIG_MANAGER . queue_length_for_system_check time_limit_since_last_check_is_over = ( now - self . last_queue_check_date ) . total_seconds ( ) > Global . CONFIG_MANAGER . seconds_between_queue_check if not Global . CONFIG_MANAGER . fixed_message_fetcher_interval : if ( messages_limit_reached ) or ( queue_limit_reached and time_limit_since_last_check_is_over ) : cause = "messages limit reached" if messages_limit_reached else "queue limit reached" Global . LOGGER . debug ( f"triggering the throttle function due to {cause}" ) self . _adapt_sleep_interval ( sent , received , queue_length , now )
11,735
https://github.com/mastro35/flows/blob/05e488385673a69597b5b39c7728795aa4d5eb18/flows/FlowsManager.py#L197-L227
[ "def", "populateFromFile", "(", "self", ",", "dataUrls", ",", "indexFiles", ")", ":", "assert", "len", "(", "dataUrls", ")", "==", "len", "(", "indexFiles", ")", "for", "dataUrl", ",", "indexFile", "in", "zip", "(", "dataUrls", ",", "indexFiles", ")", ":", "varFile", "=", "pysam", ".", "VariantFile", "(", "dataUrl", ",", "index_filename", "=", "indexFile", ")", "try", ":", "self", ".", "_populateFromVariantFile", "(", "varFile", ",", "dataUrl", ",", "indexFile", ")", "finally", ":", "varFile", ".", "close", "(", ")" ]
Deliver the message to the subscripted actions
def _deliver_message ( self , msg ) : my_subscribed_actions = self . subscriptions . get ( msg . sender , [ ] ) for action in my_subscribed_actions : if Global . CONFIG_MANAGER . tracing_mode : Global . LOGGER . debug ( f"delivering message to {action.name}" ) action . on_input_received ( msg )
11,736
https://github.com/mastro35/flows/blob/05e488385673a69597b5b39c7728795aa4d5eb18/flows/FlowsManager.py#L229-L237
[ "def", "overlay_gateway_type", "(", "self", ",", "*", "*", "kwargs", ")", ":", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "get_config", "=", "kwargs", ".", "pop", "(", "'get'", ",", "False", ")", "if", "not", "get_config", ":", "gw_name", "=", "kwargs", ".", "pop", "(", "'gw_name'", ")", "gw_type", "=", "kwargs", ".", "pop", "(", "'gw_type'", ")", "gw_args", "=", "dict", "(", "name", "=", "gw_name", ",", "gw_type", "=", "gw_type", ")", "overlay_gw", "=", "getattr", "(", "self", ".", "_tunnels", ",", "'overlay_gateway_gw_type'", ")", "config", "=", "overlay_gw", "(", "*", "*", "gw_args", ")", "if", "get_config", ":", "overlay_gw", "=", "getattr", "(", "self", ".", "_tunnels", ",", "'overlay_gateway_gw_type'", ")", "config", "=", "overlay_gw", "(", "name", "=", "''", ",", "gw_type", "=", "''", ")", "output", "=", "callback", "(", "config", ",", "handler", "=", "'get_config'", ")", "if", "output", ".", "data", ".", "find", "(", "'.//{*}name'", ")", "is", "not", "None", ":", "gwtype", "=", "output", ".", "data", ".", "find", "(", "'.//{*}gw-type'", ")", ".", "text", "return", "gwtype", "else", ":", "return", "None", "return", "callback", "(", "config", ")" ]
Get an input message from the socket
def _fetch_messages ( self ) : try : [ _ , msg ] = self . socket . recv_multipart ( flags = zmq . NOBLOCK ) if Global . CONFIG_MANAGER . tracing_mode : Global . LOGGER . debug ( "fetched a new message" ) self . fetched = self . fetched + 1 obj = pickle . loads ( msg ) self . _deliver_message ( obj ) return obj except zmq . error . Again : return None except Exception as new_exception : Global . LOGGER . error ( new_exception ) raise new_exception
11,737
https://github.com/mastro35/flows/blob/05e488385673a69597b5b39c7728795aa4d5eb18/flows/FlowsManager.py#L239-L256
[ "def", "tag_pos_volume", "(", "line", ")", ":", "def", "tagger", "(", "match", ")", ":", "groups", "=", "match", ".", "groupdict", "(", ")", "try", ":", "year", "=", "match", ".", "group", "(", "'year'", ")", "except", "IndexError", ":", "# Extract year from volume name", "# which should always include the year", "g", "=", "re", ".", "search", "(", "re_pos_year_num", ",", "match", ".", "group", "(", "'volume_num'", ")", ",", "re", ".", "UNICODE", ")", "year", "=", "g", ".", "group", "(", "0", ")", "if", "year", ":", "groups", "[", "'year'", "]", "=", "' <cds.YR>(%s)</cds.YR>'", "%", "year", ".", "strip", "(", ")", ".", "strip", "(", "'()'", ")", "else", ":", "groups", "[", "'year'", "]", "=", "''", "return", "'<cds.JOURNAL>PoS</cds.JOURNAL>'", "' <cds.VOL>%(volume_name)s%(volume_num)s</cds.VOL>'", "'%(year)s'", "' <cds.PG>%(page)s</cds.PG>'", "%", "groups", "for", "p", "in", "re_pos", ":", "line", "=", "p", ".", "sub", "(", "tagger", ",", "line", ")", "return", "line" ]
Register callback for message fetcher coroutines
async def message_fetcher_coroutine ( self , loop ) : Global . LOGGER . debug ( 'registering callbacks for message fetcher coroutine' ) self . isrunning = True while self . isrunning : loop . call_soon ( self . _fetch_messages ) loop . call_soon ( self . _perform_system_check ) await asyncio . sleep ( Global . CONFIG_MANAGER . message_fetcher_sleep_interval ) Global . LOGGER . debug ( 'message fetcher stopped' )
11,738
https://github.com/mastro35/flows/blob/05e488385673a69597b5b39c7728795aa4d5eb18/flows/FlowsManager.py#L258-L269
[ "def", "exp_files", "(", "self", ")", ":", "ret", "=", "OrderedDict", "(", ")", "# restore the order of the experiments", "exp_file", "=", "self", ".", "exp_file", "if", "osp", ".", "exists", "(", "exp_file", ")", ":", "for", "key", ",", "val", "in", "safe_load", "(", "exp_file", ")", ".", "items", "(", ")", ":", "ret", "[", "key", "]", "=", "val", "for", "project", ",", "d", "in", "self", ".", "projects", ".", "items", "(", ")", ":", "project_path", "=", "d", "[", "'root'", "]", "config_path", "=", "osp", ".", "join", "(", "project_path", ",", "'.project'", ")", "if", "not", "osp", ".", "exists", "(", "config_path", ")", ":", "continue", "for", "fname", "in", "glob", ".", "glob", "(", "osp", ".", "join", "(", "config_path", ",", "'*.yml'", ")", ")", ":", "if", "fname", "==", "'.project.yml'", ":", "continue", "exp", "=", "osp", ".", "splitext", "(", "osp", ".", "basename", "(", "fname", ")", ")", "[", "0", "]", "if", "not", "isinstance", "(", "ret", ".", "get", "(", "exp", ")", ",", "Archive", ")", ":", "ret", "[", "exp", "]", "=", "osp", ".", "join", "(", "config_path", ",", "exp", "+", "'.yml'", ")", "if", "exp", "not", "in", "self", ".", "_project_map", "[", "project", "]", ":", "self", ".", "_project_map", "[", "project", "]", ".", "append", "(", "exp", ")", "return", "ret" ]
Adapt sleep time based on the number of the messages in queue
def _adapt_sleep_interval ( self , sent , received , queue , now ) : Global . LOGGER . debug ( "adjusting sleep interval" ) dispatched_since_last_check = sent - self . last_queue_check_count seconds_since_last_check = ( now - self . last_queue_check_date ) . total_seconds ( ) Global . LOGGER . debug ( str ( dispatched_since_last_check ) + " dispatched in the last " + str ( seconds_since_last_check ) ) sleep_time = ( seconds_since_last_check / ( dispatched_since_last_check + queue + 1 ) ) * 0.75 if sleep_time > 0.5 : sleep_time = 0.5 if sleep_time < 0.0001 : sleep_time = 0.0001 self . last_queue_check_date = now self . last_queue_check_count = sent Global . CONFIG_MANAGER . message_fetcher_sleep_interval = sleep_time sleep_interval_log_string = f"new sleep_interval = {sleep_time}" Global . LOGGER . debug ( sleep_interval_log_string ) if Global . CONFIG_MANAGER . show_stats : Global . LOGGER . info ( sleep_interval_log_string )
11,739
https://github.com/mastro35/flows/blob/05e488385673a69597b5b39c7728795aa4d5eb18/flows/FlowsManager.py#L284-L314
[ "def", "run", "(", "uri", ",", "user_entry_point", ",", "args", ",", "env_vars", "=", "None", ",", "wait", "=", "True", ",", "capture_error", "=", "False", ",", "runner", "=", "_runner", ".", "ProcessRunnerType", ",", "extra_opts", "=", "None", ")", ":", "# type: (str, str, List[str], Dict[str, str], bool, bool, _runner.RunnerType, Dict[str, str]) -> None", "env_vars", "=", "env_vars", "or", "{", "}", "env_vars", "=", "env_vars", ".", "copy", "(", ")", "_files", ".", "download_and_extract", "(", "uri", ",", "user_entry_point", ",", "_env", ".", "code_dir", ")", "install", "(", "user_entry_point", ",", "_env", ".", "code_dir", ",", "capture_error", ")", "_env", ".", "write_env_vars", "(", "env_vars", ")", "return", "_runner", ".", "get", "(", "runner", ",", "user_entry_point", ",", "args", ",", "env_vars", ",", "extra_opts", ")", ".", "run", "(", "wait", ",", "capture_error", ")" ]
Set the configuration for the Logger
def _parse_input_parameters ( self ) : Global . LOGGER . debug ( "define and parsing command line arguments" ) parser = argparse . ArgumentParser ( description = 'A workflow engine for Pythonistas' , formatter_class = argparse . RawTextHelpFormatter ) parser . add_argument ( 'FILENAME' , nargs = '+' , help = 'name of the recipe file(s)' ) parser . add_argument ( '-i' , '--INTERVAL' , type = int , default = 500 , metavar = ( 'MS' ) , help = 'perform a cycle each [MS] milliseconds. (default = 500)' ) parser . add_argument ( '-m' , '--MESSAGEINTERVAL' , type = int , metavar = ( 'X' ) , help = 'dequeue a message each [X] tenth of milliseconds. (default = auto)' ) parser . add_argument ( '-s' , '--STATS' , type = int , default = 0 , metavar = ( 'SEC' ) , help = 'show stats each [SEC] seconds. (default = NO STATS)' ) parser . add_argument ( '-t' , '--TRACE' , action = 'store_true' , help = 'enable super verbose output, only useful for tracing' ) parser . add_argument ( '-v' , '--VERBOSE' , action = 'store_true' , help = 'enable verbose output' ) parser . add_argument ( '-V' , '--VERSION' , action = "version" , version = __version__ ) args = parser . parse_args ( ) return args
11,740
https://github.com/mastro35/flows/blob/05e488385673a69597b5b39c7728795aa4d5eb18/flows/FlowsManager.py#L316-L340
[ "def", "createAltHistoryPlot", "(", "self", ")", ":", "self", ".", "altHistRect", "=", "patches", ".", "Rectangle", "(", "(", "self", ".", "leftPos", "+", "(", "self", ".", "vertSize", "/", "10.0", ")", ",", "-", "0.25", ")", ",", "0.5", ",", "0.5", ",", "facecolor", "=", "'grey'", ",", "edgecolor", "=", "'none'", ",", "alpha", "=", "0.4", ",", "zorder", "=", "4", ")", "self", ".", "axes", ".", "add_patch", "(", "self", ".", "altHistRect", ")", "self", ".", "altPlot", ",", "=", "self", ".", "axes", ".", "plot", "(", "[", "self", ".", "leftPos", "+", "(", "self", ".", "vertSize", "/", "10.0", ")", ",", "self", ".", "leftPos", "+", "(", "self", ".", "vertSize", "/", "10.0", ")", "+", "0.5", "]", ",", "[", "0.0", ",", "0.0", "]", ",", "color", "=", "'k'", ",", "marker", "=", "None", ",", "zorder", "=", "4", ")", "self", ".", "altMarker", ",", "=", "self", ".", "axes", ".", "plot", "(", "self", ".", "leftPos", "+", "(", "self", ".", "vertSize", "/", "10.0", ")", "+", "0.5", ",", "0.0", ",", "marker", "=", "'o'", ",", "color", "=", "'k'", ",", "zorder", "=", "4", ")", "self", ".", "altText2", "=", "self", ".", "axes", ".", "text", "(", "self", ".", "leftPos", "+", "(", "4", "*", "self", ".", "vertSize", "/", "10.0", ")", "+", "0.5", ",", "0.0", ",", "'%.f m'", "%", "self", ".", "relAlt", ",", "color", "=", "'k'", ",", "size", "=", "self", ".", "fontSize", ",", "ha", "=", "'left'", ",", "va", "=", "'center'", ",", "zorder", "=", "4", ")" ]
Set time_out field of all flagged timesheet entries to Null .
def migrate_050_to_051 ( session ) : entries_to_update = session . query ( Entry ) . filter ( Entry . forgot_sign_out . is_ ( True ) ) . filter ( Entry . time_out . isnot ( None ) ) for entry in entries_to_update : entry . time_out = None logging . info ( 'Entry updated {}' . format ( entry . uuid ) ) logging . debug ( entry . uuid ) session . add ( entry )
11,741
https://github.com/mesbahamin/chronophore/blob/ee140c61b4dfada966f078de8304bac737cec6f7/scripts/chronophore_migrate.py#L17-L29
[ "def", "_kpatch", "(", "url", ",", "data", ")", ":", "# Prepare headers", "headers", "=", "{", "\"Content-Type\"", ":", "\"application/json-patch+json\"", "}", "# Make request", "ret", "=", "http", ".", "query", "(", "url", ",", "method", "=", "'PATCH'", ",", "header_dict", "=", "headers", ",", "data", "=", "salt", ".", "utils", ".", "json", ".", "dumps", "(", "data", ")", ")", "# Check requests status", "if", "ret", ".", "get", "(", "'error'", ")", ":", "log", ".", "error", "(", "\"Got an error: %s\"", ",", "ret", ".", "get", "(", "\"error\"", ")", ")", "return", "ret", "else", ":", "return", "salt", ".", "utils", ".", "json", ".", "loads", "(", "ret", ".", "get", "(", "'body'", ")", ")" ]
Get all parameters of a task as one string
def get_task_param_string ( task ) : # get dict str -> str from luigi param_dict = task . to_str_params ( ) # sort keys, serialize items = [ ] for key in sorted ( param_dict . keys ( ) ) : items . append ( "'{:s}': '{:s}'" . format ( key , param_dict [ key ] ) ) return "{" + ", " . join ( items ) + "}"
11,742
https://github.com/ehansis/ozelot/blob/948675e02eb6fca940450f5cb814f53e97159e5b/ozelot/etl/tasks.py#L18-L32
[ "def", "compare_operands", "(", "self", ",", "p_operand1", ",", "p_operand2", ")", ":", "if", "self", ".", "operator", "==", "'<'", ":", "return", "p_operand1", "<", "p_operand2", "elif", "self", ".", "operator", "==", "'<='", ":", "return", "p_operand1", "<=", "p_operand2", "elif", "self", ".", "operator", "==", "'='", ":", "return", "p_operand1", "==", "p_operand2", "elif", "self", ".", "operator", "==", "'>='", ":", "return", "p_operand1", ">=", "p_operand2", "elif", "self", ".", "operator", "==", "'>'", ":", "return", "p_operand1", ">", "p_operand2", "elif", "self", ".", "operator", "==", "'!'", ":", "return", "p_operand1", "!=", "p_operand2", "return", "False" ]
Recursively check if a task and all its requirements are complete
def check_completion ( task , mark_incomplete = False , clear = False , return_stats = False ) : # run recursive task checking, get stats to_clear = dict ( ) is_complete , stats = _check_completion ( task , mark_incomplete = mark_incomplete , clear = clear , stats = { } , visited = dict ( ) , to_clear = to_clear ) # task clearing needs to happen top-down: because of foreign key constraints, a task can # only be cleared once all tasks that require it have been cleared while to_clear : # find all tasks that we can currently clear - tasks not required by other tasks; # iterate over list of keys to be able to modify dict while iterating found_clearable_task = False for task_id in list ( to_clear . keys ( ) ) : v = to_clear [ task_id ] if not v [ 'required_by' ] : # this is a task that can be cleared - no other task requires it found_clearable_task = True task = v [ 'task' ] if isinstance ( task , ORMTask ) : task . mark_incomplete ( ) task . clear ( ) _increment_stats ( stats , 'Cleared' ) config . logger . info ( "Cleared task: " + task_id ) else : config . logger . info ( 'Cannot clear task, not an ORMTask: ' + task_id ) # remove the task from the list of tasks that need clearing, remove references # in the required_by lists of all other tasks; this is not an efficient implementation, # O(n^2), could be made O(n) using lookup tables of the task graph del to_clear [ task_id ] for w in to_clear . values ( ) : w [ 'required_by' ] . discard ( task_id ) if not found_clearable_task : raise RuntimeError ( "Error in recursive task clearing, no clearable task found" ) config . logger . info ( "Task completion checking, summary:\n" + str ( stats ) ) if return_stats : return is_complete , stats else : return is_complete
11,743
https://github.com/ehansis/ozelot/blob/948675e02eb6fca940450f5cb814f53e97159e5b/ozelot/etl/tasks.py#L191-L257
[ "def", "register_dataframe_method", "(", "method", ")", ":", "def", "inner", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "class", "AccessorMethod", "(", "object", ")", ":", "def", "__init__", "(", "self", ",", "pandas_obj", ")", ":", "self", ".", "_obj", "=", "pandas_obj", "@", "wraps", "(", "method", ")", "def", "__call__", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "method", "(", "self", ".", "_obj", ",", "*", "args", ",", "*", "*", "kwargs", ")", "register_dataframe_accessor", "(", "method", ".", "__name__", ")", "(", "AccessorMethod", ")", "return", "method", "return", "inner", "(", ")" ]
Instantiate the task and build it with luigi
def build ( cls , local_scheduler = True , * * task_params ) : luigi . build ( [ cls ( * * task_params ) ] , local_scheduler = local_scheduler )
11,744
https://github.com/ehansis/ozelot/blob/948675e02eb6fca940450f5cb814f53e97159e5b/ozelot/etl/tasks.py#L63-L70
[ "def", "get_listing", "(", "self", ")", ":", "if", "not", "hasattr", "(", "self", ",", "'listing'", ")", ":", "allEvents", "=", "self", ".", "get_allEvents", "(", ")", "openEvents", "=", "allEvents", ".", "filter", "(", "registrationOpen", "=", "True", ")", "closedEvents", "=", "allEvents", ".", "filter", "(", "registrationOpen", "=", "False", ")", "publicEvents", "=", "allEvents", ".", "instance_of", "(", "PublicEvent", ")", "allSeries", "=", "allEvents", ".", "instance_of", "(", "Series", ")", "self", ".", "listing", "=", "{", "'allEvents'", ":", "allEvents", ",", "'openEvents'", ":", "openEvents", ",", "'closedEvents'", ":", "closedEvents", ",", "'publicEvents'", ":", "publicEvents", ",", "'allSeries'", ":", "allSeries", ",", "'regOpenEvents'", ":", "publicEvents", ".", "filter", "(", "registrationOpen", "=", "True", ")", ".", "filter", "(", "Q", "(", "publicevent__category__isnull", "=", "True", ")", "|", "Q", "(", "publicevent__category__separateOnRegistrationPage", "=", "False", ")", ")", ",", "'regClosedEvents'", ":", "publicEvents", ".", "filter", "(", "registrationOpen", "=", "False", ")", ".", "filter", "(", "Q", "(", "publicevent__category__isnull", "=", "True", ")", "|", "Q", "(", "publicevent__category__separateOnRegistrationPage", "=", "False", ")", ")", ",", "'categorySeparateEvents'", ":", "publicEvents", ".", "filter", "(", "publicevent__category__separateOnRegistrationPage", "=", "True", ")", ".", "order_by", "(", "'publicevent__category'", ")", ",", "'regOpenSeries'", ":", "allSeries", ".", "filter", "(", "registrationOpen", "=", "True", ")", ".", "filter", "(", "Q", "(", "series__category__isnull", "=", "True", ")", "|", "Q", "(", "series__category__separateOnRegistrationPage", "=", "False", ")", ")", ",", "'regClosedSeries'", ":", "allSeries", ".", "filter", "(", "registrationOpen", "=", "False", ")", ".", "filter", "(", "Q", "(", "series__category__isnull", "=", "True", ")", "|", "Q", "(", "series__category__separateOnRegistrationPage", "=", 
"False", ")", ")", ",", "'categorySeparateSeries'", ":", "allSeries", ".", "filter", "(", "series__category__separateOnRegistrationPage", "=", "True", ")", ".", "order_by", "(", "'series__category'", ")", ",", "}", "return", "self", ".", "listing" ]
Delete all objects created by this task .
def clear ( self # type: ORMTask ) : # mark this task as incomplete self . mark_incomplete ( ) # delete objects for object_class in self . object_classes : self . session . query ( object_class ) . delete ( ) self . close_session ( )
11,745
https://github.com/ehansis/ozelot/blob/948675e02eb6fca940450f5cb814f53e97159e5b/ozelot/etl/tasks.py#L347-L361
[ "def", "get_sampleS", "(", "self", ",", "res", ",", "DS", "=", "None", ",", "resMode", "=", "'abs'", ",", "ind", "=", "None", ",", "offsetIn", "=", "0.", ",", "Out", "=", "'(X,Y,Z)'", ",", "Ind", "=", "None", ")", ":", "if", "Ind", "is", "not", "None", ":", "assert", "self", ".", "dgeom", "[", "'Multi'", "]", "kwdargs", "=", "dict", "(", "DS", "=", "DS", ",", "dSMode", "=", "resMode", ",", "ind", "=", "ind", ",", "DIn", "=", "offsetIn", ",", "VIn", "=", "self", ".", "dgeom", "[", "'VIn'", "]", ",", "VType", "=", "self", ".", "Id", ".", "Type", ",", "VLim", "=", "np", ".", "ascontiguousarray", "(", "self", ".", "Lim", ")", ",", "nVLim", "=", "self", ".", "noccur", ",", "Out", "=", "Out", ",", "margin", "=", "1.e-9", ",", "Multi", "=", "self", ".", "dgeom", "[", "'Multi'", "]", ",", "Ind", "=", "Ind", ")", "args", "=", "[", "self", ".", "Poly", ",", "self", ".", "dgeom", "[", "'P1Min'", "]", "[", "0", "]", ",", "self", ".", "dgeom", "[", "'P1Max'", "]", "[", "0", "]", ",", "self", ".", "dgeom", "[", "'P2Min'", "]", "[", "1", "]", ",", "self", ".", "dgeom", "[", "'P2Max'", "]", "[", "1", "]", ",", "res", "]", "pts", ",", "dS", ",", "ind", ",", "reseff", "=", "_comp", ".", "_Ves_get_sampleS", "(", "*", "args", ",", "*", "*", "kwdargs", ")", "return", "pts", ",", "dS", ",", "ind", ",", "reseff" ]
Task is complete if completion marker is set and all requirements are complete
def complete ( self ) : is_complete = super ( ORMWrapperTask , self ) . complete ( ) for req in self . requires ( ) : is_complete &= req . complete ( ) return is_complete
11,746
https://github.com/ehansis/ozelot/blob/948675e02eb6fca940450f5cb814f53e97159e5b/ozelot/etl/tasks.py#L390-L397
[ "def", "dropout_with_broadcast_dims", "(", "x", ",", "keep_prob", ",", "broadcast_dims", "=", "None", ",", "*", "*", "kwargs", ")", ":", "assert", "\"noise_shape\"", "not", "in", "kwargs", "if", "broadcast_dims", ":", "shape", "=", "tf", ".", "shape", "(", "x", ")", "ndims", "=", "len", "(", "x", ".", "get_shape", "(", ")", ")", "# Allow dimensions like \"-1\" as well.", "broadcast_dims", "=", "[", "dim", "+", "ndims", "if", "dim", "<", "0", "else", "dim", "for", "dim", "in", "broadcast_dims", "]", "kwargs", "[", "\"noise_shape\"", "]", "=", "[", "1", "if", "i", "in", "broadcast_dims", "else", "shape", "[", "i", "]", "for", "i", "in", "range", "(", "ndims", ")", "]", "return", "tf", ".", "nn", ".", "dropout", "(", "x", ",", "keep_prob", ",", "*", "*", "kwargs", ")" ]
Saves a constructed Morse - Smale Complex in json file
def save ( self , filename = None ) : if filename is None : filename = "morse_smale_complex.json" with open ( filename , "w" ) as fp : fp . write ( self . to_json ( ) )
11,747
https://github.com/maljovec/topopy/blob/4be598d51c4e4043b73d4ad44beed6d289e2f088/topopy/MorseSmaleComplex.py#L159-L168
[ "def", "clear_all", "(", "self", ")", ":", "keys", "=", "self", ".", "_analytics_backend", ".", "keys", "(", ")", "for", "key", "in", "itertools", ".", "chain", "(", "*", "keys", ")", ":", "with", "self", ".", "_analytics_backend", ".", "map", "(", ")", "as", "conn", ":", "if", "key", ".", "startswith", "(", "self", ".", "_prefix", ")", ":", "conn", ".", "delete", "(", "key", ")" ]
Returns the label pair indices requested by the user
def get_label ( self , indices = None ) : if indices is None : indices = list ( range ( 0 , self . get_sample_size ( ) ) ) elif isinstance ( indices , collections . Iterable ) : indices = sorted ( list ( set ( indices ) ) ) else : indices = [ indices ] if len ( indices ) == 0 : return [ ] partitions = self . get_partitions ( self . persistence ) labels = self . X . shape [ 0 ] * [ None ] for label , partition_indices in partitions . items ( ) : for idx in np . intersect1d ( partition_indices , indices ) : labels [ idx ] = label labels = np . array ( labels ) if len ( indices ) == 1 : return labels [ indices ] [ 0 ] return labels [ indices ]
11,748
https://github.com/maljovec/topopy/blob/4be598d51c4e4043b73d4ad44beed6d289e2f088/topopy/MorseSmaleComplex.py#L316-L341
[ "def", "delete", "(", "self", ")", ":", "try", ":", "if", "self", ".", "exists", "(", ")", "is", "False", ":", "return", "None", "self", ".", "db", ".", "session", ".", "delete", "(", "self", ")", "self", ".", "db", ".", "session", ".", "commit", "(", ")", "except", "(", "Exception", ",", "BaseException", ")", "as", "error", ":", "# fail silently\r", "return", "None" ]
Returns the number of samples in the input data
def get_sample_size ( self , key = None ) : if key is None : return len ( self . Y ) else : return len ( self . get_partitions ( self . persistence ) [ key ] )
11,749
https://github.com/maljovec/topopy/blob/4be598d51c4e4043b73d4ad44beed6d289e2f088/topopy/MorseSmaleComplex.py#L354-L365
[ "def", "_get_cursor", "(", "self", ",", "n_retries", "=", "1", ")", ":", "n_tries_rem", "=", "n_retries", "+", "1", "while", "n_tries_rem", ">", "0", ":", "try", ":", "conn", "=", "self", ".", "_pool", ".", "getconn", "(", ")", "if", "self", ".", "pooling", "else", "self", ".", "_conn", "# autocommit=True obviates closing explicitly", "conn", ".", "autocommit", "=", "True", "cur", "=", "conn", ".", "cursor", "(", "cursor_factory", "=", "psycopg2", ".", "extras", ".", "DictCursor", ")", "cur", ".", "execute", "(", "\"set search_path = {self.url.schema};\"", ".", "format", "(", "self", "=", "self", ")", ")", "yield", "cur", "# contextmanager executes these when context exits", "cur", ".", "close", "(", ")", "if", "self", ".", "pooling", ":", "self", ".", "_pool", ".", "putconn", "(", "conn", ")", "break", "except", "psycopg2", ".", "OperationalError", ":", "_logger", ".", "warning", "(", "\"Lost connection to {url}; attempting reconnect\"", ".", "format", "(", "url", "=", "self", ".", "url", ")", ")", "if", "self", ".", "pooling", ":", "self", ".", "_pool", ".", "closeall", "(", ")", "self", ".", "_connect", "(", ")", "_logger", ".", "warning", "(", "\"Reconnected to {url}\"", ".", "format", "(", "url", "=", "self", ".", "url", ")", ")", "n_tries_rem", "-=", "1", "else", ":", "# N.B. Probably never reached", "raise", "HGVSError", "(", "\"Permanently lost connection to {url} ({n} retries)\"", ".", "format", "(", "url", "=", "self", ".", "url", ",", "n", "=", "n_retries", ")", ")" ]
Writes the complete Morse - Smale merge hierarchy to a string object .
def to_json ( self ) : capsule = { } capsule [ "Hierarchy" ] = [ ] for ( dying , ( persistence , surviving , saddle ) , ) in self . merge_sequence . items ( ) : capsule [ "Hierarchy" ] . append ( { "Dying" : dying , "Persistence" : persistence , "Surviving" : surviving , "Saddle" : saddle , } ) capsule [ "Partitions" ] = [ ] base = np . array ( [ None , None ] * len ( self . Y ) ) . reshape ( - 1 , 2 ) for ( min_index , max_index ) , items in self . base_partitions . items ( ) : base [ items , : ] = [ min_index , max_index ] capsule [ "Partitions" ] = base . tolist ( ) return json . dumps ( capsule )
11,750
https://github.com/maljovec/topopy/blob/4be598d51c4e4043b73d4ad44beed6d289e2f088/topopy/MorseSmaleComplex.py#L382-L408
[ "def", "box_mask", "(", "self", ",", "box", ")", ":", "if", "not", "isinstance", "(", "box", ",", "Box", ")", ":", "raise", "ValueError", "(", "'Must provide Box object'", ")", "if", "box", ".", "frame", "!=", "self", ".", "frame", ":", "raise", "ValueError", "(", "'Box must be in same frame as PointCloud'", ")", "all_points", "=", "self", ".", "data", ".", "T", "cond1", "=", "np", ".", "all", "(", "box", ".", "min_pt", "<=", "all_points", ",", "axis", "=", "1", ")", "cond2", "=", "np", ".", "all", "(", "all_points", "<=", "box", ".", "max_pt", ",", "axis", "=", "1", ")", "valid_point_indices", "=", "np", ".", "where", "(", "np", ".", "logical_and", "(", "cond1", ",", "cond2", ")", ")", "[", "0", "]", "valid_points", "=", "all_points", "[", "valid_point_indices", "]", "return", "PointCloud", "(", "valid_points", ".", "T", ",", "self", ".", "frame", ")", ",", "valid_point_indices" ]
Convert a dict of 1d array to a numpy recarray
def dict_to_numpy_array ( d ) : return fromarrays ( d . values ( ) , np . dtype ( [ ( str ( k ) , v . dtype ) for k , v in d . items ( ) ] ) )
11,751
https://github.com/yymao/generic-catalog-reader/blob/bc6267ac41b9f68106ed6065184469ac13fdc0b6/GCR/utils.py#L23-L27
[ "def", "shutdown", "(", "self", ")", ":", "vm", "=", "self", ".", "get_vm_failfast", "(", "self", ".", "config", "[", "'name'", "]", ")", "if", "vm", ".", "runtime", ".", "powerState", "==", "vim", ".", "VirtualMachinePowerState", ".", "poweredOff", ":", "print", "(", "\"%s already poweredOff\"", "%", "vm", ".", "name", ")", "else", ":", "if", "self", ".", "guestToolsRunning", "(", "vm", ")", ":", "timeout_minutes", "=", "10", "print", "(", "\"waiting for %s to shutdown \"", "\"(%s minutes before forced powerOff)\"", "%", "(", "vm", ".", "name", ",", "str", "(", "timeout_minutes", ")", ")", ")", "vm", ".", "ShutdownGuest", "(", ")", "if", "self", ".", "WaitForVirtualMachineShutdown", "(", "vm", ",", "timeout_minutes", "*", "60", ")", ":", "print", "(", "\"shutdown complete\"", ")", "print", "(", "\"%s poweredOff\"", "%", "vm", ".", "name", ")", "else", ":", "print", "(", "\"%s has not shutdown after %s minutes:\"", "\"will powerOff\"", "%", "(", "vm", ".", "name", ",", "str", "(", "timeout_minutes", ")", ")", ")", "self", ".", "powerOff", "(", ")", "else", ":", "print", "(", "\"GuestTools not running or not installed: will powerOff\"", ")", "self", ".", "powerOff", "(", ")" ]
Concatenate 1D numpy arrays . Similar to np . concatenate but work with empty input and masked arrays .
def concatenate_1d ( arrays ) : if len ( arrays ) == 0 : return np . array ( [ ] ) if len ( arrays ) == 1 : return np . asanyarray ( arrays [ 0 ] ) if any ( map ( np . ma . is_masked , arrays ) ) : return np . ma . concatenate ( arrays ) return np . concatenate ( arrays )
11,752
https://github.com/yymao/generic-catalog-reader/blob/bc6267ac41b9f68106ed6065184469ac13fdc0b6/GCR/utils.py#L29-L40
[ "def", "pttl", "(", "self", ",", "name", ")", ":", "with", "self", ".", "pipe", "as", "pipe", ":", "return", "pipe", ".", "pttl", "(", "self", ".", "redis_key", "(", "name", ")", ")" ]
Chemical formula HTML
def formula_html ( self , reversed_ = False ) : if self . H_count == 1 : text = "H" elif self . H_count > 1 : text = "H<sub>{}</sub>" . format ( self . H_count ) else : text = "" seq = [ self . symbol , text , self . charge_sign_html ( ) ] if reversed_ : seq = reversed ( seq ) return "" . join ( seq )
11,753
https://github.com/mojaie/chorus/blob/fc7fe23a0272554c67671645ab07830b315eeb1b/chorus/model/atom.py#L123-L138
[ "def", "read", "(", "self", ")", ":", "line", "=", "self", ".", "trace_file", ".", "readline", "(", ")", "if", "line", "==", "''", ":", "if", "self", ".", "loop", ":", "self", ".", "_reopen_file", "(", ")", "else", ":", "self", ".", "trace_file", ".", "close", "(", ")", "self", ".", "trace_file", "=", "None", "raise", "DataSourceError", "(", ")", "message", "=", "JsonFormatter", ".", "deserialize", "(", "line", ")", "timestamp", "=", "message", ".", "get", "(", "'timestamp'", ",", "None", ")", "if", "self", ".", "realtime", "and", "timestamp", "is", "not", "None", ":", "self", ".", "_store_timestamp", "(", "timestamp", ")", "self", ".", "_wait", "(", "self", ".", "starting_time", ",", "self", ".", "first_timestamp", ",", "timestamp", ")", "return", "line", "+", "\"\\x00\"" ]
Charge sign text
def charge_sign ( self ) : if self . charge > 0 : sign = "+" elif self . charge < 0 : sign = "–" en dash, not hyphen-minus else : return "" ab = abs ( self . charge ) if ab > 1 : return str ( ab ) + sign return sign
11,754
https://github.com/mojaie/chorus/blob/fc7fe23a0272554c67671645ab07830b315eeb1b/chorus/model/atom.py#L154-L165
[ "def", "revoke_session", "(", ")", ":", "form", "=", "RevokeForm", "(", "request", ".", "form", ")", "if", "not", "form", ".", "validate_on_submit", "(", ")", ":", "abort", "(", "403", ")", "sid_s", "=", "form", ".", "data", "[", "'sid_s'", "]", "if", "SessionActivity", ".", "query", ".", "filter_by", "(", "user_id", "=", "current_user", ".", "get_id", "(", ")", ",", "sid_s", "=", "sid_s", ")", ".", "count", "(", ")", "==", "1", ":", "delete_session", "(", "sid_s", "=", "sid_s", ")", "db", ".", "session", ".", "commit", "(", ")", "if", "not", "SessionActivity", ".", "is_current", "(", "sid_s", "=", "sid_s", ")", ":", "# if it's the same session doesn't show the message, otherwise", "# the session will be still open without the database record", "flash", "(", "'Session {0} successfully removed.'", ".", "format", "(", "sid_s", ")", ",", "'success'", ")", "else", ":", "flash", "(", "'Unable to remove the session {0}.'", ".", "format", "(", "sid_s", ")", ",", "'error'", ")", "return", "redirect", "(", "url_for", "(", "'invenio_accounts.security'", ")", ")" ]
Dispatch a message using 0mq
def send_message ( self , message ) : with self . _instance_lock : if message is None : Global . LOGGER . error ( "can't deliver a null messages" ) return if message . sender is None : Global . LOGGER . error ( f"can't deliver anonymous messages with body {message.body}" ) return if message . receiver is None : Global . LOGGER . error ( f"can't deliver message from {message.sender}: recipient not specified" ) return if message . message is None : Global . LOGGER . error ( f"can't deliver message with no body from {message.sender}" ) return sender = "*" + message . sender + "*" self . socket . send_multipart ( [ bytes ( sender , 'utf-8' ) , pickle . dumps ( message ) ] ) if Global . CONFIG_MANAGER . tracing_mode : Global . LOGGER . debug ( "dispatched : " + message . sender + "-" + message . message + "-" + message . receiver ) self . dispatched = self . dispatched + 1
11,755
https://github.com/mastro35/flows/blob/05e488385673a69597b5b39c7728795aa4d5eb18/flows/MessageDispatcher.py#L84-L118
[ "def", "tag_pos_volume", "(", "line", ")", ":", "def", "tagger", "(", "match", ")", ":", "groups", "=", "match", ".", "groupdict", "(", ")", "try", ":", "year", "=", "match", ".", "group", "(", "'year'", ")", "except", "IndexError", ":", "# Extract year from volume name", "# which should always include the year", "g", "=", "re", ".", "search", "(", "re_pos_year_num", ",", "match", ".", "group", "(", "'volume_num'", ")", ",", "re", ".", "UNICODE", ")", "year", "=", "g", ".", "group", "(", "0", ")", "if", "year", ":", "groups", "[", "'year'", "]", "=", "' <cds.YR>(%s)</cds.YR>'", "%", "year", ".", "strip", "(", ")", ".", "strip", "(", "'()'", ")", "else", ":", "groups", "[", "'year'", "]", "=", "''", "return", "'<cds.JOURNAL>PoS</cds.JOURNAL>'", "' <cds.VOL>%(volume_name)s%(volume_num)s</cds.VOL>'", "'%(year)s'", "' <cds.PG>%(page)s</cds.PG>'", "%", "groups", "for", "p", "in", "re_pos", ":", "line", "=", "p", ".", "sub", "(", "tagger", ",", "line", ")", "return", "line" ]
Property cache actualization at POI save . It will not work yet after property removal .
def update_properties_cache ( sender , instance , action , reverse , model , pk_set , * * kwargs ) : if action == 'post_add' : instance . save_properties_cache ( )
11,756
https://github.com/auto-mat/django-webmap-corpus/blob/1d8b7428d2bf3b1165985d767b19677bb6db9eae/webmap/models.py#L236-L239
[ "def", "get_train_eval_files", "(", "input_dir", ")", ":", "data_dir", "=", "_get_latest_data_dir", "(", "input_dir", ")", "train_pattern", "=", "os", ".", "path", ".", "join", "(", "data_dir", ",", "'train*.tfrecord.gz'", ")", "eval_pattern", "=", "os", ".", "path", ".", "join", "(", "data_dir", ",", "'eval*.tfrecord.gz'", ")", "train_files", "=", "file_io", ".", "get_matching_files", "(", "train_pattern", ")", "eval_files", "=", "file_io", ".", "get_matching_files", "(", "eval_pattern", ")", "return", "train_files", ",", "eval_files" ]
Writes the complete Morse complex merge hierarchy to a string object .
def to_json ( self ) : capsule = { } capsule [ "Hierarchy" ] = [ ] for ( dying , ( persistence , surviving , saddle ) , ) in self . merge_sequence . items ( ) : capsule [ "Hierarchy" ] . append ( { "Persistence" : persistence , "Dying" : dying , "Surviving" : surviving , "Saddle" : saddle , } ) capsule [ "Partitions" ] = [ ] base = np . array ( [ None ] * len ( self . Y ) ) for label , items in self . base_partitions . items ( ) : base [ items ] = label capsule [ "Partitions" ] = base . tolist ( ) return json . dumps ( capsule , separators = ( "," , ":" ) )
11,757
https://github.com/maljovec/topopy/blob/4be598d51c4e4043b73d4ad44beed6d289e2f088/topopy/MorseComplex.py#L380-L406
[ "def", "checkIfAvailable", "(", "self", ",", "dateTime", "=", "timezone", ".", "now", "(", ")", ")", ":", "return", "(", "self", ".", "startTime", ">=", "dateTime", "+", "timedelta", "(", "days", "=", "getConstant", "(", "'privateLessons__closeBookingDays'", ")", ")", "and", "self", ".", "startTime", "<=", "dateTime", "+", "timedelta", "(", "days", "=", "getConstant", "(", "'privateLessons__openBookingDays'", ")", ")", "and", "not", "self", ".", "eventRegistration", "and", "(", "self", ".", "status", "==", "self", ".", "SlotStatus", ".", "available", "or", "(", "self", ".", "status", "==", "self", ".", "SlotStatus", ".", "tentative", "and", "getattr", "(", "getattr", "(", "self", ".", "temporaryEventRegistration", ",", "'registration'", ",", "None", ")", ",", "'expirationDate'", ",", "timezone", ".", "now", "(", ")", ")", "<=", "timezone", ".", "now", "(", ")", ")", ")", ")" ]
Iter to list all the jobs events .
def iter ( context , sequence , limit = 10 ) : params = { 'limit' : limit , 'offset' : 0 } uri = '%s/%s/%s' % ( context . dci_cs_api , RESOURCE , sequence ) while True : j = context . session . get ( uri , params = params ) . json ( ) if len ( j [ 'jobs_events' ] ) : for i in j [ 'jobs_events' ] : yield i else : break params [ 'offset' ] += params [ 'limit' ]
11,758
https://github.com/redhat-cip/python-dciclient/blob/a4aa5899062802bbe4c30a075d8447f8d222d214/dciclient/v1/api/jobs_events.py#L30-L43
[ "def", "clear_copyright", "(", "self", ")", ":", "# Implemented from template for osid.repository.AssetForm.clear_title_template", "if", "(", "self", ".", "get_copyright_metadata", "(", ")", ".", "is_read_only", "(", ")", "or", "self", ".", "get_copyright_metadata", "(", ")", ".", "is_required", "(", ")", ")", ":", "raise", "errors", ".", "NoAccess", "(", ")", "self", ".", "_my_map", "[", "'copyright'", "]", "=", "dict", "(", "self", ".", "_copyright_default", ")" ]
Delete jobs events from a given sequence
def delete ( context , sequence ) : uri = '%s/%s/%s' % ( context . dci_cs_api , RESOURCE , sequence ) return context . session . delete ( uri )
11,759
https://github.com/redhat-cip/python-dciclient/blob/a4aa5899062802bbe4c30a075d8447f8d222d214/dciclient/v1/api/jobs_events.py#L46-L49
[ "def", "saturation", "(", "self", ",", "value", ")", ":", "value", "=", "clean_float", "(", "value", ")", "if", "value", "is", "None", ":", "return", "try", ":", "unit_moisture_weight", "=", "self", ".", "unit_moist_weight", "-", "self", ".", "unit_dry_weight", "unit_moisture_volume", "=", "unit_moisture_weight", "/", "self", ".", "_pw", "saturation", "=", "unit_moisture_volume", "/", "self", ".", "_calc_unit_void_volume", "(", ")", "if", "saturation", "is", "not", "None", "and", "not", "ct", ".", "isclose", "(", "saturation", ",", "value", ",", "rel_tol", "=", "self", ".", "_tolerance", ")", ":", "raise", "ModelError", "(", "\"New saturation (%.3f) is inconsistent \"", "\"with calculated value (%.3f)\"", "%", "(", "value", ",", "saturation", ")", ")", "except", "TypeError", ":", "pass", "old_value", "=", "self", ".", "saturation", "self", ".", "_saturation", "=", "value", "try", ":", "self", ".", "recompute_all_weights_and_void", "(", ")", "self", ".", "_add_to_stack", "(", "\"saturation\"", ",", "value", ")", "except", "ModelError", "as", "e", ":", "self", ".", "_saturation", "=", "old_value", "raise", "ModelError", "(", "e", ")" ]
Returns the ldap module . The unit test harness will assign a mock object to _LDAPConfig . ldap . It is imperative that the ldap module not be imported anywhere else so that the unit tests will pass in the absence of python - ldap .
def get_ldap ( cls , global_options = None ) : if cls . ldap is None : import ldap . filter # Support for python-ldap < 2.0.6 try : import ldap . dn except ImportError : from django_auth_ldap import dn ldap . dn = dn cls . ldap = ldap # Apply global LDAP options once if ( not cls . _ldap_configured ) and ( global_options is not None ) : for opt , value in global_options . items ( ) : cls . ldap . set_option ( opt , value ) cls . _ldap_configured = True return cls . ldap
11,760
https://github.com/DheerendraRathor/django-auth-ldap-ng/blob/4d2458bd90c4539353c5bfd5ea793c1e59780ee8/django_auth_ldap/config.py#L52-L78
[ "def", "kill", "(", "self", ",", "dwExitCode", "=", "0", ")", ":", "hProcess", "=", "self", ".", "get_handle", "(", "win32", ".", "PROCESS_TERMINATE", ")", "win32", ".", "TerminateProcess", "(", "hProcess", ",", "dwExitCode", ")" ]
Begins an asynchronous search and returns the message id to retrieve the results .
def _begin ( self , connection , filterargs = ( ) , escape = True ) : if escape : filterargs = self . _escape_filterargs ( filterargs ) try : filterstr = self . filterstr % filterargs msgid = connection . search ( force_str ( self . base_dn ) , self . scope , force_str ( filterstr ) ) except ldap . LDAPError as e : msgid = None logger . error ( u"search('%s', %d, '%s') raised %s" % ( self . base_dn , self . scope , filterstr , pprint . pformat ( e ) ) ) return msgid
11,761
https://github.com/DheerendraRathor/django-auth-ldap-ng/blob/4d2458bd90c4539353c5bfd5ea793c1e59780ee8/django_auth_ldap/config.py#L170-L191
[ "def", "unit_ball_L2", "(", "shape", ")", ":", "x", "=", "tf", ".", "Variable", "(", "tf", ".", "zeros", "(", "shape", ")", ")", "return", "constrain_L2", "(", "x", ")" ]
Returns the result of a previous asynchronous query .
def _results ( self , connection , msgid ) : try : kind , results = connection . result ( msgid ) if kind != ldap . RES_SEARCH_RESULT : results = [ ] except ldap . LDAPError as e : results = [ ] logger . error ( u"result(%d) raised %s" % ( msgid , pprint . pformat ( e ) ) ) return self . _process_results ( results )
11,762
https://github.com/DheerendraRathor/django-auth-ldap-ng/blob/4d2458bd90c4539353c5bfd5ea793c1e59780ee8/django_auth_ldap/config.py#L193-L205
[ "def", "assert_condition_md5", "(", "self", ")", ":", "if", "'Content-MD5'", "in", "self", ".", "request", ".", "headers", ":", "body_md5", "=", "hashlib", ".", "md5", "(", "self", ".", "request", ".", "body", ")", ".", "hexdigest", "(", ")", "if", "body_md5", "!=", "self", ".", "request", ".", "headers", "[", "'Content-MD5'", "]", ":", "raise_400", "(", "self", ",", "msg", "=", "'Invalid Content-MD5 request header.'", ")" ]
Escapes values in filterargs .
def _escape_filterargs ( self , filterargs ) : if isinstance ( filterargs , tuple ) : filterargs = tuple ( self . ldap . filter . escape_filter_chars ( value ) for value in filterargs ) elif isinstance ( filterargs , dict ) : filterargs = dict ( ( key , self . ldap . filter . escape_filter_chars ( value ) ) for key , value in filterargs . items ( ) ) else : raise TypeError ( "filterargs must be a tuple or dict." ) return filterargs
11,763
https://github.com/DheerendraRathor/django-auth-ldap-ng/blob/4d2458bd90c4539353c5bfd5ea793c1e59780ee8/django_auth_ldap/config.py#L207-L225
[ "def", "aux", "(", "self", ",", "aux", ")", ":", "if", "aux", "==", "self", ".", "_aux", ":", "return", "if", "self", ".", "_aux", ":", "self", ".", "_manager", ".", "port_manager", ".", "release_tcp_port", "(", "self", ".", "_aux", ",", "self", ".", "_project", ")", "self", ".", "_aux", "=", "None", "if", "aux", "is", "not", "None", ":", "self", ".", "_aux", "=", "self", ".", "_manager", ".", "port_manager", ".", "reserve_tcp_port", "(", "aux", ",", "self", ".", "_project", ")", "log", ".", "info", "(", "\"{module}: '{name}' [{id}]: aux port set to {port}\"", ".", "format", "(", "module", "=", "self", ".", "manager", ".", "module_name", ",", "name", "=", "self", ".", "name", ",", "id", "=", "self", ".", "id", ",", "port", "=", "aux", ")", ")" ]
Returns a sanitized copy of raw LDAP results . This scrubs out references decodes utf8 normalizes DNs etc .
def _process_results ( self , results ) : results = [ r for r in results if r [ 0 ] is not None ] results = _DeepStringCoder ( 'utf-8' ) . decode ( results ) # The normal form of a DN is lower case. results = [ ( r [ 0 ] . lower ( ) , r [ 1 ] ) for r in results ] result_dns = [ result [ 0 ] for result in results ] logger . debug ( u"search_s('%s', %d, '%s') returned %d objects: %s" % ( self . base_dn , self . scope , self . filterstr , len ( result_dns ) , "; " . join ( result_dns ) ) ) return results
11,764
https://github.com/DheerendraRathor/django-auth-ldap-ng/blob/4d2458bd90c4539353c5bfd5ea793c1e59780ee8/django_auth_ldap/config.py#L227-L243
[ "def", "write_result_stream", "(", "result_stream", ",", "filename_prefix", "=", "None", ",", "results_per_file", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "isinstance", "(", "result_stream", ",", "types", ".", "GeneratorType", ")", ":", "stream", "=", "result_stream", "else", ":", "stream", "=", "result_stream", ".", "stream", "(", ")", "file_time_formatter", "=", "\"%Y-%m-%dT%H_%M_%S\"", "if", "filename_prefix", "is", "None", ":", "filename_prefix", "=", "\"twitter_search_results\"", "if", "results_per_file", ":", "logger", ".", "info", "(", "\"chunking result stream to files with {} tweets per file\"", ".", "format", "(", "results_per_file", ")", ")", "chunked_stream", "=", "partition", "(", "stream", ",", "results_per_file", ",", "pad_none", "=", "True", ")", "for", "chunk", "in", "chunked_stream", ":", "chunk", "=", "filter", "(", "lambda", "x", ":", "x", "is", "not", "None", ",", "chunk", ")", "curr_datetime", "=", "(", "datetime", ".", "datetime", ".", "utcnow", "(", ")", ".", "strftime", "(", "file_time_formatter", ")", ")", "_filename", "=", "\"{}_{}.json\"", ".", "format", "(", "filename_prefix", ",", "curr_datetime", ")", "yield", "from", "write_ndjson", "(", "_filename", ",", "chunk", ")", "else", ":", "curr_datetime", "=", "(", "datetime", ".", "datetime", ".", "utcnow", "(", ")", ".", "strftime", "(", "file_time_formatter", ")", ")", "_filename", "=", "\"{}.json\"", ".", "format", "(", "filename_prefix", ")", "yield", "from", "write_ndjson", "(", "_filename", ",", "stream", ")" ]
Get a database connection string
def get_connection_string ( params , hide_password = True ) : connection_string = params [ 'driver' ] + '://' user = params . get ( 'user' , None ) password = params . get ( 'password' , None ) host = params . get ( 'host' , None ) port = params . get ( 'port' , None ) database = params . get ( 'database' , None ) if database is None : raise ValueError ( "Field 'database' of connection parameters cannot be None." ) # if password is not set, try to get it from keyring if password is None and user is not None : # noinspection PyTypeChecker password = Client . _get_password ( params ) if password is None : raise RuntimeError ( "Password not defined and not available in keyring." ) # don't add host/port/user/password if no host given if host is not None : # don't add user/password if user not given if user is not None : connection_string += user # omit zero-length passwords if len ( password ) > 0 : if hide_password : connection_string += ":[password hidden]" else : connection_string += ":" + password connection_string += "@" connection_string += host if port is not None : connection_string += ':' + str ( port ) # noinspection PyTypeChecker connection_string += '/' + database return connection_string
11,765
https://github.com/ehansis/ozelot/blob/948675e02eb6fca940450f5cb814f53e97159e5b/ozelot/client.py#L133-L187
[ "def", "_upload", "(", "self", ",", "obj_name", ",", "content", ",", "content_type", ",", "content_encoding", ",", "content_length", ",", "etag", ",", "chunked", ",", "chunk_size", ",", "headers", ")", ":", "if", "content_type", "is", "not", "None", ":", "headers", "[", "\"Content-Type\"", "]", "=", "content_type", "if", "content_encoding", "is", "not", "None", ":", "headers", "[", "\"Content-Encoding\"", "]", "=", "content_encoding", "if", "isinstance", "(", "content", ",", "six", ".", "string_types", ")", ":", "fsize", "=", "len", "(", "content", ")", "else", ":", "if", "chunked", ":", "fsize", "=", "None", "elif", "content_length", "is", "None", ":", "fsize", "=", "get_file_size", "(", "content", ")", "else", ":", "fsize", "=", "content_length", "if", "fsize", "is", "None", "or", "fsize", "<=", "MAX_FILE_SIZE", ":", "# We can just upload it as-is.", "return", "self", ".", "_store_object", "(", "obj_name", ",", "content", "=", "content", ",", "etag", "=", "etag", ",", "chunked", "=", "chunked", ",", "chunk_size", "=", "chunk_size", ",", "headers", "=", "headers", ")", "# Files larger than MAX_FILE_SIZE must be segmented", "# and uploaded separately.", "num_segments", "=", "int", "(", "math", ".", "ceil", "(", "float", "(", "fsize", ")", "/", "MAX_FILE_SIZE", ")", ")", "digits", "=", "int", "(", "math", ".", "log10", "(", "num_segments", ")", ")", "+", "1", "# NOTE: This could be greatly improved with threading or other", "# async design.", "for", "segment", "in", "range", "(", "num_segments", ")", ":", "sequence", "=", "str", "(", "segment", "+", "1", ")", ".", "zfill", "(", "digits", ")", "seg_name", "=", "\"%s.%s\"", "%", "(", "obj_name", ",", "sequence", ")", "with", "utils", ".", "SelfDeletingTempfile", "(", ")", "as", "tmpname", ":", "with", "open", "(", "tmpname", ",", "\"wb\"", ")", "as", "tmp", ":", "tmp", ".", "write", "(", "content", ".", "read", "(", "MAX_FILE_SIZE", ")", ")", "with", "open", "(", "tmpname", ",", "\"rb\"", ")", "as", "tmp", ":", "# 
We have to calculate the etag for each segment", "etag", "=", "utils", ".", "get_checksum", "(", "tmp", ")", "self", ".", "_store_object", "(", "seg_name", ",", "content", "=", "tmp", ",", "etag", "=", "etag", ",", "chunked", "=", "False", ",", "headers", "=", "headers", ")", "# Upload the manifest", "headers", ".", "pop", "(", "\"ETag\"", ",", "\"\"", ")", "headers", "[", "\"X-Object-Manifest\"", "]", "=", "\"%s/%s.\"", "%", "(", "self", ".", "name", ",", "obj_name", ")", "self", ".", "_store_object", "(", "obj_name", ",", "content", "=", "None", ",", "headers", "=", "headers", ")" ]
Send a request for latest ticker info return the response .
def pubticker ( self , symbol = 'btcusd' ) : url = self . base_url + '/v1/pubticker/' + symbol return requests . get ( url )
11,766
https://github.com/geminipy/geminipy/blob/0d83fe225b746ac4c8bb800aa6091e1b606231e8/geminipy/__init__.py#L50-L54
[ "def", "exclude_types", "(", "self", ",", "*", "objs", ")", ":", "for", "o", "in", "objs", ":", "for", "t", "in", "_keytuple", "(", "o", ")", ":", "if", "t", "and", "t", "not", "in", "self", ".", "_excl_d", ":", "self", ".", "_excl_d", "[", "t", "]", "=", "0" ]
Send a request to get the public order book return the response .
def book ( self , symbol = 'btcusd' , limit_bids = 0 , limit_asks = 0 ) : url = self . base_url + '/v1/book/' + symbol params = { 'limit_bids' : limit_bids , 'limit_asks' : limit_asks } return requests . get ( url , params )
11,767
https://github.com/geminipy/geminipy/blob/0d83fe225b746ac4c8bb800aa6091e1b606231e8/geminipy/__init__.py#L56-L71
[ "def", "_clamp_string", "(", "self", ",", "row_item", ",", "column_index", ",", "delimiter", "=", "''", ")", ":", "width", "=", "(", "self", ".", "_table", ".", "column_widths", "[", "column_index", "]", "-", "self", ".", "_table", ".", "left_padding_widths", "[", "column_index", "]", "-", "self", ".", "_table", ".", "right_padding_widths", "[", "column_index", "]", ")", "if", "termwidth", "(", "row_item", ")", "<=", "width", ":", "return", "row_item", "else", ":", "if", "width", "-", "len", "(", "delimiter", ")", ">=", "0", ":", "clamped_string", "=", "(", "textwrap", "(", "row_item", ",", "width", "-", "len", "(", "delimiter", ")", ")", "[", "0", "]", "+", "delimiter", ")", "else", ":", "clamped_string", "=", "delimiter", "[", ":", "width", "]", "return", "clamped_string" ]
Send a request to get all public trades return the response .
def trades ( self , symbol = 'btcusd' , since = 0 , limit_trades = 50 , include_breaks = 0 ) : url = self . base_url + '/v1/trades/' + symbol params = { 'since' : since , 'limit_trades' : limit_trades , 'include_breaks' : include_breaks } return requests . get ( url , params )
11,768
https://github.com/geminipy/geminipy/blob/0d83fe225b746ac4c8bb800aa6091e1b606231e8/geminipy/__init__.py#L73-L91
[ "def", "build_synchronize_decorator", "(", ")", ":", "lock", "=", "threading", ".", "Lock", "(", ")", "def", "lock_decorator", "(", "fn", ")", ":", "@", "functools", ".", "wraps", "(", "fn", ")", "def", "lock_decorated", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "with", "lock", ":", "return", "fn", "(", "*", "args", ",", "*", "*", "kwargs", ")", "return", "lock_decorated", "return", "lock_decorator" ]
Send a request for latest auction info return the response .
def auction ( self , symbol = 'btcusd' ) : url = self . base_url + '/v1/auction/' + symbol return requests . get ( url )
11,769
https://github.com/geminipy/geminipy/blob/0d83fe225b746ac4c8bb800aa6091e1b606231e8/geminipy/__init__.py#L93-L97
[ "def", "convex_conj", "(", "self", ")", ":", "if", "self", ".", "quadratic_coeff", "==", "0", ":", "cconj", "=", "self", ".", "functional", ".", "convex_conj", ".", "translated", "(", "self", ".", "linear_term", ")", "if", "self", ".", "constant", "!=", "0", ":", "cconj", "=", "cconj", "-", "self", ".", "constant", "return", "cconj", "else", ":", "return", "super", "(", "FunctionalQuadraticPerturb", ",", "self", ")", ".", "convex_conj" ]
Send a request for auction history info return the response .
def auction_history ( self , symbol = 'btcusd' , since = 0 , limit_auction_results = 50 , include_indicative = 1 ) : url = self . base_url + '/v1/auction/' + symbol + '/history' params = { 'since' : since , 'limit_auction_results' : limit_auction_results , 'include_indicative' : include_indicative } return requests . get ( url , params )
11,770
https://github.com/geminipy/geminipy/blob/0d83fe225b746ac4c8bb800aa6091e1b606231e8/geminipy/__init__.py#L99-L119
[ "def", "get_service_references", "(", "self", ",", "clazz", ",", "ldap_filter", "=", "None", ")", ":", "# type: (Optional[str], Optional[str]) -> Optional[List[ServiceReference]]", "refs", "=", "self", ".", "__framework", ".", "find_service_references", "(", "clazz", ",", "ldap_filter", ")", "if", "refs", ":", "for", "ref", "in", "refs", ":", "if", "ref", ".", "get_bundle", "(", ")", "is", "not", "self", ".", "__bundle", ":", "refs", ".", "remove", "(", "ref", ")", "return", "refs" ]
Send a request to place an order return the response .
def new_order ( self , amount , price , side , client_order_id = None , symbol = 'btcusd' , type = 'exchange limit' , options = None ) : request = '/v1/order/new' url = self . base_url + request params = { 'request' : request , 'nonce' : self . get_nonce ( ) , 'symbol' : symbol , 'amount' : amount , 'price' : price , 'side' : side , 'type' : type } if client_order_id is not None : params [ 'client_order_id' ] = client_order_id if options is not None : params [ 'options' ] = options return requests . post ( url , headers = self . prepare ( params ) )
11,771
https://github.com/geminipy/geminipy/blob/0d83fe225b746ac4c8bb800aa6091e1b606231e8/geminipy/__init__.py#L122-L153
[ "def", "_clamp_string", "(", "self", ",", "row_item", ",", "column_index", ",", "delimiter", "=", "''", ")", ":", "width", "=", "(", "self", ".", "_table", ".", "column_widths", "[", "column_index", "]", "-", "self", ".", "_table", ".", "left_padding_widths", "[", "column_index", "]", "-", "self", ".", "_table", ".", "right_padding_widths", "[", "column_index", "]", ")", "if", "termwidth", "(", "row_item", ")", "<=", "width", ":", "return", "row_item", "else", ":", "if", "width", "-", "len", "(", "delimiter", ")", ">=", "0", ":", "clamped_string", "=", "(", "textwrap", "(", "row_item", ",", "width", "-", "len", "(", "delimiter", ")", ")", "[", "0", "]", "+", "delimiter", ")", "else", ":", "clamped_string", "=", "delimiter", "[", ":", "width", "]", "return", "clamped_string" ]
Send a request to cancel an order return the response .
def cancel_order ( self , order_id ) : request = '/v1/order/cancel' url = self . base_url + request params = { 'request' : request , 'nonce' : self . get_nonce ( ) , 'order_id' : order_id } return requests . post ( url , headers = self . prepare ( params ) )
11,772
https://github.com/geminipy/geminipy/blob/0d83fe225b746ac4c8bb800aa6091e1b606231e8/geminipy/__init__.py#L155-L170
[ "def", "secure", "(", "self", ")", ":", "log", ".", "debug", "(", "'ConCache securing sockets'", ")", "if", "os", ".", "path", ".", "exists", "(", "self", ".", "cache_sock", ")", ":", "os", ".", "chmod", "(", "self", ".", "cache_sock", ",", "0o600", ")", "if", "os", ".", "path", ".", "exists", "(", "self", ".", "update_sock", ")", ":", "os", ".", "chmod", "(", "self", ".", "update_sock", ",", "0o600", ")", "if", "os", ".", "path", ".", "exists", "(", "self", ".", "upd_t_sock", ")", ":", "os", ".", "chmod", "(", "self", ".", "upd_t_sock", ",", "0o600", ")" ]
Send a trade history request return the response .
def past_trades ( self , symbol = 'btcusd' , limit_trades = 50 , timestamp = 0 ) : request = '/v1/mytrades' url = self . base_url + request params = { 'request' : request , 'nonce' : self . get_nonce ( ) , 'symbol' : symbol , 'limit_trades' : limit_trades , 'timestamp' : timestamp } return requests . post ( url , headers = self . prepare ( params ) )
11,773
https://github.com/geminipy/geminipy/blob/0d83fe225b746ac4c8bb800aa6091e1b606231e8/geminipy/__init__.py#L222-L241
[ "def", "sync_agg_metric", "(", "self", ",", "unique_identifier", ",", "metric", ",", "start_date", ",", "end_date", ")", ":", "self", ".", "sync_week_metric", "(", "unique_identifier", ",", "metric", ",", "start_date", ",", "end_date", ")", "self", ".", "sync_month_metric", "(", "unique_identifier", ",", "metric", ",", "start_date", ",", "end_date", ")" ]
Send a request to get your trade volume return the response .
def tradevolume ( self ) : request = '/v1/tradevolume' url = self . base_url + request params = { 'request' : request , 'nonce' : self . get_nonce ( ) } return requests . post ( url , headers = self . prepare ( params ) )
11,774
https://github.com/geminipy/geminipy/blob/0d83fe225b746ac4c8bb800aa6091e1b606231e8/geminipy/__init__.py#L243-L252
[ "def", "build_synchronize_decorator", "(", ")", ":", "lock", "=", "threading", ".", "Lock", "(", ")", "def", "lock_decorator", "(", "fn", ")", ":", "@", "functools", ".", "wraps", "(", "fn", ")", "def", "lock_decorated", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "with", "lock", ":", "return", "fn", "(", "*", "args", ",", "*", "*", "kwargs", ")", "return", "lock_decorated", "return", "lock_decorator" ]
Send a request for a new cryptocurrency deposit address with an optional label . Return the response .
def newAddress ( self , currency = 'btc' , label = '' ) : request = '/v1/deposit/' + currency + '/newAddress' url = self . base_url + request params = { 'request' : request , 'nonce' : self . get_nonce ( ) } if label != '' : params [ 'label' ] = label return requests . post ( url , headers = self . prepare ( params ) )
11,775
https://github.com/geminipy/geminipy/blob/0d83fe225b746ac4c8bb800aa6091e1b606231e8/geminipy/__init__.py#L265-L284
[ "def", "get_timestamps", "(", "cols", ",", "created_name", ",", "updated_name", ")", ":", "has_created", "=", "created_name", "in", "cols", "has_updated", "=", "updated_name", "in", "cols", "return", "(", "created_name", "if", "has_created", "else", "None", ",", "updated_name", "if", "has_updated", "else", "None", ")" ]
Prepare return the required HTTP headers .
def prepare ( self , params ) : jsonparams = json . dumps ( params ) payload = base64 . b64encode ( jsonparams . encode ( ) ) signature = hmac . new ( self . secret_key . encode ( ) , payload , hashlib . sha384 ) . hexdigest ( ) return { 'X-GEMINI-APIKEY' : self . api_key , 'X-GEMINI-PAYLOAD' : payload , 'X-GEMINI-SIGNATURE' : signature }
11,776
https://github.com/geminipy/geminipy/blob/0d83fe225b746ac4c8bb800aa6091e1b606231e8/geminipy/__init__.py#L301-L318
[ "def", "delete_logs", "(", "room", ")", ":", "from", "indico_chat", ".", "plugin", "import", "ChatPlugin", "base_url", "=", "ChatPlugin", ".", "settings", ".", "get", "(", "'log_url'", ")", "if", "not", "base_url", "or", "room", ".", "custom_server", ":", "return", "try", ":", "response", "=", "requests", ".", "get", "(", "posixpath", ".", "join", "(", "base_url", ",", "'delete'", ")", ",", "params", "=", "{", "'cr'", ":", "room", ".", "jid", "}", ")", ".", "json", "(", ")", "except", "(", "RequestException", ",", "ValueError", ")", ":", "current_plugin", ".", "logger", ".", "exception", "(", "'Could not delete logs for %s'", ",", "room", ".", "jid", ")", "return", "if", "not", "response", ".", "get", "(", "'success'", ")", ":", "current_plugin", ".", "logger", ".", "warning", "(", "'Could not delete logs for %s: %s'", ",", "room", ".", "jid", ",", "response", ".", "get", "(", "'error'", ")", ")" ]
Merge multiple SourceBlocks together
def merge ( cls , source_blocks ) : if len ( source_blocks ) == 1 : return source_blocks [ 0 ] source_blocks . sort ( key = operator . attrgetter ( 'start_line_number' ) ) main_block = source_blocks [ 0 ] boot_lines = main_block . boot_lines source_lines = [ source_line for source_block in source_blocks for source_line in source_block . source_lines ] return cls ( boot_lines , source_lines , directive = main_block . directive , language = main_block . language , roles = main_block . roles )
11,777
https://github.com/kataev/flake8-rst/blob/ca6d41c7a309b9e8cd4fa6f428b82db96b6a986f/flake8_rst/sourceblock.py#L72-L84
[ "def", "_get_partition_info", "(", "storage_system", ",", "device_path", ")", ":", "try", ":", "partition_infos", "=", "storage_system", ".", "RetrieveDiskPartitionInfo", "(", "devicePath", "=", "[", "device_path", "]", ")", "except", "vim", ".", "fault", ".", "NoPermission", "as", "exc", ":", "log", ".", "exception", "(", "exc", ")", "raise", "salt", ".", "exceptions", ".", "VMwareApiError", "(", "'Not enough permissions. Required privilege: '", "'{0}'", ".", "format", "(", "exc", ".", "privilegeId", ")", ")", "except", "vim", ".", "fault", ".", "VimFault", "as", "exc", ":", "log", ".", "exception", "(", "exc", ")", "raise", "salt", ".", "exceptions", ".", "VMwareApiError", "(", "exc", ".", "msg", ")", "except", "vmodl", ".", "RuntimeFault", "as", "exc", ":", "log", ".", "exception", "(", "exc", ")", "raise", "salt", ".", "exceptions", ".", "VMwareRuntimeError", "(", "exc", ".", "msg", ")", "log", ".", "trace", "(", "'partition_info = %s'", ",", "partition_infos", "[", "0", "]", ")", "return", "partition_infos", "[", "0", "]" ]
Export a table listing all characters and their data
def character_summary_table ( ) : # a database client/session to run queries in cl = client . get_client ( ) session = cl . create_session ( ) # Define the query. Note that we need to rename the two joined-in name columns, # to make the labels intelligible and to not have two identical column names in the output. # Also, we need a left outer join on the place of birth (instead of the default left inner join) # if we want results for characters that have no place of birth set. query = session . query ( models . Character , models . Universe . name . label ( 'universe' ) , models . Place . name . label ( 'place_of_birth' ) ) . join ( models . Character . universe ) . outerjoin ( models . Character . place_of_birth ) # download all data as a pandas DataFrame, index it by the character ID characters = cl . df_query ( query ) . set_index ( 'id' ) # query the number of movie appearances per character query = session . query ( sa . func . count ( models . MovieAppearance . id ) . label ( 'movie_appearances' ) , models . MovieAppearance . character_id ) . group_by ( models . MovieAppearance . character_id ) appearances = cl . df_query ( query ) . set_index ( 'character_id' ) # join both tables, sort by name df = characters . join ( appearances , how = 'left' ) . sort_values ( by = 'name' ) # drop the foreign key columns (have no meaning outside our DB) df = df . drop ( [ 'universe_id' , 'place_of_birth_id' ] , axis = 1 ) # write output as both CSV and Excel; do not include index column df . to_csv ( path . join ( out_dir , "characters.csv" ) , encoding = 'utf-8' , index = False ) df . to_excel ( path . join ( out_dir , "characters.xlsx" ) , encoding = 'utf-8' , index = False ) session . close ( )
11,778
https://github.com/ehansis/ozelot/blob/948675e02eb6fca940450f5cb814f53e97159e5b/examples/superheroes/superheroes/analysis.py#L24-L63
[ "def", "inc", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "pairs", "=", "[", "]", "if", "len", "(", "args", ")", "==", "1", ":", "pairs", ".", "append", "(", "(", "args", "[", "0", "]", ",", "1", ")", ")", "elif", "len", "(", "args", ")", "==", "2", ":", "pairs", ".", "append", "(", "args", ")", "elif", "len", "(", "kwargs", ")", "!=", "0", ":", "pairs", ".", "extend", "(", "[", "(", "k", ",", "v", ")", "for", "k", ",", "v", "in", "kwargs", ".", "items", "(", ")", "]", ")", "else", ":", "raise", "UpdateException", "(", "'Invalid arguments for set. Requires either two positional arguments or at least one keyword argument'", ")", "ret", "=", "self", "for", "qfield", ",", "value", "in", "pairs", ":", "ret", "=", "self", ".", "_atomic_op", "(", "'$inc'", ",", "qfield", ",", "value", ")", "return", "ret" ]
Helper function to convert matplotlib figure to SVG string
def fig_to_svg ( fig ) : buf = io . StringIO ( ) fig . savefig ( buf , format = 'svg' ) buf . seek ( 0 ) return buf . getvalue ( )
11,779
https://github.com/ehansis/ozelot/blob/948675e02eb6fca940450f5cb814f53e97159e5b/examples/superheroes/superheroes/analysis.py#L66-L75
[ "def", "get_users", "(", "self", ",", "course", ")", ":", "users", "=", "OrderedDict", "(", "sorted", "(", "list", "(", "self", ".", "user_manager", ".", "get_users_info", "(", "self", ".", "user_manager", ".", "get_course_registered_users", "(", "course", ")", ")", ".", "items", "(", ")", ")", ",", "key", "=", "lambda", "k", ":", "k", "[", "1", "]", "[", "0", "]", "if", "k", "[", "1", "]", "is", "not", "None", "else", "\"\"", ")", ")", "return", "users" ]
Generate interactive network graph of movie appearances
def movie_network ( ) : # page template template = jenv . get_template ( "movie_network.html" ) # container for template context context = dict ( ) # a database client/session to run queries in cl = client . get_client ( ) session = cl . create_session ( ) # # query data # # get all Movies query = session . query ( models . Movie . id , models . Movie . name , models . Movie . url , models . Movie . budget_inflation_adjusted , models . Movie . imdb_rating ) movies = cl . df_query ( query ) # get all Movie Appearances query = session . query ( models . MovieAppearance . movie_id , models . MovieAppearance . character_id ) appearances = cl . df_query ( query ) # get all Characters that have movie appearances query = session . query ( models . Character . id , models . Character . url , models . Character . name ) . filter ( models . Character . id . in_ ( [ int ( i ) for i in appearances [ 'character_id' ] . unique ( ) ] ) ) characters = cl . df_query ( query ) # # transform to network graph # graph = dict ( nodes = [ ] , graph = [ ] , # this stays empty links = [ ] , directed = False , multigraph = True ) # containers for lookups from movie/character IDs to node IDs movie_node_id = dict ( ) character_node_id = dict ( ) # normalization for movie node size: 100 = max budget movie_size_factor = 100. / movies [ 'budget_inflation_adjusted' ] . max ( ) # nodes for movies for _ , data in movies . iterrows ( ) : movie_node_id [ data [ 'id' ] ] = len ( graph [ 'nodes' ] ) # noinspection PyTypeChecker graph [ 'nodes' ] . append ( dict ( id = data [ 'name' ] , size = max ( 5. , data [ 'budget_inflation_adjusted' ] * movie_size_factor ) , score = data [ 'imdb_rating' ] / 10. , type = 'square' , url = "http://marvel.wikia.com" + data [ 'url' ] ) ) # nodes for characters for _ , data in characters . iterrows ( ) : character_node_id [ data [ 'id' ] ] = len ( graph [ 'nodes' ] ) # noinspection PyTypeChecker graph [ 'nodes' ] . 
append ( dict ( id = data [ 'name' ] , size = 10 , type = 'circle' , url = "http://marvel.wikia.com" + data [ 'url' ] ) ) # links: movie appearances for _ , data in appearances . iterrows ( ) : # noinspection PyTypeChecker graph [ 'links' ] . append ( dict ( source = movie_node_id [ data [ 'movie_id' ] ] , target = character_node_id [ data [ 'character_id' ] ] ) ) context [ 'graph' ] = json . dumps ( graph , indent = 4 ) # # render template # out_file = path . join ( out_dir , "movie_network.html" ) html_content = template . render ( * * context ) with open ( out_file , 'w' ) as f : f . write ( html_content ) # done, clean up plt . close ( 'all' ) session . close ( )
11,780
https://github.com/ehansis/ozelot/blob/948675e02eb6fca940450f5cb814f53e97159e5b/examples/superheroes/superheroes/analysis.py#L204-L298
[ "def", "_post_request", "(", "self", ",", "url", ",", "headers", ",", "data", "=", "None", ")", ":", "# Grab file from data.", "files", "=", "None", "for", "field", ",", "value", "in", "data", ":", "if", "field", "==", "'file'", ":", "if", "isinstance", "(", "value", ",", "dict", ")", ":", "files", "=", "value", "else", ":", "files", "=", "{", "'file'", ":", "value", "}", "break", "# Remove file entry from data.", "data", "[", ":", "]", "=", "[", "tup", "for", "tup", "in", "data", "if", "tup", "[", "0", "]", "!=", "'file'", "]", "return", "self", ".", "_session", ".", "post", "(", "url", ",", "headers", "=", "headers", ",", "data", "=", "data", ",", "files", "=", "files", ")" ]
Helper function for splitting 2D data into x and y component to make equations simpler
def unpack2D ( _x ) : _x = np . atleast_2d ( _x ) x = _x [ : , 0 ] y = _x [ : , 1 ] return x , y
11,781
https://github.com/maljovec/topopy/blob/4be598d51c4e4043b73d4ad44beed6d289e2f088/docs/_static/logo_generator.py#L48-L56
[ "def", "verify_ocsp", "(", "cls", ",", "certificate", ",", "issuer", ")", ":", "return", "OCSPVerifier", "(", "certificate", ",", "issuer", ",", "cls", ".", "get_ocsp_url", "(", ")", ",", "cls", ".", "get_ocsp_responder_certificate_path", "(", ")", ")", ".", "verify", "(", ")" ]
Some heuristics to figure out whether this is called at a stage where the Python interpreter is shutting down .
def is_at_exit ( ) : if _threading_main_thread is not None : if not hasattr ( threading , "main_thread" ) : return True if threading . main_thread ( ) != _threading_main_thread : return True if not _threading_main_thread . is_alive ( ) : return True return False
11,782
https://github.com/albertz/py_better_exchook/blob/3d524a027d7fc4e83e47e39a1978849561da69b3/better_exchook.py#L836-L850
[ "def", "fromgroups", "(", "args", ")", ":", "from", "jcvi", ".", "formats", ".", "bed", "import", "Bed", "p", "=", "OptionParser", "(", "fromgroups", ".", "__doc__", ")", "opts", ",", "args", "=", "p", ".", "parse_args", "(", "args", ")", "if", "len", "(", "args", ")", "<", "2", ":", "sys", ".", "exit", "(", "not", "p", ".", "print_help", "(", ")", ")", "groupsfile", "=", "args", "[", "0", "]", "bedfiles", "=", "args", "[", "1", ":", "]", "beds", "=", "[", "Bed", "(", "x", ")", "for", "x", "in", "bedfiles", "]", "fp", "=", "open", "(", "groupsfile", ")", "groups", "=", "[", "row", ".", "strip", "(", ")", ".", "split", "(", "\",\"", ")", "for", "row", "in", "fp", "]", "for", "b1", ",", "b2", "in", "product", "(", "beds", ",", "repeat", "=", "2", ")", ":", "extract_pairs", "(", "b1", ",", "b2", ",", "groups", ")" ]
Replacement for sys . excepthook .
def better_exchook ( etype , value , tb , debugshell = False , autodebugshell = True , file = None , with_color = None ) : if file is None : file = sys . stderr def output ( ln ) : """ :param str ln: :return: nothing, prints to ``file`` """ file . write ( ln + "\n" ) color = Color ( enable = with_color ) output ( color ( "EXCEPTION" , color . fg_colors [ 1 ] , bold = True ) ) all_locals , all_globals = { } , { } if tb is not None : print_tb ( tb , allLocals = all_locals , allGlobals = all_globals , file = file , withTitle = True , with_color = color . enable ) else : output ( color ( "better_exchook: traceback unknown" , color . fg_colors [ 1 ] ) ) import types # noinspection PyShadowingNames def _some_str ( value ) : """ :param object value: :rtype: str """ # noinspection PyBroadException try : return str ( value ) except Exception : return '<unprintable %s object>' % type ( value ) . __name__ # noinspection PyShadowingNames def _format_final_exc_line ( etype , value ) : value_str = _some_str ( value ) if value is None or not value_str : line = color ( "%s" % etype , color . fg_colors [ 1 ] ) else : line = color ( "%s" % etype , color . fg_colors [ 1 ] ) + ": %s" % ( value_str , ) return line # noinspection PyUnresolvedReferences if ( isinstance ( etype , BaseException ) or ( hasattr ( types , "InstanceType" ) and isinstance ( etype , types . InstanceType ) ) or etype is None or type ( etype ) is str ) : output ( _format_final_exc_line ( etype , value ) ) else : output ( _format_final_exc_line ( etype . __name__ , value ) ) if autodebugshell : # noinspection PyBroadException try : debugshell = int ( os . environ [ "DEBUG" ] ) != 0 except Exception : pass if debugshell : output ( "---------- DEBUG SHELL -----------" ) debug_shell ( user_ns = all_locals , user_global_ns = all_globals , traceback = tb ) file . flush ( )
11,783
https://github.com/albertz/py_better_exchook/blob/3d524a027d7fc4e83e47e39a1978849561da69b3/better_exchook.py#L1173-L1244
[ "def", "get_url", "(", "self", ")", ":", "if", "self", ".", "store_url", "and", "self", ".", "url_storage", ":", "key", "=", "self", ".", "fingerprinter", ".", "get_fingerprint", "(", "self", ".", "get_file_stream", "(", ")", ")", "url", "=", "self", ".", "url_storage", ".", "get_item", "(", "key", ")", "if", "not", "url", ":", "url", "=", "self", ".", "create_url", "(", ")", "self", ".", "url_storage", ".", "set_item", "(", "key", ",", "url", ")", "return", "url", "else", ":", "return", "self", ".", "create_url", "(", ")" ]
Prints the traceback of all threads .
def dump_all_thread_tracebacks ( exclude_thread_ids = None , file = None ) : if exclude_thread_ids is None : exclude_thread_ids = [ ] if not file : file = sys . stdout import threading if hasattr ( sys , "_current_frames" ) : print ( "" , file = file ) threads = { t . ident : t for t in threading . enumerate ( ) } # noinspection PyProtectedMember for tid , stack in sys . _current_frames ( ) . items ( ) : if tid in exclude_thread_ids : continue # This is a bug in earlier Python versions. # http://bugs.python.org/issue17094 # Note that this leaves out all threads not created via the threading module. if tid not in threads : continue tags = [ ] thread = threads . get ( tid ) if thread : assert isinstance ( thread , threading . Thread ) if thread is threading . currentThread ( ) : tags += [ "current" ] # noinspection PyProtectedMember,PyUnresolvedReferences if isinstance ( thread , threading . _MainThread ) : tags += [ "main" ] tags += [ str ( thread ) ] else : tags += [ "unknown with id %i" % tid ] print ( "Thread %s:" % ", " . join ( tags ) , file = file ) print_tb ( stack , file = file ) print ( "" , file = file ) print ( "That were all threads." , file = file ) else : print ( "Does not have sys._current_frames, cannot get thread tracebacks." , file = file )
11,784
https://github.com/albertz/py_better_exchook/blob/3d524a027d7fc4e83e47e39a1978849561da69b3/better_exchook.py#L1247-L1289
[ "def", "_get_port_speed_price_id", "(", "items", ",", "port_speed", ",", "no_public", ",", "location", ")", ":", "for", "item", "in", "items", ":", "if", "utils", ".", "lookup", "(", "item", ",", "'itemCategory'", ",", "'categoryCode'", ")", "!=", "'port_speed'", ":", "continue", "# Check for correct capacity and if the item matches private only", "if", "any", "(", "[", "int", "(", "utils", ".", "lookup", "(", "item", ",", "'capacity'", ")", ")", "!=", "port_speed", ",", "_is_private_port_speed_item", "(", "item", ")", "!=", "no_public", ",", "not", "_is_bonded", "(", "item", ")", "]", ")", ":", "continue", "for", "price", "in", "item", "[", "'prices'", "]", ":", "if", "not", "_matches_location", "(", "price", ",", "location", ")", ":", "continue", "return", "price", "[", "'id'", "]", "raise", "SoftLayer", ".", "SoftLayerError", "(", "\"Could not find valid price for port speed: '%s'\"", "%", "port_speed", ")" ]
Some demo .
def _main ( ) : if sys . argv [ 1 : ] == [ "test" ] : for k , v in sorted ( globals ( ) . items ( ) ) : if not k . startswith ( "test_" ) : continue print ( "running: %s()" % k ) v ( ) print ( "ok." ) sys . exit ( ) elif sys . argv [ 1 : ] == [ "debug_shell" ] : debug_shell ( locals ( ) , globals ( ) ) sys . exit ( ) elif sys . argv [ 1 : ] == [ "debug_shell_exception" ] : try : raise Exception ( "demo exception" ) except Exception : better_exchook ( * sys . exc_info ( ) , debugshell = True ) sys . exit ( ) elif sys . argv [ 1 : ] : print ( "Usage: %s (test|...)" % sys . argv [ 0 ] ) sys . exit ( 1 ) # some examples # this code produces this output: https://gist.github.com/922622 try : x = { 1 : 2 , "a" : "b" } # noinspection PyMissingOrEmptyDocstring def f ( ) : y = "foo" # noinspection PyUnresolvedReferences,PyStatementEffect x , 42 , sys . stdin . __class__ , sys . exc_info , y , z f ( ) except Exception : better_exchook ( * sys . exc_info ( ) ) try : # noinspection PyArgumentList ( lambda _x : None ) ( __name__ , 42 ) # multiline except Exception : better_exchook ( * sys . exc_info ( ) ) try : class Obj : def __repr__ ( self ) : return ( "<Obj multi-\n" + " line repr>" ) obj = Obj ( ) assert not obj except Exception : better_exchook ( * sys . exc_info ( ) ) # noinspection PyMissingOrEmptyDocstring def f1 ( a ) : f2 ( a + 1 , 2 ) # noinspection PyMissingOrEmptyDocstring def f2 ( a , b ) : f3 ( a + b ) # noinspection PyMissingOrEmptyDocstring def f3 ( a ) : b = ( "abc" * 100 ) + "-interesting" # some long demo str a ( b ) # error, not callable try : f1 ( 13 ) except Exception : better_exchook ( * sys . exc_info ( ) ) # use this to overwrite the global exception handler install ( ) # and fail # noinspection PyUnresolvedReferences finalfail ( sys )
11,785
https://github.com/albertz/py_better_exchook/blob/3d524a027d7fc4e83e47e39a1978849561da69b3/better_exchook.py#L1502-L1586
[ "def", "_add_dependency", "(", "self", ",", "dependency", ",", "var_name", "=", "None", ")", ":", "if", "var_name", "is", "None", ":", "var_name", "=", "next", "(", "self", ".", "temp_var_names", ")", "# Don't add duplicate dependencies", "if", "(", "dependency", ",", "var_name", ")", "not", "in", "self", ".", "dependencies", ":", "self", ".", "dependencies", ".", "append", "(", "(", "dependency", ",", "var_name", ")", ")", "return", "var_name" ]
Verify mobile id Authentication signature is valid
def verify_mid_signature ( certificate_data , sp_challenge , response_challenge , signature ) : if not response_challenge . startswith ( sp_challenge ) : return False try : key = RSA . importKey ( certificate_data ) verifier = PKCS1_v1_5 . new ( key ) except ValueError : key = ECC . import_key ( certificate_data ) verifier = DSS . new ( key , 'deterministic-rfc6979' ) digest = PrehashedMessageData ( response_challenge ) try : verifier . verify ( digest , signature ) return True except ValueError : return False
11,786
https://github.com/thorgate/django-esteid/blob/407ae513e357fedea0e3e42198df8eb9d9ff0646/esteid/signature.py#L23-L52
[ "def", "load", "(", "config_path", "=", "DEFAULT_CONFIG_PATH", ")", ":", "if", "not", "pathlib", ".", "Path", "(", "config_path", ")", ".", "exists", "(", ")", ":", "logger", ".", "debug", "(", "f\"Could not locate {config_path}, using default config.\"", ")", "return", "DEFAULT_CONFIG", "config", "=", "configparser", ".", "ConfigParser", "(", "default_section", "=", "DEFAULT_CONFIG_SECTION", ")", "config", ".", "read", "(", "config_path", ")", "operators", "=", "config", ".", "get", "(", "section", "=", "DEFAULT_CONFIG_SECTION", ",", "option", "=", "\"operators\"", ",", "fallback", "=", "DEFAULT_OPERATORS", ")", "archiver", "=", "config", ".", "get", "(", "section", "=", "DEFAULT_CONFIG_SECTION", ",", "option", "=", "\"archiver\"", ",", "fallback", "=", "DEFAULT_ARCHIVER", ")", "path", "=", "config", ".", "get", "(", "section", "=", "DEFAULT_CONFIG_SECTION", ",", "option", "=", "\"path\"", ",", "fallback", "=", "\".\"", ")", "max_revisions", "=", "int", "(", "config", ".", "get", "(", "section", "=", "DEFAULT_CONFIG_SECTION", ",", "option", "=", "\"max_revisions\"", ",", "fallback", "=", "DEFAULT_MAX_REVISIONS", ",", ")", ")", "return", "WilyConfig", "(", "operators", "=", "operators", ",", "archiver", "=", "archiver", ",", "path", "=", "path", ",", "max_revisions", "=", "max_revisions", ")" ]
Used to dispatch events .
def drive ( self , event , * args ) : maps = self . base . get ( event , self . step ) for handle , data in maps [ : ] : params = args + data try : handle ( self , * params ) except Stop : break except StopIteration : pass except Kill as Root : raise except Erase : maps . remove ( ( handle , data ) ) except Exception as e : debug ( event , params ) for handle in self . pool : handle ( self , event , args )
11,787
https://github.com/untwisted/untwisted/blob/8a8d9c8a8d0f3452d5de67cd760297bb5759f637/untwisted/dispatcher.py#L16-L38
[ "def", "CreateDynamicDisplayAdSettings", "(", "client", ",", "opener", ")", ":", "media_service", "=", "client", ".", "GetService", "(", "'MediaService'", ",", "'v201809'", ")", "logo", "=", "{", "'xsi_type'", ":", "'Image'", ",", "'mediaId'", ":", "_CreateImage", "(", "media_service", ",", "opener", ",", "'https://goo.gl/dEvQeF'", ")", "}", "dynamic_settings", "=", "{", "'landscapeLogoImage'", ":", "logo", ",", "'pricePrefix'", ":", "'as low as'", ",", "'promoText'", ":", "'Free shipping!'", "}", "return", "dynamic_settings" ]
Send data to the child process through .
def send ( self , data ) : self . stdin . write ( data ) self . stdin . flush ( )
11,788
https://github.com/untwisted/untwisted/blob/8a8d9c8a8d0f3452d5de67cd760297bb5759f637/untwisted/expect.py#L43-L48
[ "def", "get_token_data", "(", "self", ")", ":", "token_data", "=", "self", ".", "_keystone_auth", ".", "conn", ".", "auth_ref", "token", "=", "token_data", "[", "'auth_token'", "]", "self", ".", "set_token", "(", "token", ")", "if", "self", ".", "cache", ".", "is_redis_ok", "(", ")", ":", "try", ":", "self", ".", "cache", ".", "set_cache_token", "(", "token_data", ")", "except", "CacheException", ":", "self", ".", "logger", ".", "error", "(", "'Token not setted in cache.'", ")", "token_data", "=", "{", "'expires_at'", ":", "token_data", "[", "'expires_at'", "]", ",", "'token'", ":", "token", "}", "return", "token_data" ]
If positional or keyword arguments are empty return only one or the other .
def _simplify_arguments ( arguments ) : if len ( arguments . args ) == 0 : return arguments . kwargs elif len ( arguments . kwargs ) == 0 : return arguments . args else : return arguments
11,789
https://github.com/potash/drain/blob/ddd62081cb9317beb5d21f86c8b4bb196ca3d222/drain/step.py#L310-L319
[ "def", "close", "(", "self", ")", ":", "windll", ".", "kernel32", ".", "CloseHandle", "(", "self", ".", "conout_pipe", ")", "windll", ".", "kernel32", ".", "CloseHandle", "(", "self", ".", "conin_pipe", ")" ]
Load this step s result from its dump directory
def load ( self ) : hdf_filename = os . path . join ( self . _dump_dirname , 'result.h5' ) if os . path . isfile ( hdf_filename ) : store = pd . HDFStore ( hdf_filename , mode = 'r' ) keys = store . keys ( ) if keys == [ '/df' ] : self . result = store [ 'df' ] else : if set ( keys ) == set ( map ( lambda i : '/%s' % i , range ( len ( keys ) ) ) ) : # keys are not necessarily ordered self . result = [ store [ str ( k ) ] for k in range ( len ( keys ) ) ] else : self . result = { k [ 1 : ] : store [ k ] for k in keys } else : self . result = joblib . load ( os . path . join ( self . _output_dirname , 'dump' , 'result.pkl' ) )
11,790
https://github.com/potash/drain/blob/ddd62081cb9317beb5d21f86c8b4bb196ca3d222/drain/step.py#L211-L230
[ "def", "create", "(", "self", ",", "name", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "try", ":", "return", "super", "(", "ImageMemberManager", ",", "self", ")", ".", "create", "(", "name", ",", "*", "args", ",", "*", "*", "kwargs", ")", "except", "Exception", "as", "e", ":", "if", "e", ".", "http_status", "==", "403", ":", "raise", "exc", ".", "UnsharableImage", "(", "\"You cannot share a public image.\"", ")", "else", ":", "raise" ]
Set up dump creating directories and writing step . yaml file containing yaml dump of this step .
def setup_dump ( self ) : dumpdir = self . _dump_dirname if not os . path . isdir ( dumpdir ) : os . makedirs ( dumpdir ) dump = False yaml_filename = self . _yaml_filename if not os . path . isfile ( yaml_filename ) : dump = True else : with open ( yaml_filename ) as f : if f . read ( ) != yaml . dump ( self ) : logging . warning ( 'Existing step.yaml does not match hash, regenerating' ) dump = True if dump : with open ( yaml_filename , 'w' ) as f : yaml . dump ( self , f )
11,791
https://github.com/potash/drain/blob/ddd62081cb9317beb5d21f86c8b4bb196ca3d222/drain/step.py#L232-L258
[ "def", "cancelHistoricalData", "(", "self", ",", "bars", ":", "BarDataList", ")", ":", "self", ".", "client", ".", "cancelHistoricalData", "(", "bars", ".", "reqId", ")", "self", ".", "wrapper", ".", "endSubscription", "(", "bars", ")" ]
package - docs is a CLI for building single - package previews of documentation in the LSST Stack .
def main ( ctx , root_dir , verbose ) : root_dir = discover_package_doc_dir ( root_dir ) # Subcommands should use the click.pass_obj decorator to get this # ctx.obj object as the first argument. ctx . obj = { 'root_dir' : root_dir , 'verbose' : verbose } # Set up application logging. This ensures that only documenteer's # logger is activated. If necessary, we can add other app's loggers too. if verbose : log_level = logging . DEBUG else : log_level = logging . INFO logger = logging . getLogger ( 'documenteer' ) logger . addHandler ( logging . StreamHandler ( ) ) logger . setLevel ( log_level )
11,792
https://github.com/lsst-sqre/documenteer/blob/75f02901a80042b28d074df1cc1dca32eb8e38c8/documenteer/stackdocs/packagecli.py#L41-L78
[ "def", "getTotalCpuTimeAndMemoryUsage", "(", ")", ":", "me", "=", "resource", ".", "getrusage", "(", "resource", ".", "RUSAGE_SELF", ")", "childs", "=", "resource", ".", "getrusage", "(", "resource", ".", "RUSAGE_CHILDREN", ")", "totalCpuTime", "=", "me", ".", "ru_utime", "+", "me", ".", "ru_stime", "+", "childs", ".", "ru_utime", "+", "childs", ".", "ru_stime", "totalMemoryUsage", "=", "me", ".", "ru_maxrss", "+", "me", ".", "ru_maxrss", "return", "totalCpuTime", ",", "totalMemoryUsage" ]
Fetches the row - aggregated input columns for this ColumnFunction .
def apply_and_name ( self , aggregator ) : reduced_df = self . _apply ( aggregator ) if len ( self . names ) != len ( reduced_df . columns ) : raise IndexError ( "ColumnFunction creates more columns than it has names for." ) reduced_df . columns = self . names return reduced_df
11,793
https://github.com/potash/drain/blob/ddd62081cb9317beb5d21f86c8b4bb196ca3d222/drain/aggregate.py#L106-L122
[ "def", "_get_all_volumes_paths", "(", "conn", ")", ":", "volumes", "=", "[", "vol", "for", "l", "in", "[", "obj", ".", "listAllVolumes", "(", ")", "for", "obj", "in", "conn", ".", "listAllStoragePools", "(", ")", "]", "for", "vol", "in", "l", "]", "return", "{", "vol", ".", "path", "(", ")", ":", "[", "path", ".", "text", "for", "path", "in", "ElementTree", ".", "fromstring", "(", "vol", ".", "XMLDesc", "(", ")", ")", ".", "findall", "(", "'.//backingStore/path'", ")", "]", "for", "vol", "in", "volumes", "if", "_is_valid_volume", "(", "vol", ")", "}" ]
Performs a groupby of the unique Columns by index as constructed from self . df .
def aggregate ( self , index ) : # deal with index as a string vs index as a index/MultiIndex if isinstance ( index , string_types ) : col_df_grouped = self . col_df . groupby ( self . df [ index ] ) else : self . col_df . index = pd . MultiIndex . from_arrays ( [ self . df [ i ] for i in index ] ) col_df_grouped = self . col_df . groupby ( level = index ) self . col_df . index = self . df . index # perform the actual aggregation self . reduced_df = pd . DataFrame ( { colred : col_df_grouped [ colred . column ] . agg ( colred . agg_func ) for colred in self . column_reductions } ) # then apply the functions to produce the final dataframe reduced_dfs = [ ] for cf in self . column_functions : # each apply_and_name() calls get_reduced() with the column reductions it wants reduced_dfs . append ( cf . apply_and_name ( self ) ) return pd . concat ( reduced_dfs , axis = 1 )
11,794
https://github.com/potash/drain/blob/ddd62081cb9317beb5d21f86c8b4bb196ca3d222/drain/aggregate.py#L260-L291
[ "def", "cos_open", "(", "file", ",", "mode", "=", "'r'", ",", "buffering", "=", "-", "1", ",", "encoding", "=", "None", ",", "errors", "=", "None", ",", "newline", "=", "None", ",", "storage", "=", "None", ",", "storage_parameters", "=", "None", ",", "unsecure", "=", "None", ",", "*", "*", "kwargs", ")", ":", "# Handles file-like objects:", "if", "hasattr", "(", "file", ",", "'read'", ")", ":", "with", "_text_io_wrapper", "(", "file", ",", "mode", ",", "encoding", ",", "errors", ",", "newline", ")", "as", "wrapped", ":", "yield", "wrapped", "return", "# Handles path-like objects", "file", "=", "fsdecode", "(", "file", ")", ".", "replace", "(", "'\\\\'", ",", "'/'", ")", "# Storage object", "if", "is_storage", "(", "file", ",", "storage", ")", ":", "with", "get_instance", "(", "name", "=", "file", ",", "cls", "=", "'raw'", "if", "buffering", "==", "0", "else", "'buffered'", ",", "storage", "=", "storage", ",", "storage_parameters", "=", "storage_parameters", ",", "mode", "=", "mode", ",", "unsecure", "=", "unsecure", ",", "*", "*", "kwargs", ")", "as", "stream", ":", "with", "_text_io_wrapper", "(", "stream", ",", "mode", "=", "mode", ",", "encoding", "=", "encoding", ",", "errors", "=", "errors", ",", "newline", "=", "newline", ")", "as", "wrapped", ":", "yield", "wrapped", "# Local file: Redirect to \"io.open\"", "else", ":", "with", "io_open", "(", "file", ",", "mode", ",", "buffering", ",", "encoding", ",", "errors", ",", "newline", ",", "*", "*", "kwargs", ")", "as", "stream", ":", "yield", "stream" ]
Returns a dataframe with the requested ColumnReductions .
def _apply ( self , aggregator ) : reduced_dfs = [ ] if self . include_fraction : n_df = self . numerator . apply_and_name ( aggregator ) d_df = self . denominator . apply_and_name ( aggregator ) reduced_dfs . extend ( [ n_df [ cn ] / d_df [ cd ] for cn , cd in product ( n_df . columns , d_df . columns ) ] ) if self . include_numerator : reduced_dfs . append ( self . numerator . apply_and_name ( aggregator ) ) if self . include_denominator : reduced_dfs . append ( self . denominator . apply_and_name ( aggregator ) ) return pd . concat ( reduced_dfs , axis = 1 )
11,795
https://github.com/potash/drain/blob/ddd62081cb9317beb5d21f86c8b4bb196ca3d222/drain/aggregate.py#L350-L367
[ "def", "to_ufo_components", "(", "self", ",", "ufo_glyph", ",", "layer", ")", ":", "pen", "=", "ufo_glyph", ".", "getPointPen", "(", ")", "for", "index", ",", "component", "in", "enumerate", "(", "layer", ".", "components", ")", ":", "pen", ".", "addComponent", "(", "component", ".", "name", ",", "component", ".", "transform", ")", "if", "component", ".", "anchor", ":", "if", "COMPONENT_INFO_KEY", "not", "in", "ufo_glyph", ".", "lib", ":", "ufo_glyph", ".", "lib", "[", "COMPONENT_INFO_KEY", "]", "=", "[", "]", "ufo_glyph", ".", "lib", "[", "COMPONENT_INFO_KEY", "]", ".", "append", "(", "{", "\"name\"", ":", "component", ".", "name", ",", "\"index\"", ":", "index", ",", "\"anchor\"", ":", "component", ".", "anchor", "}", ")", "# data related to components stored in lists of booleans", "# each list's elements correspond to the components in order", "for", "key", "in", "[", "\"alignment\"", ",", "\"locked\"", ",", "\"smartComponentValues\"", "]", ":", "values", "=", "[", "getattr", "(", "c", ",", "key", ")", "for", "c", "in", "layer", ".", "components", "]", "if", "any", "(", "values", ")", ":", "ufo_glyph", ".", "lib", "[", "_lib_key", "(", "key", ")", "]", "=", "values" ]
Create a clone of the Table optionally with some properties changed
def clone ( self , * * kwargs ) : init_kwargs = { "name" : self . __name , "dataframe" : self . __df , "include_columns" : self . __include_columns , "include_index" : self . __include_index , "style" : self . __style , "column_styles" : self . __col_styles , "column_widths" : self . __column_widths , "row_styles" : self . __row_styles , "header_style" : self . header_style , "index_style" : self . index_style } init_kwargs . update ( kwargs ) return self . __class__ ( * * init_kwargs )
11,796
https://github.com/fkarb/xltable/blob/7a592642d27ad5ee90d2aa8c26338abaa9d84bea/xltable/table.py#L105-L120
[ "def", "cli", "(", "ctx", ",", "board", ",", "serial_port", ",", "ftdi_id", ",", "sram", ",", "project_dir", ",", "verbose", ",", "verbose_yosys", ",", "verbose_arachne", ")", ":", "drivers", "=", "Drivers", "(", ")", "drivers", ".", "pre_upload", "(", ")", "# Run scons", "exit_code", "=", "SCons", "(", "project_dir", ")", ".", "upload", "(", "{", "'board'", ":", "board", ",", "'verbose'", ":", "{", "'all'", ":", "verbose", ",", "'yosys'", ":", "verbose_yosys", ",", "'arachne'", ":", "verbose_arachne", "}", "}", ",", "serial_port", ",", "ftdi_id", ",", "sram", ")", "drivers", ".", "post_upload", "(", ")", "ctx", ".", "exit", "(", "exit_code", ")" ]
Inspect SDFile list of string
def inspect ( lines ) : labels = set ( ) count = 0 exp = re . compile ( r">.*?<([\w ]+)>" ) # Space should be accepted valid = False for line in lines : if line . startswith ( "M END\n" ) : valid = True elif line . startswith ( "$$$$" ) : count += 1 valid = False else : result = exp . match ( line ) if result : labels . add ( result . group ( 1 ) ) if valid : count += 1 return list ( labels ) , count
11,797
https://github.com/mojaie/chorus/blob/fc7fe23a0272554c67671645ab07830b315eeb1b/chorus/v2000reader.py#L17-L39
[ "def", "icao", "(", "msg", ")", ":", "DF", "=", "df", "(", "msg", ")", "if", "DF", "in", "(", "11", ",", "17", ",", "18", ")", ":", "addr", "=", "msg", "[", "2", ":", "8", "]", "elif", "DF", "in", "(", "0", ",", "4", ",", "5", ",", "16", ",", "20", ",", "21", ")", ":", "c0", "=", "bin2int", "(", "crc", "(", "msg", ",", "encode", "=", "True", ")", ")", "c1", "=", "hex2int", "(", "msg", "[", "-", "6", ":", "]", ")", "addr", "=", "'%06X'", "%", "(", "c0", "^", "c1", ")", "else", ":", "addr", "=", "None", "return", "addr" ]
Inspect SDFile structure
def inspect_file ( path ) : with open ( path , 'rb' ) as f : labels , count = inspect ( tx . decode ( line ) for line in f ) return labels , count
11,798
https://github.com/mojaie/chorus/blob/fc7fe23a0272554c67671645ab07830b315eeb1b/chorus/v2000reader.py#L49-L57
[ "def", "open_pager", "(", "self", ")", ":", "n_rows", ",", "n_cols", "=", "self", ".", "term", ".", "stdscr", ".", "getmaxyx", "(", ")", "if", "self", ".", "config", "[", "'max_pager_cols'", "]", "is", "not", "None", ":", "n_cols", "=", "min", "(", "n_cols", ",", "self", ".", "config", "[", "'max_pager_cols'", "]", ")", "data", "=", "self", ".", "get_selected_item", "(", ")", "if", "data", "[", "'type'", "]", "==", "'Submission'", ":", "text", "=", "'\\n\\n'", ".", "join", "(", "(", "data", "[", "'permalink'", "]", ",", "data", "[", "'text'", "]", ")", ")", "self", ".", "term", ".", "open_pager", "(", "text", ",", "wrap", "=", "n_cols", ")", "elif", "data", "[", "'type'", "]", "==", "'Comment'", ":", "text", "=", "'\\n\\n'", ".", "join", "(", "(", "data", "[", "'permalink'", "]", ",", "data", "[", "'body'", "]", ")", ")", "self", ".", "term", ".", "open_pager", "(", "text", ",", "wrap", "=", "n_cols", ")", "else", ":", "self", ".", "term", ".", "flash", "(", ")" ]
Parse SDFile data part into dict
def optional_data ( lines ) : data = { } exp = re . compile ( r">.*?<([\w ]+)>" ) # Space should be accepted for i , line in enumerate ( lines ) : result = exp . match ( line ) if result : data [ result . group ( 1 ) ] = lines [ i + 1 ] return data
11,799
https://github.com/mojaie/chorus/blob/fc7fe23a0272554c67671645ab07830b315eeb1b/chorus/v2000reader.py#L60-L68
[ "def", "interpoled_resampling", "(", "W", ",", "x", ")", ":", "N", "=", "W", ".", "shape", "[", "0", "]", "idx", "=", "np", ".", "argsort", "(", "x", ")", "xs", "=", "x", "[", "idx", "]", "ws", "=", "W", "[", "idx", "]", "cs", "=", "np", ".", "cumsum", "(", "avg_n_nplusone", "(", "ws", ")", ")", "u", "=", "random", ".", "rand", "(", "N", ")", "xrs", "=", "np", ".", "empty", "(", "N", ")", "where", "=", "np", ".", "searchsorted", "(", "cs", ",", "u", ")", "# costs O(N log(N)) but algorithm has O(N log(N)) complexity anyway", "for", "n", "in", "range", "(", "N", ")", ":", "m", "=", "where", "[", "n", "]", "if", "m", "==", "0", ":", "xrs", "[", "n", "]", "=", "xs", "[", "0", "]", "elif", "m", "==", "N", ":", "xrs", "[", "n", "]", "=", "xs", "[", "-", "1", "]", "else", ":", "xrs", "[", "n", "]", "=", "interpol", "(", "cs", "[", "m", "-", "1", "]", ",", "cs", "[", "m", "]", ",", "xs", "[", "m", "-", "1", "]", ",", "xs", "[", "m", "]", ",", "u", "[", "n", "]", ")", "return", "xrs" ]