query
stringlengths
5
1.23k
positive
stringlengths
53
15.2k
id_
int64
0
252k
task_name
stringlengths
87
242
negative
listlengths
20
553
Read reference info from the root of a ReSpecTh XML file.
def get_reference(root):
    """Read reference info from the root of a ReSpecTh XML file.

    Args:
        root: ``xml.etree.ElementTree.Element`` root of the ReSpecTh document.

    Returns:
        dict: Reference information. On a successful DOI lookup: ``doi``,
        ``journal``, ``year``, ``volume``, ``pages``, ``authors``; otherwise
        a ``detail`` fallback string built from ``preferredKey``.

    Raises:
        MissingElementError: If the ``bibliographyLink`` element is absent.
        KeywordError: If the DOI lookup fails and no ``preferredKey`` is set.
        MissingAttributeError: If neither ``doi`` nor ``preferredKey`` is set.
    """
    reference = {}
    elem = root.find('bibliographyLink')
    if elem is None:
        raise MissingElementError('bibliographyLink')

    # Try to get reference info via DOI, fall back on preferredKey if necessary.
    ref_doi = elem.get('doi', None)
    ref_key = elem.get('preferredKey', None)

    if ref_doi is not None:
        try:
            ref = crossref_api.works(ids=ref_doi)['message']
        except (HTTPError, habanero.RequestError, ConnectionError):
            if ref_key is None:
                raise KeywordError('DOI not found and preferredKey attribute not set')
            else:
                warn('Missing doi attribute in bibliographyLink or lookup failed. '
                     'Setting "detail" key as a fallback; please update to the appropriate fields.')
                reference['detail'] = ref_key
                if reference['detail'][-1] != '.':
                    reference['detail'] += '.'
        else:
            if ref_key is not None:
                warn('Using DOI to obtain reference information, rather than preferredKey.')
            reference['doi'] = elem.attrib['doi']
            # Now get elements of the reference data.
            # Assume that the reference returned by the DOI lookup always has a container-title.
            reference['journal'] = ref.get('container-title')[0]
            ref_year = ref.get('published-print') or ref.get('published-online')
            reference['year'] = int(ref_year['date-parts'][0][0])
            reference['volume'] = int(ref.get('volume'))
            reference['pages'] = ref.get('page')
            reference['authors'] = []
            for author in ref['author']:
                auth = {'name': ' '.join([author['given'], author['family']])}
                # Add ORCID if available. BUG FIX: the previous code used
                # orcid.lstrip('http://orcid.org/'), but str.lstrip removes a
                # *character set*, not a prefix — an "https://..." ORCID URL was
                # left as "s://orcid.org/...". Take the last path component instead
                # (a bare ORCID ID without slashes passes through unchanged).
                orcid = author.get('ORCID')
                if orcid:
                    auth['ORCID'] = orcid.rsplit('/', 1)[-1]
                reference['authors'].append(auth)
    elif ref_key is not None:
        warn('Missing doi attribute in bibliographyLink. '
             'Setting "detail" key as a fallback; please update to the appropriate fields.')
        reference['detail'] = ref_key
        if reference['detail'][-1] != '.':
            reference['detail'] += '.'
    else:
        # Need one of DOI or preferredKey
        raise MissingAttributeError('preferredKey', 'bibliographyLink')

    return reference
1,700
https://github.com/pr-omethe-us/PyKED/blob/d9341a068c1099049a3f1de41c512591f342bf64/pyked/converters.py#L86-L149
[ "def", "set_global_defaults", "(", "*", "*", "kwargs", ")", ":", "valid_options", "=", "[", "'active'", ",", "'selected'", ",", "'disabled'", ",", "'on'", ",", "'off'", ",", "'on_active'", ",", "'on_selected'", ",", "'on_disabled'", ",", "'off_active'", ",", "'off_selected'", ",", "'off_disabled'", ",", "'color'", ",", "'color_on'", ",", "'color_off'", ",", "'color_active'", ",", "'color_selected'", ",", "'color_disabled'", ",", "'color_on_selected'", ",", "'color_on_active'", ",", "'color_on_disabled'", ",", "'color_off_selected'", ",", "'color_off_active'", ",", "'color_off_disabled'", ",", "'animation'", ",", "'offset'", ",", "'scale_factor'", ",", "]", "for", "kw", "in", "kwargs", ":", "if", "kw", "in", "valid_options", ":", "_default_options", "[", "kw", "]", "=", "kwargs", "[", "kw", "]", "else", ":", "error", "=", "\"Invalid option '{0}'\"", ".", "format", "(", "kw", ")", "raise", "KeyError", "(", "error", ")" ]
Gets ignition type and target .
def get_ignition_type(root):
    """Gets ignition type and target.

    Args:
        root: ``xml.etree.ElementTree.Element`` root of the ReSpecTh document.

    Returns:
        dict: ``type`` and ``target`` describing the ignition definition.

    Raises:
        MissingElementError: If the ``ignitionType`` element is absent.
        MissingAttributeError: If ``target`` or ``type`` attributes are absent.
        NotImplementedError: If multiple ignition targets are specified.
        KeywordError: If the target or type is not in the accepted vocabulary.
    """
    elem = root.find('ignitionType')
    if elem is None:
        raise MissingElementError('ignitionType')
    attribs = elem.attrib

    if 'target' not in attribs:
        raise MissingAttributeError('target', 'ignitionType')
    ign_target = attribs['target'].rstrip(';').upper()

    if 'type' not in attribs:
        raise MissingAttributeError('type', 'ignitionType')
    ign_type = attribs['type']
    if ign_type == 'baseline max intercept from d/dt':
        ign_type = 'd/dt max extrapolated'

    # ReSpecTh allows multiple ignition targets, which are not supported here.
    if len(ign_target.split(';')) > 1:
        raise NotImplementedError('Multiple ignition targets not supported.')

    # Map ReSpecTh shorthand onto the accepted target names. Acceptable
    # targets include pressure, temperature, and species concentrations.
    shorthand = {'OHEX': 'OH*', 'CHEX': 'CH*', 'P': 'pressure', 'T': 'temperature'}
    ign_target = shorthand.get(ign_target, ign_target)

    if ign_target not in ['pressure', 'temperature', 'OH', 'OH*', 'CH*', 'CH']:
        raise KeywordError(ign_target + ' not valid ignition target')

    if ign_type not in ['max', 'd/dt max', '1/2 max', 'min', 'd/dt max extrapolated']:
        raise KeywordError(ign_type + ' not valid ignition type')

    return {'type': ign_type, 'target': ign_target}
1,701
https://github.com/pr-omethe-us/PyKED/blob/d9341a068c1099049a3f1de41c512591f342bf64/pyked/converters.py#L263-L315
[ "def", "create_pgroup_snapshot", "(", "self", ",", "source", ",", "*", "*", "kwargs", ")", ":", "# In REST 1.4, support was added for snapshotting multiple pgroups. As a", "# result, the endpoint response changed from an object to an array of", "# objects. To keep the response type consistent between REST versions,", "# we unbox the response when creating a single snapshot.", "result", "=", "self", ".", "create_pgroup_snapshots", "(", "[", "source", "]", ",", "*", "*", "kwargs", ")", "if", "self", ".", "_rest_version", ">=", "LooseVersion", "(", "\"1.4\"", ")", ":", "headers", "=", "result", ".", "headers", "result", "=", "ResponseDict", "(", "result", "[", "0", "]", ")", "result", ".", "headers", "=", "headers", "return", "result" ]
Convert a ReSpecTh XML file to a ChemKED-compliant dictionary.
def ReSpecTh_to_ChemKED(filename_xml, file_author='', file_author_orcid='', *, validate=False):
    """Convert a ReSpecTh XML file to a ChemKED-compliant dictionary.

    Args:
        filename_xml (str): Path of the ReSpecTh XML file to convert.
        file_author (str, optional): Name of an additional file author.
        file_author_orcid (str, optional): ORCID of the additional author;
            requires ``file_author`` to also be given.
        validate (bool, optional): When True, validate the result by
            constructing a ``chemked.ChemKED`` instance from it.

    Returns:
        dict: ChemKED-compliant representation of the XML content.

    Raises:
        KeywordError: If pressure rise or volume history conflict with the
            apparatus kind, or if ``file_author_orcid`` is given without
            ``file_author``.
    """
    root = etree.parse(filename_xml).getroot()

    # File metadata and bibliographic reference.
    properties = get_file_metadata(root)
    properties['reference'] = get_reference(root)
    # Record the name of the original data file in the reference detail.
    detail = properties['reference'].get('detail', '')
    properties['reference']['detail'] = (detail + 'Converted from ReSpecTh XML file ' +
                                         os.path.basename(filename_xml))

    # Ensure ignition delay, and get which kind of experiment.
    properties.update(get_experiment_kind(root))
    # Properties shared across the file, plus the ignition-delay definition.
    properties['common-properties'] = get_common_properties(root)
    properties['common-properties']['ignition-type'] = get_ignition_type(root)
    properties['datapoints'] = get_datapoints(root)

    datapoints = properties['datapoints']

    # Pressure rise is incompatible with a rapid compression machine.
    has_pres_rise = ('pressure-rise' in properties['common-properties'] or
                     any('pressure-rise' in dp for dp in datapoints))
    if has_pres_rise and properties['apparatus']['kind'] == 'rapid compression machine':
        raise KeywordError('Pressure rise cannot be defined for RCM.')

    # Conversely, a volume history is incompatible with a shock tube.
    has_vol_hist = any(t.get('type') == 'volume'
                       for dp in datapoints
                       for t in dp.get('time-histories', [{}]))
    if has_vol_hist and properties['apparatus']['kind'] == 'shock tube':
        raise KeywordError('Volume history cannot be defined for shock tube.')

    # Append any additional file author.
    if file_author_orcid and not file_author:
        raise KeywordError('If file_author_orcid is specified, file_author must be as well')
    if file_author:
        extra_author = {'name': file_author}
        if file_author_orcid:
            extra_author['ORCID'] = file_author_orcid
        properties['file-authors'].append(extra_author)

    # Propagate common properties onto every datapoint.
    for datapoint in datapoints:
        for prop, value in properties['common-properties'].items():
            datapoint[prop] = value

    if validate:
        chemked.ChemKED(dict_input=properties)

    return properties
1,702
https://github.com/pr-omethe-us/PyKED/blob/d9341a068c1099049a3f1de41c512591f342bf64/pyked/converters.py#L474-L544
[ "def", "fillNullValues", "(", "col", ",", "rows", ")", ":", "lastval", "=", "None", "nullfunc", "=", "isNullFunc", "(", ")", "n", "=", "0", "rowsToFill", "=", "list", "(", "rows", ")", "for", "r", "in", "Progress", "(", "col", ".", "sheet", ".", "rows", ",", "'filling'", ")", ":", "# loop over all rows", "try", ":", "val", "=", "col", ".", "getValue", "(", "r", ")", "except", "Exception", "as", "e", ":", "val", "=", "e", "if", "nullfunc", "(", "val", ")", "and", "r", "in", "rowsToFill", ":", "if", "lastval", ":", "col", ".", "setValue", "(", "r", ",", "lastval", ")", "n", "+=", "1", "else", ":", "lastval", "=", "val", "col", ".", "recalc", "(", ")", "status", "(", "\"filled %d values\"", "%", "n", ")" ]
Command-line entry point for converting a ReSpecTh XML file to a ChemKED YAML file.
def respth2ck(argv=None):
    """Command-line entry point for converting a ReSpecTh XML file to a ChemKED YAML file.

    Args:
        argv (list, optional): Argument list; defaults to ``sys.argv`` when None.
    """
    parser = ArgumentParser(description='Convert a ReSpecTh XML file to a ChemKED YAML file.')
    parser.add_argument('-i', '--input', type=str, required=True,
                        help='Input filename (e.g., "file1.yaml")')
    parser.add_argument('-o', '--output', type=str, required=False, default='',
                        help='Output filename (e.g., "file1.xml")')
    parser.add_argument('-fa', '--file-author', dest='file_author', type=str,
                        required=False, default='',
                        help='File author name to override original')
    parser.add_argument('-fo', '--file-author-orcid', dest='file_author_orcid', type=str,
                        required=False, default='',
                        help='File author ORCID')
    args = parser.parse_args(argv)

    properties = ReSpecTh_to_ChemKED(args.input, args.file_author, args.file_author_orcid,
                                     validate=True)

    # Default the output path to the input basename with a .yaml extension.
    filename_ck = args.output
    if not filename_ck:
        base = os.path.splitext(os.path.basename(args.input))[0]
        filename_ck = os.path.join(os.path.dirname(args.input), base + '.yaml')

    with open(filename_ck, 'w') as outfile:
        yaml.dump(properties, outfile, default_flow_style=False)

    print('Converted to ' + filename_ck)
1,703
https://github.com/pr-omethe-us/PyKED/blob/d9341a068c1099049a3f1de41c512591f342bf64/pyked/converters.py#L547-L595
[ "def", "ended", "(", "self", ")", ":", "statistics", "=", "self", ".", "_properties", ".", "get", "(", "\"statistics\"", ")", "if", "statistics", "is", "not", "None", ":", "millis", "=", "statistics", ".", "get", "(", "\"endTime\"", ")", "if", "millis", "is", "not", "None", ":", "return", "_helpers", ".", "_datetime_from_microseconds", "(", "millis", "*", "1000.0", ")" ]
Command-line entry point for converting a ChemKED YAML file to a ReSpecTh XML file.
def ck2respth(argv=None):
    """Command-line entry point for converting a ChemKED YAML file to a ReSpecTh XML file.

    Args:
        argv (list, optional): Argument list; defaults to ``sys.argv`` when None.
    """
    parser = ArgumentParser(description='Convert a ChemKED YAML file to a ReSpecTh XML file.')
    parser.add_argument('-i', '--input', type=str, required=True,
                        help='Input filename (e.g., "file1.xml")')
    parser.add_argument('-o', '--output', type=str, required=False, default='',
                        help='Output filename (e.g., "file1.yaml")')
    args = parser.parse_args(argv)

    # Load the YAML file and hand conversion off to the ChemKED object.
    chemked.ChemKED(yaml_file=args.input).convert_to_ReSpecTh(args.output)
1,704
https://github.com/pr-omethe-us/PyKED/blob/d9341a068c1099049a3f1de41c512591f342bf64/pyked/converters.py#L598-L619
[ "def", "to_dict", "(", "self", ")", ":", "return", "{", "\"all_set\"", ":", "self", ".", "_is_all_set", "(", ")", ",", "\"progress\"", ":", "self", ".", "progress", "(", ")", ",", "\"values\"", ":", "{", "property_name", ":", "getattr", "(", "self", ",", "property_name", ")", "or", "[", "]", "for", "property_name", "in", "worker_mapping", "(", ")", ".", "keys", "(", ")", "}", "}" ]
General function for converting between ReSpecTh and ChemKED files based on file extension.
def main(argv=None):
    """General function for converting between ReSpecTh and ChemKED files based on extension.

    Args:
        argv (list, optional): Argument list; defaults to ``sys.argv`` when None.

    Raises:
        KeywordError: If the input/output extension pair is not .xml/.yaml
            or .yaml/.xml.
    """
    parser = ArgumentParser(description='Convert between ReSpecTh XML file and ChemKED YAML file '
                                        'automatically based on file extension.')
    parser.add_argument('-i', '--input', type=str, required=True,
                        help='Input filename (e.g., "file1.yaml" or "file2.xml")')
    parser.add_argument('-o', '--output', type=str, required=False, default='',
                        help='Output filename (e.g., "file1.xml" or "file2.yaml")')
    parser.add_argument('-fa', '--file-author', dest='file_author', type=str,
                        required=False, default='',
                        help='File author name to override original')
    parser.add_argument('-fo', '--file-author-orcid', dest='file_author_orcid', type=str,
                        required=False, default='',
                        help='File author ORCID')
    args = parser.parse_args(argv)

    # Compute both extensions once instead of repeating splitext per branch.
    in_ext = os.path.splitext(args.input)[1]
    out_ext = os.path.splitext(args.output)[1]

    if in_ext == '.xml' and out_ext == '.yaml':
        respth2ck(['-i', args.input, '-o', args.output,
                   '-fa', args.file_author, '-fo', args.file_author_orcid])
    elif in_ext == '.yaml' and out_ext == '.xml':
        c = chemked.ChemKED(yaml_file=args.input)
        c.convert_to_ReSpecTh(args.output)
    elif in_ext == '.xml' and out_ext == '.xml':
        raise KeywordError('Cannot convert .xml to .xml')
    elif in_ext == '.yaml' and out_ext == '.yaml':
        raise KeywordError('Cannot convert .yaml to .yaml')
    else:
        raise KeywordError('Input/output args need to be .xml/.yaml')
1,705
https://github.com/pr-omethe-us/PyKED/blob/d9341a068c1099049a3f1de41c512591f342bf64/pyked/converters.py#L622-L672
[ "def", "draw_canvas", "(", ")", ":", "for", "x", "in", "range", "(", "len", "(", "world", ")", ")", ":", "for", "y", "in", "range", "(", "len", "(", "world", "[", "x", "]", ")", ")", ":", "if", "world", "[", "x", "]", "[", "y", "]", ".", "value", ":", "color", "=", "world", "[", "x", "]", "[", "y", "]", ".", "color_alive", ".", "get_as_hex", "(", ")", "else", ":", "color", "=", "world", "[", "x", "]", "[", "y", "]", ".", "color_dead", ".", "get_as_hex", "(", ")", "canvas", ".", "itemconfig", "(", "canvas_grid", "[", "x", "]", "[", "y", "]", ",", "fill", "=", "color", ")" ]
Add user details .
def process_exception(self, request, exception):
    """Add user details.

    Stores the authenticated user's email in ``request.META['USER']`` so it
    is available to downstream error reporting.
    """
    current_user = request.user
    if current_user and hasattr(current_user, 'email'):
        request.META['USER'] = current_user.email
1,706
https://github.com/bitlabstudio/django-libs/blob/2c5376cda084bf16edea540e0f6999f1d844afd0/django_libs/middleware.py#L36-L41
[ "def", "validate_arguments", "(", "log", ",", "whitelisted_args", ",", "args", ")", ":", "valid_patterns", "=", "{", "re", ".", "compile", "(", "p", ")", ":", "v", "for", "p", ",", "v", "in", "whitelisted_args", ".", "items", "(", ")", "}", "def", "validate", "(", "idx", ")", ":", "arg", "=", "args", "[", "idx", "]", "for", "pattern", ",", "has_argument", "in", "valid_patterns", ".", "items", "(", ")", ":", "if", "pattern", ".", "match", "(", "arg", ")", ":", "return", "2", "if", "has_argument", "else", "1", "log", ".", "warn", "(", "\"Zinc argument '{}' is not supported, and is subject to change/removal!\"", ".", "format", "(", "arg", ")", ")", "return", "1", "arg_index", "=", "0", "while", "arg_index", "<", "len", "(", "args", ")", ":", "arg_index", "+=", "validate", "(", "arg_index", ")" ]
Send broken link emails for relevant 404 NOT FOUND responses .
def process_response(self, request, response):
    """Send broken link emails for relevant 404 NOT FOUND responses.

    Only acts on 404 responses when DEBUG is off and the request is not
    ignorable. Always returns the response unchanged.
    """
    # Guard clauses: anything other than a reportable 404 passes straight through.
    if response.status_code != 404 or settings.DEBUG:
        return response

    domain = request.get_host()
    path = request.get_full_path()
    referer = force_text(request.META.get('HTTP_REFERER', ''), errors='replace')

    if self.is_ignorable_request(request, path, domain, referer):
        return response

    ua = request.META.get('HTTP_USER_AGENT', '<none>')
    ip = request.META.get('REMOTE_ADDR', '<none>')
    user = None
    if request.user and hasattr(request.user, 'email'):
        user = request.user.email

    content = ("Referrer: %s\n"
               "Requested URL: %s\n"
               "User agent: %s\n"
               "IP address: %s\n"
               "User: %s\n") % (referer, path, ua, ip, user)

    # Flag internal broken links (same-domain referer) in the subject line.
    internal = 'INTERNAL ' if self.is_internal_request(domain, referer) else ''
    mail_managers("Broken %slink on %s" % (internal, domain), content, fail_silently=True)
    return response
1,707
https://github.com/bitlabstudio/django-libs/blob/2c5376cda084bf16edea540e0f6999f1d844afd0/django_libs/middleware.py#L94-L129
[ "def", "load", "(", "cls", ",", "fname", ",", "args", ")", ":", "if", "args", ".", "type", "==", "JSON", ":", "if", "fname", ".", "endswith", "(", "'.bz2'", ")", ":", "open_", "=", "bz2", ".", "open", "else", ":", "open_", "=", "open", "if", "args", ".", "progress", ":", "print", "(", "'Loading JSON data...'", ")", "with", "open_", "(", "fname", ",", "'rt'", ")", "as", "fp", ":", "storage", "=", "JsonStorage", ".", "load", "(", "fp", ")", "else", ":", "storage", "=", "SqliteStorage", ".", "load", "(", "fname", ")", "if", "args", ".", "settings", "is", "not", "None", ":", "extend", "(", "storage", ".", "settings", ",", "args", ".", "settings", ")", "return", "cls", ".", "from_storage", "(", "storage", ")" ]
Returns True if referring URL is the same domain as current request .
def is_internal_request(self, domain, referer):
    """Returns True if referring URL is the same domain as current request.

    Different subdomains are treated as different domains.
    """
    pattern = "^https?://%s/" % re.escape(domain)
    return re.match(pattern, referer) is not None
1,708
https://github.com/bitlabstudio/django-libs/blob/2c5376cda084bf16edea540e0f6999f1d844afd0/django_libs/middleware.py#L131-L137
[ "def", "start_transmit", "(", "self", ",", "blocking", "=", "False", ",", "start_packet_groups", "=", "True", ",", "*", "ports", ")", ":", "port_list", "=", "self", ".", "set_ports_list", "(", "*", "ports", ")", "if", "start_packet_groups", ":", "port_list_for_packet_groups", "=", "self", ".", "ports", ".", "values", "(", ")", "port_list_for_packet_groups", "=", "self", ".", "set_ports_list", "(", "*", "port_list_for_packet_groups", ")", "self", ".", "api", ".", "call_rc", "(", "'ixClearTimeStamp {}'", ".", "format", "(", "port_list_for_packet_groups", ")", ")", "self", ".", "api", ".", "call_rc", "(", "'ixStartPacketGroups {}'", ".", "format", "(", "port_list_for_packet_groups", ")", ")", "self", ".", "api", ".", "call_rc", "(", "'ixStartTransmit {}'", ".", "format", "(", "port_list", ")", ")", "time", ".", "sleep", "(", "0.2", ")", "if", "blocking", ":", "self", ".", "wait_transmit", "(", "*", "ports", ")" ]
Parses a duration string representation
def parse(self, representation):
    """Parses a duration string representation.

    Args:
        representation (str): Duration text to tokenize and scale.

    Returns:
        list: ``DurationRepresentation`` items, one per extracted token pair.

    Raises:
        ScaleFormatError: If the representation is malformed.
    """
    pairs = extract_tokens(representation)
    try:
        return [DurationRepresentation(float(pair[0]), Scale(pair[1]))
                for pair in pairs]
    except ValueError:
        raise ScaleFormatError("Malformed duration representation: {0}".format(representation))
1,709
https://github.com/oleiade/durations/blob/62c176dfa7d36d5c59bf93bdebfdc80ab53757bd/durations/duration.py#L53-L71
[ "def", "parse", "(", "self", ",", "data", ")", ":", "categories", "=", "data", ".", "split", "(", "\"\\n\\n\"", ")", "reference", "=", "{", "}", "reference_points", "=", "{", "}", "genre_index", "=", "[", "]", "tag_index", "=", "[", "]", "for", "category", "in", "categories", ":", "entries", "=", "category", ".", "strip", "(", ")", ".", "split", "(", "\"\\n\"", ")", "entry_category", ",", "entry_points", "=", "self", ".", "_parse_entry", "(", "entries", "[", "0", "]", ".", "lower", "(", ")", ")", "if", "entry_category", ".", "startswith", "(", "\"#\"", ")", ":", "continue", "for", "entry", "in", "entries", ":", "entry", "=", "entry", ".", "lower", "(", ")", "if", "not", "entry", ":", "continue", "# Comment, ignore", "if", "entry", ".", "startswith", "(", "\"#\"", ")", ":", "continue", "# Handle genre", "if", "not", "entry", ".", "startswith", "(", "\"-\"", ")", ":", "genre", ",", "points", "=", "self", ".", "_parse_entry", "(", "entry", ")", "reference", "[", "genre", "]", "=", "entry_category", "reference_points", "[", "genre", "]", "=", "points", "genre_index", ".", "append", "(", "genre", ")", "# Handle tag", "else", ":", "tag", "=", "entry", "[", "1", ":", "]", "tag", ",", "points", "=", "self", ".", "_parse_entry", "(", "tag", ",", "limit", "=", "9.5", ")", "reference", "[", "tag", "]", "=", "entry_category", "reference_points", "[", "tag", "]", "=", "points", "tag_index", ".", "append", "(", "tag", ")", "self", ".", "reference", "=", "reference", "self", ".", "genres", "=", "genre_index", "self", ".", "tags", "=", "tag_index", "self", ".", "points", "=", "reference_points" ]
Get or create an entry using obtained information from an IP .
def get_or_create_from_ip(ip):
    """Get or create an entry using obtained information from an IP.

    Args:
        ip (str): IP address to look up via the API handler.

    Returns:
        tuple: ``(instance, created)`` from the ORM, or ``(None, False)``
        when the API returned no usable data.
    """
    data = ip_api_handler.get(ip)
    # Bail out unless the API returned at least one non-empty value.
    if not data or not any(v for v in data.values()):
        return None, False
    # Fall back to the queried IP when the API omitted it.
    if not data.get('ip_address'):
        data['ip_address'] = ip
    return IPInfo.objects.get_or_create(**data)
1,710
https://github.com/Genida/django-meerkat/blob/486502a75bb0800266db785fd32717d8c0eb8deb/src/meerkat/logs/models.py#L128-L143
[ "def", "if_sqlserver_disable_constraints_triggers", "(", "session", ":", "SqlASession", ",", "tablename", ":", "str", ")", "->", "None", ":", "with", "if_sqlserver_disable_constraints", "(", "session", ",", "tablename", ")", ":", "with", "if_sqlserver_disable_triggers", "(", "session", ",", "tablename", ")", ":", "yield" ]
Update the IP info .
def update_ip_info(self, since_days=10, save=False, force=False):
    """Update the IP info.

    Args:
        since_days (int): Skip the external lookup if the last check is at
            most this many days old.
        save (bool): Save this model even when ``ip_info`` was not changed.
        force (bool): Replace a differing ``ip_info`` with the cached one
            even though the check is recent.

    Returns:
        bool: True when ``ip_info`` was set or updated, False otherwise.
    """
    # If ip already checked
    try:
        last_check = IPInfoCheck.objects.get(ip_address=self.client_ip_address)

        # If checked less than since_days ago, don't check again
        since_last = datetime.date.today() - last_check.date
        if since_last <= datetime.timedelta(days=since_days):
            # Reuse the cached ip_info when ours is missing, or when it
            # differs and the caller forces a refresh.
            if not self.ip_info or (self.ip_info != last_check.ip_info and force):
                self.ip_info = last_check.ip_info
                self.save()
                return True
            elif save:
                self.save()
            return False

        # Get or create ip_info object
        ip_info, created = IPInfo.get_or_create_from_ip(self.client_ip_address)

        # Update check time
        last_check.date = datetime.date.today()
        last_check.save()

        # Maybe data changed
        if created:
            last_check.ip_info = ip_info
            self.ip_info = ip_info
            self.save()
            return True
        elif save:
            self.save()
        return False
    except IPInfoCheck.DoesNotExist:
        # Else if ip never checked, check it and set ip_info
        self.ip_info = IPInfoCheck.check_ip(self.client_ip_address)
        self.save()
        return True
1,711
https://github.com/Genida/django-meerkat/blob/486502a75bb0800266db785fd32717d8c0eb8deb/src/meerkat/logs/models.py#L333-L387
[ "def", "_unscramble_regressor_columns", "(", "parent_data", ",", "data", ")", ":", "matches", "=", "[", "'_power[0-9]+'", ",", "'_derivative[0-9]+'", "]", "var", "=", "OrderedDict", "(", "(", "c", ",", "deque", "(", ")", ")", "for", "c", "in", "parent_data", ".", "columns", ")", "for", "c", "in", "data", ".", "columns", ":", "col", "=", "c", "for", "m", "in", "matches", ":", "col", "=", "re", ".", "sub", "(", "m", ",", "''", ",", "col", ")", "if", "col", "==", "c", ":", "var", "[", "col", "]", ".", "appendleft", "(", "c", ")", "else", ":", "var", "[", "col", "]", ".", "append", "(", "c", ")", "unscrambled", "=", "reduce", "(", "(", "lambda", "x", ",", "y", ":", "x", "+", "y", ")", ",", "var", ".", "values", "(", ")", ")", "return", "data", "[", "[", "*", "unscrambled", "]", "]" ]
Start a thread to continuously read log files and append lines in DB .
def start_daemon():
    """Start a thread to continuously read log files and append lines in DB.

    Idempotent: an already-running daemon thread is reused rather than
    starting a second one.

    Returns:
        The ``RequestLog.ParseToDBThread`` daemon instance.
    """
    if RequestLog.daemon is None:
        nginx_parser = get_nginx_parser()
        RequestLog.daemon = RequestLog.ParseToDBThread(nginx_parser, daemon=True)
        RequestLog.daemon.start()
    return RequestLog.daemon
1,712
https://github.com/Genida/django-meerkat/blob/486502a75bb0800266db785fd32717d8c0eb8deb/src/meerkat/logs/models.py#L648-L662
[ "def", "author_info", "(", "name", ",", "contact", "=", "None", ",", "public_key", "=", "None", ")", ":", "return", "Storage", "(", "name", "=", "name", ",", "contact", "=", "contact", ",", "public_key", "=", "public_key", ")" ]
Construct a ChemKED instance directly from a ReSpecTh file .
def from_respecth(cls, filename_xml, file_author='', file_author_orcid=''):
    """Construct a ChemKED instance directly from a ReSpecTh file.

    Args:
        filename_xml (str): Path of the ReSpecTh XML file.
        file_author (str, optional): Additional author to credit in the file.
        file_author_orcid (str, optional): ORCID of the additional author.

    Returns:
        A class instance built from the converted dictionary.
    """
    converted = ReSpecTh_to_ChemKED(filename_xml, file_author, file_author_orcid,
                                    validate=False)
    return cls(dict_input=converted)
1,713
https://github.com/pr-omethe-us/PyKED/blob/d9341a068c1099049a3f1de41c512591f342bf64/pyked/chemked.py#L146-L167
[ "def", "fillNullValues", "(", "col", ",", "rows", ")", ":", "lastval", "=", "None", "nullfunc", "=", "isNullFunc", "(", ")", "n", "=", "0", "rowsToFill", "=", "list", "(", "rows", ")", "for", "r", "in", "Progress", "(", "col", ".", "sheet", ".", "rows", ",", "'filling'", ")", ":", "# loop over all rows", "try", ":", "val", "=", "col", ".", "getValue", "(", "r", ")", "except", "Exception", "as", "e", ":", "val", "=", "e", "if", "nullfunc", "(", "val", ")", "and", "r", "in", "rowsToFill", ":", "if", "lastval", ":", "col", ".", "setValue", "(", "r", ",", "lastval", ")", "n", "+=", "1", "else", ":", "lastval", "=", "val", "col", ".", "recalc", "(", ")", "status", "(", "\"filled %d values\"", "%", "n", ")" ]
Validate the parsed YAML file for adherence to the ChemKED format.
def validate_yaml(self, properties):
    """Validate the parsed YAML file for adherence to the ChemKED format.

    Args:
        properties (dict): Parsed YAML content to validate.

    Raises:
        ValueError: When validation fails. Illegal-value errors are printed
            with the allowed values before raising.
    """
    validator = OurValidator(schema)
    if validator.validate(properties):
        return
    for key, value in validator.errors.items():
        if any('unallowed value' in v for v in value):
            message = ('{key} has an illegal value. Allowed values are {values} and are case '
                       'sensitive.')
            print(message.format(key=key, values=schema[key]['allowed']))
    raise ValueError(validator.errors)
1,714
https://github.com/pr-omethe-us/PyKED/blob/d9341a068c1099049a3f1de41c512591f342bf64/pyked/chemked.py#L169-L186
[ "def", "fillNullValues", "(", "col", ",", "rows", ")", ":", "lastval", "=", "None", "nullfunc", "=", "isNullFunc", "(", ")", "n", "=", "0", "rowsToFill", "=", "list", "(", "rows", ")", "for", "r", "in", "Progress", "(", "col", ".", "sheet", ".", "rows", ",", "'filling'", ")", ":", "# loop over all rows", "try", ":", "val", "=", "col", ".", "getValue", "(", "r", ")", "except", "Exception", "as", "e", ":", "val", "=", "e", "if", "nullfunc", "(", "val", ")", "and", "r", "in", "rowsToFill", ":", "if", "lastval", ":", "col", ".", "setValue", "(", "r", ",", "lastval", ")", "n", "+=", "1", "else", ":", "lastval", "=", "val", "col", ".", "recalc", "(", ")", "status", "(", "\"filled %d values\"", "%", "n", ")" ]
Write new ChemKED YAML file based on object .
def write_file(self, filename, *, overwrite=False):
    """Write new ChemKED YAML file based on object.

    Args:
        filename (str): Destination path for the YAML file.
        overwrite (bool, optional): When False (default), refuse to replace
            an existing file.

    Raises:
        OSError: If ``filename`` exists and ``overwrite`` is False.
    """
    # Ensure file isn't already present unless overwriting was requested.
    if not overwrite and exists(filename):
        message = (filename + ' already present. Specify "overwrite=True" '
                   'to overwrite, or rename.')
        raise OSError(message)
    with open(filename, 'w') as yaml_file:
        yaml.dump(self._properties, yaml_file)
1,715
https://github.com/pr-omethe-us/PyKED/blob/d9341a068c1099049a3f1de41c512591f342bf64/pyked/chemked.py#L310-L332
[ "def", "weighted_expiration", "(", "weights", ",", "contract_dates", ")", ":", "# NOQA", "cols", "=", "weights", ".", "columns", "weights", "=", "weights", ".", "reset_index", "(", "level", "=", "-", "1", ")", "expiries", "=", "contract_dates", ".", "to_dict", "(", ")", "weights", ".", "loc", "[", ":", ",", "\"expiry\"", "]", "=", "weights", ".", "iloc", "[", ":", ",", "0", "]", ".", "apply", "(", "lambda", "x", ":", "expiries", "[", "x", "]", ")", "diffs", "=", "(", "pd", ".", "DatetimeIndex", "(", "weights", ".", "expiry", ")", "-", "pd", ".", "Series", "(", "weights", ".", "index", ",", "weights", ".", "index", ")", ")", ".", "apply", "(", "lambda", "x", ":", "x", ".", "days", ")", "weights", "=", "weights", ".", "loc", "[", ":", ",", "cols", "]", "wexp", "=", "weights", ".", "mul", "(", "diffs", ",", "axis", "=", "0", ")", ".", "groupby", "(", "level", "=", "0", ")", ".", "sum", "(", ")", "return", "wexp" ]
Process the uncertainty information from a given quantity and return it
def process_quantity(self, properties):
    """Process the uncertainty information from a given quantity and return it.

    Args:
        properties (list): First element is the quantity string; optional
            second element is a dict with ``uncertainty-type`` ('relative'
            or 'absolute') and either ``uncertainty`` or both
            ``upper-uncertainty`` and ``lower-uncertainty``.

    Returns:
        A pint quantity, with ``plus_minus`` uncertainty attached when an
        uncertainty dict was provided.

    Raises:
        ValueError: If the uncertainty specification is incomplete or the
            uncertainty type is unrecognized.
    """
    quant = Q_(properties[0])
    if len(properties) > 1:
        unc = properties[1]
        # NOTE(review): missing keys default to False, so a literal 0
        # uncertainty would also take the asymmetric/raise path — presumably
        # zero uncertainties never occur in practice; confirm.
        uncertainty = unc.get('uncertainty', False)
        upper_uncertainty = unc.get('upper-uncertainty', False)
        lower_uncertainty = unc.get('lower-uncertainty', False)
        uncertainty_type = unc.get('uncertainty-type')
        if uncertainty_type == 'relative':
            if uncertainty:
                quant = quant.plus_minus(float(uncertainty), relative=True)
            elif upper_uncertainty and lower_uncertainty:
                # Asymmetric bounds are collapsed to a symmetric value.
                warn('Asymmetric uncertainties are not supported. The '
                     'maximum of lower-uncertainty and upper-uncertainty '
                     'has been used as the symmetric uncertainty.')
                uncertainty = max(float(upper_uncertainty), float(lower_uncertainty))
                quant = quant.plus_minus(uncertainty, relative=True)
            else:
                raise ValueError('Either "uncertainty" or "upper-uncertainty" and '
                                 '"lower-uncertainty" need to be specified.')
        elif uncertainty_type == 'absolute':
            if uncertainty:
                # Absolute uncertainties carry units; convert to the
                # quantity's units before attaching the magnitude.
                uncertainty = Q_(uncertainty)
                quant = quant.plus_minus(uncertainty.to(quant.units).magnitude)
            elif upper_uncertainty and lower_uncertainty:
                warn('Asymmetric uncertainties are not supported. The '
                     'maximum of lower-uncertainty and upper-uncertainty '
                     'has been used as the symmetric uncertainty.')
                uncertainty = max(Q_(upper_uncertainty), Q_(lower_uncertainty))
                quant = quant.plus_minus(uncertainty.to(quant.units).magnitude)
            else:
                raise ValueError('Either "uncertainty" or "upper-uncertainty" and '
                                 '"lower-uncertainty" need to be specified.')
        else:
            raise ValueError('uncertainty-type must be one of "absolute" or "relative"')
    return quant
1,716
https://github.com/pr-omethe-us/PyKED/blob/d9341a068c1099049a3f1de41c512591f342bf64/pyked/chemked.py#L722-L760
[ "def", "start_session", "(", "self", ")", ":", "self", ".", "_android_api", ".", "start_session", "(", ")", "self", ".", "_manga_api", ".", "cr_start_session", "(", ")", "return", "self", ".", "session_started" ]
Get the composition in a string format suitable for input to Cantera .
def get_cantera_composition_string(self, species_conversion=None):
    """Get the composition in a string format suitable for input to Cantera.

    Args:
        species_conversion (dict, optional): Mapping from a species
            identifier (name, InChI, or SMILES) to the name that should
            appear in the output. The caller's dict is not modified.

    Returns:
        str: Comma-separated ``species:amount`` pairs; mole-percent amounts
        are scaled down to fractions.

    Raises:
        ValueError: For an unknown composition type, more than one
            conversion matching a single species, or conversion entries
            that match no species.
    """
    if self.composition_type in ['mole fraction', 'mass fraction']:
        factor = 1.0
    elif self.composition_type == 'mole percent':
        factor = 100.0
    else:
        raise ValueError('Unknown composition type: {}'.format(self.composition_type))

    if species_conversion is None:
        comps = ['{!s}:{:.4e}'.format(c.species_name, c.amount.magnitude/factor)
                 for c in self.composition.values()]
    else:
        # BUG FIX: work on a copy — the original popped entries out of the
        # caller's dict, a caller-visible side effect.
        conversion = dict(species_conversion)
        comps = []
        for c in self.composition.values():
            amount = c.amount.magnitude/factor
            idents = [getattr(c, s, False) for s in ['species_name', 'InChI', 'SMILES']]
            present = [i in conversion for i in idents]
            if not any(present):
                comps.append('{!s}:{:.4e}'.format(c.species_name, amount))
            else:
                if len([i for i in present if i]) > 1:
                    raise ValueError('More than one conversion present for species {}'.format(
                        c.species_name))
                ident = idents[present.index(True)]
                species_replacement_name = conversion.pop(ident)
                comps.append('{!s}:{:.4e}'.format(species_replacement_name, amount))
        # Any leftover entries matched no species in the composition.
        if len(conversion) > 0:
            raise ValueError('Unknown species in conversion: {}'.format(conversion))

    return ', '.join(comps)
1,717
https://github.com/pr-omethe-us/PyKED/blob/d9341a068c1099049a3f1de41c512591f342bf64/pyked/chemked.py#L762-L815
[ "def", "cleanup", "(", "keep_latest", "=", "True", ")", ":", "removed", "=", "[", "]", "# Loop over all installed kernel packages", "for", "kernel", "in", "list_installed", "(", ")", ":", "# Keep the active kernel package", "if", "kernel", "==", "active", "(", ")", ":", "continue", "# Optionally keep the latest kernel package", "if", "keep_latest", "and", "kernel", "==", "latest_installed", "(", ")", ":", "continue", "# Remove the kernel package", "removed", ".", "extend", "(", "remove", "(", "kernel", ")", "[", "'removed'", "]", ")", "return", "{", "'removed'", ":", "removed", "}" ]
Get the mole fractions in a string format suitable for input to Cantera .
def get_cantera_mole_fraction(self, species_conversion=None):
    """Get the mole fractions in a string format suitable for input to Cantera.

    Mass-fraction data cannot be converted and is rejected; everything
    else is delegated to ``get_cantera_composition_string``.

    :param species_conversion: optional mapping of species identifiers to
        output names, passed through unchanged
    :raises ValueError: if the stored composition is given as mass fractions
    """
    if self.composition_type == 'mass fraction':
        raise ValueError('Cannot get mole fractions from the given composition.\n'
                         '{}'.format(self.composition))
    return self.get_cantera_composition_string(species_conversion)
1,718
https://github.com/pr-omethe-us/PyKED/blob/d9341a068c1099049a3f1de41c512591f342bf64/pyked/chemked.py#L817-L851
[ "def", "remove_selected_classification", "(", "self", ")", ":", "removed_classes", "=", "self", ".", "hazard_class_form", ".", "selectedItems", "(", ")", "current_item", "=", "self", ".", "hazard_class_form", ".", "currentItem", "(", ")", "removed_index", "=", "self", ".", "hazard_class_form", ".", "indexFromItem", "(", "current_item", ")", "del", "self", ".", "classification", "[", "removed_index", ".", "row", "(", ")", "]", "for", "item", "in", "removed_classes", ":", "self", ".", "hazard_class_form", ".", "takeItem", "(", "self", ".", "hazard_class_form", ".", "row", "(", "item", ")", ")" ]
Get the mass fractions in a string format suitable for input to Cantera .
def get_cantera_mass_fraction(self, species_conversion=None):
    """Get the mass fractions in a string format suitable for input to Cantera.

    Mole-based data (fractions or percent) cannot be converted and is
    rejected; everything else is delegated to
    ``get_cantera_composition_string``.

    :param species_conversion: optional mapping of species identifiers to
        output names, passed through unchanged
    :raises ValueError: if the stored composition is mole-based
    """
    if self.composition_type in ['mole fraction', 'mole percent']:
        raise ValueError('Cannot get mass fractions from the given composition.\n'
                         '{}'.format(self.composition))
    return self.get_cantera_composition_string(species_conversion)
1,719
https://github.com/pr-omethe-us/PyKED/blob/d9341a068c1099049a3f1de41c512591f342bf64/pyked/chemked.py#L853-L889
[ "def", "cleanup", "(", "keep_latest", "=", "True", ")", ":", "removed", "=", "[", "]", "# Loop over all installed kernel packages", "for", "kernel", "in", "list_installed", "(", ")", ":", "# Keep the active kernel package", "if", "kernel", "==", "active", "(", ")", ":", "continue", "# Optionally keep the latest kernel package", "if", "keep_latest", "and", "kernel", "==", "latest_installed", "(", ")", ":", "continue", "# Remove the kernel package", "removed", ".", "extend", "(", "remove", "(", "kernel", ")", "[", "'removed'", "]", ")", "return", "{", "'removed'", ":", "removed", "}" ]
Only runs the method if the lockfile is not acquired .
def lockfile(lockfile_name, lock_wait_timeout=-1):
    """Decorator factory: only runs the method if the lockfile can be acquired.

    :param lockfile_name: path of the lock file guarding the callable
    :param lock_wait_timeout: seconds to wait for the lock; ``-1`` waits
        indefinitely
    :return: a decorator whose wrapper silently skips the call (returning
        ``None``) when the lock is already held or acquisition times out
    """
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            lock = FileLock(lockfile_name)
            try:
                lock.acquire(lock_wait_timeout)
            except (AlreadyLocked, LockTimeout):
                # Someone else holds the lock: skip the call entirely.
                return None
            try:
                return func(*args, **kwargs)
            finally:
                # Release even if the wrapped callable raises.
                lock.release()
        return wrapper
    return decorator
1,720
https://github.com/bitlabstudio/django-libs/blob/2c5376cda084bf16edea540e0f6999f1d844afd0/django_libs/decorators.py#L11-L51
[ "def", "deserialize", "(", "obj", ")", ":", "# Be careful of shallow copy here", "target", "=", "dict", "(", "obj", ")", "class_name", "=", "None", "if", "'__class__'", "in", "target", ":", "class_name", "=", "target", ".", "pop", "(", "'__class__'", ")", "if", "'__module__'", "in", "obj", ":", "obj", ".", "pop", "(", "'__module__'", ")", "# Use getattr(module, class_name) for custom types if needed", "if", "class_name", "==", "'datetime'", ":", "return", "datetime", ".", "datetime", "(", "tzinfo", "=", "utc", ",", "*", "*", "target", ")", "if", "class_name", "==", "'StreamingBody'", ":", "return", "StringIO", "(", "target", "[", "'body'", "]", ")", "# Return unrecognized structures as-is", "return", "obj" ]
Checks whether a string is an email address or not.
def get_username(identifier):
    """Return a username for the given identifier.

    If *identifier* looks like an email address, look up the matching user
    and return their username; otherwise the identifier is assumed to
    already be a username and is returned unchanged.

    :param identifier: a username or an email address
    :raises Http404: if the identifier is an email address but the user
        lookup fails
    """
    # Raw string avoids the invalid '\w' escape warning of a plain literal.
    pattern = re.compile(r'.+@\w+\..+')
    if pattern.match(identifier):
        try:
            user = User.objects.get(email=identifier)
        except Exception:
            # Narrowed from a bare ``except:`` so that system-exiting
            # exceptions (KeyboardInterrupt, SystemExit) still propagate.
            raise Http404
        return user.username
    return identifier
1,721
https://github.com/bitlabstudio/django-libs/blob/2c5376cda084bf16edea540e0f6999f1d844afd0/django_libs/decorators.py#L54-L65
[ "def", "swap_buffers", "(", "self", ")", ":", "self", ".", "widget", ".", "swapBuffers", "(", ")", "self", ".", "set_default_viewport", "(", ")", "self", ".", "app", ".", "processEvents", "(", ")", "self", ".", "frames", "+=", "1" ]
Creates a payload with no none values .
def _create_payload ( self , symbols ) : payload = { 'access_key' : self . access_key } if symbols is not None : payload [ 'symbols' ] = ',' . join ( symbols ) return payload
1,722
https://github.com/amatellanes/fixerio/blob/0890e0ee3d39a2a3a2396d934c32bc9ed5f4c974/fixerio/client.py#L32-L44
[ "def", "cublasZgemm", "(", "handle", ",", "transa", ",", "transb", ",", "m", ",", "n", ",", "k", ",", "alpha", ",", "A", ",", "lda", ",", "B", ",", "ldb", ",", "beta", ",", "C", ",", "ldc", ")", ":", "status", "=", "_libcublas", ".", "cublasZgemm_v2", "(", "handle", ",", "_CUBLAS_OP", "[", "transa", "]", ",", "_CUBLAS_OP", "[", "transb", "]", ",", "m", ",", "n", ",", "k", ",", "ctypes", ".", "byref", "(", "cuda", ".", "cuDoubleComplex", "(", "alpha", ".", "real", ",", "alpha", ".", "imag", ")", ")", ",", "int", "(", "A", ")", ",", "lda", ",", "int", "(", "B", ")", ",", "ldb", ",", "ctypes", ".", "byref", "(", "cuda", ".", "cuDoubleComplex", "(", "beta", ".", "real", ",", "beta", ".", "imag", ")", ")", ",", "int", "(", "C", ")", ",", "ldc", ")", "cublasCheckStatus", "(", "status", ")" ]
Get historical rates for any day since date .
def historical_rates(self, date, symbols=None):
    """Get historical rates for any day since the service's start date.

    :param date: the day to query, as a ``datetime.date`` or an ISO 8601
        string (``YYYY-MM-DD``)
    :param symbols: currency symbols to request; falls back to the
        symbols configured on the client when not given
    :return: the decoded JSON response from the API
    :raises FixerioException: if the HTTP request fails for any reason
    """
    try:
        # The API expects the date in ISO 8601 form; strings are assumed
        # to already be formatted that way.
        if isinstance(date, datetime.date):
            date = date.isoformat()
        payload = self._create_payload(symbols or self.symbols)
        response = requests.get(BASE_URL + date, params=payload)
        response.raise_for_status()
        return response.json()
    except requests.exceptions.RequestException as ex:
        raise FixerioException(str(ex))
1,723
https://github.com/amatellanes/fixerio/blob/0890e0ee3d39a2a3a2396d934c32bc9ed5f4c974/fixerio/client.py#L69-L97
[ "def", "commit", "(", "self", ")", ":", "if", "self", ".", "session", "is", "not", "None", ":", "logger", ".", "info", "(", "\"committing transaction in %s\"", "%", "self", ")", "tmp", "=", "self", ".", "stable", "self", ".", "stable", ",", "self", ".", "session", "=", "self", ".", "session", ",", "None", "self", ".", "istable", "=", "1", "-", "self", ".", "istable", "self", ".", "write_istable", "(", ")", "tmp", ".", "close", "(", ")", "# don't wait for gc, release resources manually", "self", ".", "lock_update", ".", "release", "(", ")", "else", ":", "logger", ".", "warning", "(", "\"commit called but there's no open session in %s\"", "%", "self", ")" ]
Return a list where the duplicates have been removed .
def distinct(l):
    """Lazily produce the items of *l* with duplicates removed.

    First-occurrence order is preserved; items must be hashable.

    :param l: an iterable of hashable items
    :return: a generator over the unique items
    """
    seen = set()
    for item in l:
        if item not in seen:
            seen.add(item)
            yield item
1,724
https://github.com/Genida/django-meerkat/blob/486502a75bb0800266db785fd32717d8c0eb8deb/src/meerkat/utils/list.py#L6-L18
[ "def", "_initialize_progress_bar", "(", "self", ")", ":", "widgets", "=", "[", "'Download: '", ",", "Percentage", "(", ")", ",", "' '", ",", "Bar", "(", ")", ",", "' '", ",", "AdaptiveETA", "(", ")", ",", "' '", ",", "FileTransferSpeed", "(", ")", "]", "self", ".", "_downloadProgressBar", "=", "ProgressBar", "(", "widgets", "=", "widgets", ",", "max_value", "=", "self", ".", "_imageCount", ")", ".", "start", "(", ")" ]
Does the heavy lifting of finding format modules .
def iter_format_modules(lang):
    """Do the heavy lifting of finding format modules.

    Yields every importable ``formats`` module for *lang*, searching the
    configured custom format-module paths before Django's bundled locale
    packages. Unknown languages yield nothing.
    """
    if not check_for_language(lang):
        return
    format_locations = [path + '.%s' for path in CUSTOM_FORMAT_MODULE_PATHS]
    format_locations.append('django.conf.locale.%s')
    locale = to_locale(lang)
    # Try the full locale first, then its bare language part (pt_BR -> pt).
    locales = [locale]
    if '_' in locale:
        locales.append(locale.split('_')[0])
    for location in format_locations:
        for loc in locales:
            try:
                yield import_module('.formats', location % loc)
            except ImportError:
                # Locations without a formats module are simply skipped.
                pass
1,725
https://github.com/bitlabstudio/django-libs/blob/2c5376cda084bf16edea540e0f6999f1d844afd0/django_libs/format_utils.py#L50-L69
[ "def", "delete", "(", "self", ")", ":", "response", "=", "self", ".", "http_request", "(", "self", ".", "_url", ",", "'DELETE'", ")", "if", "response", ".", "status", "!=", "204", ":", "self", ".", "raise_http_error", "(", "response", ")" ]
Returns a list of the format modules found
def get_format_modules(lang=None, reverse=False):
    """Return a list of the format modules found for *lang*.

    Results are memoised per language in ``_format_modules_cache``.

    :param lang: language code; defaults to the currently active language
    :param reverse: when True, return the modules in reverse order
    """
    if lang is None:
        lang = get_language()
    modules = _format_modules_cache.setdefault(lang, list(iter_format_modules(lang)))
    return list(reversed(modules)) if reverse else modules
1,726
https://github.com/bitlabstudio/django-libs/blob/2c5376cda084bf16edea540e0f6999f1d844afd0/django_libs/format_utils.py#L72-L83
[ "def", "_connect", "(", "self", ")", ":", "try", ":", "# Open Connection", "self", ".", "influx", "=", "InfluxDBClient", "(", "self", ".", "hostname", ",", "self", ".", "port", ",", "self", ".", "username", ",", "self", ".", "password", ",", "self", ".", "database", ",", "self", ".", "ssl", ")", "# Log", "self", ".", "log", ".", "debug", "(", "\"InfluxdbHandler: Established connection to \"", "\"%s:%d/%s.\"", ",", "self", ".", "hostname", ",", "self", ".", "port", ",", "self", ".", "database", ")", "except", "Exception", "as", "ex", ":", "# Log Error", "self", ".", "_throttle_error", "(", "\"InfluxdbHandler: Failed to connect to \"", "\"%s:%d/%s. %s\"", ",", "self", ".", "hostname", ",", "self", ".", "port", ",", "self", ".", "database", ",", "ex", ")", "# Close Socket", "self", ".", "_close", "(", ")", "return" ]
Main entry point for a request - response process .
def as_view(cls, **initkwargs):
    """Main entry point for a request-response process.

    Validates *initkwargs*, then returns a ``view`` callable that
    instantiates the class for every request, stores the request data and
    the auth-related keyword arguments on the instance, and dispatches to
    the matching HTTP-method handler.

    :raises TypeError: if a keyword collides with an HTTP method name or
        is not an existing attribute of the class
    """
    # Sanitize keyword arguments: reject HTTP method names and unknown
    # attributes up front, at configuration time rather than per request.
    for key in initkwargs:
        if key in cls.http_method_names:
            raise TypeError("You tried to pass in the %s method name as a "
                            "keyword argument to %s(). Don't do that."
                            % (key, cls.__name__))
        if not hasattr(cls, key):
            raise TypeError("%s() received an invalid keyword %r. as_view "
                            "only accepts arguments that are already "
                            "attributes of the class." % (cls.__name__, key))

    def view(request, *args, **kwargs):
        self = cls(**initkwargs)
        # Mirror GET handling for HEAD requests unless HEAD is defined.
        if hasattr(self, 'get') and not hasattr(self, 'head'):
            self.head = self.get
        self.request = request
        self.args = args
        self.kwargs = kwargs
        # Views (and their kwargs) to delegate to depending on whether
        # the requesting user is authenticated.
        self.authed_view = initkwargs.get('authed_view')
        self.authed_view_kwargs = initkwargs.get('authed_view_kwargs')
        self.anonymous_view = initkwargs.get('anonymous_view')
        self.anonymous_view_kwargs = initkwargs.get('anonymous_view_kwargs')
        return self.dispatch(request, *args, **kwargs)

    # Take name and docstring from the class...
    update_wrapper(view, cls, updated=())
    # ...and possible attributes set by decorators,
    # like csrf_exempt from dispatch.
    update_wrapper(view, cls.dispatch, assigned=())
    return view
1,727
https://github.com/bitlabstudio/django-libs/blob/2c5376cda084bf16edea540e0f6999f1d844afd0/django_libs/views.py#L54-L88
[ "def", "_get_apis_with_config", "(", "self", ",", "logical_id", ")", ":", "properties", "=", "self", ".", "_get_properties", "(", "logical_id", ")", "# These configs need to be applied to each API", "binary_media", "=", "sorted", "(", "list", "(", "properties", ".", "binary_media_types", ")", ")", "# Also sort the list to keep the ordering stable", "cors", "=", "properties", ".", "cors", "result", "=", "[", "]", "for", "api", "in", "properties", ".", "apis", ":", "# Create a copy of the API with updated configuration", "updated_api", "=", "api", ".", "_replace", "(", "binary_media_types", "=", "binary_media", ",", "cors", "=", "cors", ")", "result", ".", "append", "(", "updated_api", ")", "return", "result" ]
Get the context .
def context(self):
    """Get the context for the status-codes box.

    Builds the chart series (attack counts plus one series per HTTP
    status-code family, each indexed by date) and returns them
    JSON-encoded together with the generic chart configuration.
    """
    stats = status_codes_by_date_stats()
    attacks_data = [{
        'type': 'line',
        'zIndex': 9,
        'name': _('Attacks'),
        'data': [(v[0], v[1]['attacks']) for v in stats]
    }]
    # One series per status-code family, with its stacking z-index.
    codes_data = [
        {'zIndex': z_index,
         'name': family,
         'data': [(v[0], v[1][code]) for v in stats]}
        for z_index, family, code in ((4, '2xx', 200), (5, '3xx', 300),
                                      (6, '4xx', 400), (8, '5xx', 500))
    ]
    return {'generic_chart': json.dumps(status_codes_by_date_chart()),
            'attacks_data': json.dumps(attacks_data),
            'codes_data': json.dumps(codes_data)}
1,728
https://github.com/Genida/django-meerkat/blob/486502a75bb0800266db785fd32717d8c0eb8deb/src/meerkat/logs/boxes.py#L68-L100
[ "def", "libvlc_video_set_crop_geometry", "(", "p_mi", ",", "psz_geometry", ")", ":", "f", "=", "_Cfunctions", ".", "get", "(", "'libvlc_video_set_crop_geometry'", ",", "None", ")", "or", "_Cfunction", "(", "'libvlc_video_set_crop_geometry'", ",", "(", "(", "1", ",", ")", ",", "(", "1", ",", ")", ",", ")", ",", "None", ",", "None", ",", "MediaPlayer", ",", "ctypes", ".", "c_char_p", ")", "return", "f", "(", "p_mi", ",", "psz_geometry", ")" ]
Get the items .
def widgets(self):
    """Get the items: one Highcharts widget per most-visited-pages chart."""
    return [
        Widget(html_id='most_visited_chart_%d' % index,
               content=json.dumps(chart),
               template='meerkat/widgets/highcharts.html',
               js_code=['plotOptions.tooltip.pointFormatter'])
        for index, chart in enumerate(most_visited_pages_charts())
    ]
1,729
https://github.com/Genida/django-meerkat/blob/486502a75bb0800266db785fd32717d8c0eb8deb/src/meerkat/logs/boxes.py#L118-L127
[ "def", "start_publishing", "(", "mysql_settings", ",", "*", "*", "kwargs", ")", ":", "_logger", ".", "info", "(", "'Start publishing from %s with:\\n%s'", "%", "(", "mysql_settings", ",", "kwargs", ")", ")", "kwargs", ".", "setdefault", "(", "'server_id'", ",", "random", ".", "randint", "(", "1000000000", ",", "4294967295", ")", ")", "kwargs", ".", "setdefault", "(", "'freeze_schema'", ",", "True", ")", "# connect to binlog stream", "stream", "=", "pymysqlreplication", ".", "BinLogStreamReader", "(", "mysql_settings", ",", "only_events", "=", "[", "row_event", ".", "DeleteRowsEvent", ",", "row_event", ".", "UpdateRowsEvent", ",", "row_event", ".", "WriteRowsEvent", "]", ",", "*", "*", "kwargs", ")", "\"\"\":type list[RowsEvent]\"\"\"", "for", "event", "in", "stream", ":", "# ignore non row events", "if", "not", "isinstance", "(", "event", ",", "row_event", ".", "RowsEvent", ")", ":", "continue", "_logger", ".", "debug", "(", "'Send binlog signal \"%s@%s.%s\"'", "%", "(", "event", ".", "__class__", ".", "__name__", ",", "event", ".", "schema", ",", "event", ".", "table", ")", ")", "signals", ".", "binlog_signal", ".", "send", "(", "event", ",", "stream", "=", "stream", ")", "signals", ".", "binlog_position_signal", ".", "send", "(", "(", "stream", ".", "log_file", ",", "stream", ".", "log_pos", ")", ")" ]
Retrieves a scale from its string representation.
def get(self, str_representation):
    """Retrieve a scale from its string representation.

    :param str_representation: one of the aliases of a supported scale
    :return: the first matching entry of ``self.SCALES``
    :raises ScaleFormatError: when no scale knows this representation
    """
    match = next((scale for scale in self.SCALES if str_representation in scale),
                 None)
    if match is None:
        raise ScaleFormatError("Unsupported scale format: {0}".format(str_representation))
    return match
1,730
https://github.com/oleiade/durations/blob/62c176dfa7d36d5c59bf93bdebfdc80ab53757bd/durations/scales.py#L52-L65
[ "def", "_parse_comments", "(", "element", ")", ":", "comments", "=", "[", "]", "items", "=", "element", ".", "findall", "(", "\"./{%s}comment\"", "%", "WP_NAMESPACE", ")", "for", "item", "in", "items", ":", "comment_id", "=", "item", ".", "find", "(", "\"./{%s}comment_id\"", "%", "WP_NAMESPACE", ")", ".", "text", "author", "=", "item", ".", "find", "(", "\"./{%s}comment_author\"", "%", "WP_NAMESPACE", ")", ".", "text", "email", "=", "item", ".", "find", "(", "\"./{%s}comment_author_email\"", "%", "WP_NAMESPACE", ")", ".", "text", "author_url", "=", "item", ".", "find", "(", "\"./{%s}comment_author_url\"", "%", "WP_NAMESPACE", ")", ".", "text", "author_ip", "=", "item", ".", "find", "(", "\"./{%s}comment_author_IP\"", "%", "WP_NAMESPACE", ")", ".", "text", "date", "=", "item", ".", "find", "(", "\"./{%s}comment_date\"", "%", "WP_NAMESPACE", ")", ".", "text", "date_gmt", "=", "item", ".", "find", "(", "\"./{%s}comment_date_gmt\"", "%", "WP_NAMESPACE", ")", ".", "text", "content", "=", "item", ".", "find", "(", "\"./{%s}comment_content\"", "%", "WP_NAMESPACE", ")", ".", "text", "approved", "=", "item", ".", "find", "(", "\"./{%s}comment_approved\"", "%", "WP_NAMESPACE", ")", ".", "text", "comment_type", "=", "item", ".", "find", "(", "\"./{%s}comment_type\"", "%", "WP_NAMESPACE", ")", ".", "text", "parent", "=", "item", ".", "find", "(", "\"./{%s}comment_parent\"", "%", "WP_NAMESPACE", ")", ".", "text", "user_id", "=", "item", ".", "find", "(", "\"./{%s}comment_user_id\"", "%", "WP_NAMESPACE", ")", ".", "text", "comment", "=", "{", "\"id\"", ":", "comment_id", ",", "\"author\"", ":", "author", ",", "\"author_email\"", ":", "email", ",", "\"author_url\"", ":", "author_url", ",", "\"author_ip\"", ":", "author_ip", ",", "\"date\"", ":", "date", ",", "\"date_gmt\"", ":", "date_gmt", ",", "\"content\"", ":", "content", ",", "\"approved\"", ":", "approved", ",", "\"type\"", ":", "comment_type", ",", "\"parent\"", ":", "parent", ",", "\"user_id\"", ":", "user_id", ",", 
"}", "comments", ".", "append", "(", "comment", ")", "return", "comments" ]
Asserts a provided string is a valid duration token representation
def valid_token(token):
    """Assert whether a provided string is a valid duration token.

    A token is valid when it is a digit string, a separator token, or a
    parsable scale representation.
    """
    # Constructing a Scale is the only way to know whether the token is a
    # valid scale: failure raises ScaleFormatError.
    is_scale = False
    try:
        Scale(token)
        is_scale = True
    except ScaleFormatError:
        pass
    return token.isdigit() or (token in SEPARATOR_TOKENS) or is_scale
1,731
https://github.com/oleiade/durations/blob/62c176dfa7d36d5c59bf93bdebfdc80ab53757bd/durations/parser.py#L10-L31
[ "def", "reassign_comment_to_book", "(", "self", ",", "comment_id", ",", "from_book_id", ",", "to_book_id", ")", ":", "# Implemented from template for", "# osid.resource.ResourceBinAssignmentSession.reassign_resource_to_bin", "self", ".", "assign_comment_to_book", "(", "comment_id", ",", "to_book_id", ")", "try", ":", "self", ".", "unassign_comment_from_book", "(", "comment_id", ",", "from_book_id", ")", "except", ":", "# something went wrong, roll back assignment to to_book_id", "self", ".", "unassign_comment_from_book", "(", "comment_id", ",", "to_book_id", ")", "raise" ]
Extracts duration tokens from a duration representation.
def extract_tokens(representation, separators=SEPARATOR_CHARACTERS):
    """Extract duration tokens from a duration representation.

    Splits *representation* into alternating (value, scale) runs and
    returns them paired up, e.g. ``"2h 3m"`` style input yields
    ``[('2', 'h'), ('3', 'm')]``-shaped output.

    :param representation: the duration string to tokenize
    :param separators: characters treated as token boundaries
    :raises InvalidTokenError: when a buffered token is neither numeric,
        a separator word, nor a valid scale
    """
    buff = ""
    elements = []
    last_index = 0  # NOTE(review): never read; kept for token fidelity.
    last_token = None
    for index, c in enumerate(representation):
        if c in separators:
            # Separator found: flush the buffer into the elements list.
            if buff:
                # Reject buffers that are not valid duration tokens.
                if not valid_token(buff):
                    raise InvalidTokenError("Duration representation {0} contains "
                                            "an invalid token: {1}".format(representation, buff))
                # Separator words such as "and" are ignored, not emitted.
                if not buff.strip() in SEPARATOR_TOKENS:
                    elements.append(buff)
                # Reset buffer and last-token marker either way.
                buff = ""
                last_token = None
        else:
            # Token-kind transition (digit run <-> letter run) ends the
            # current buffer and starts a new one with this character.
            token = compute_char_token(c)
            if (token is not None and last_token is not None and token != last_token):
                elements.append(buff)
                buff = c
            else:
                buff += c
            last_token = token
    # Flush whatever is left in the buffer at end of input.
    elements.append(buff)
    # Pair each value with the scale that follows it.
    return list(zip(elements[::2], elements[1::2]))
1,732
https://github.com/oleiade/durations/blob/62c176dfa7d36d5c59bf93bdebfdc80ab53757bd/durations/parser.py#L43-L93
[ "def", "arcball_constrain_to_axis", "(", "point", ",", "axis", ")", ":", "v", "=", "np", ".", "array", "(", "point", ",", "dtype", "=", "np", ".", "float64", ",", "copy", "=", "True", ")", "a", "=", "np", ".", "array", "(", "axis", ",", "dtype", "=", "np", ".", "float64", ",", "copy", "=", "True", ")", "v", "-=", "a", "*", "np", ".", "dot", "(", "a", ",", "v", ")", "# on plane", "n", "=", "vector_norm", "(", "v", ")", "if", "n", ">", "_EPS", ":", "if", "v", "[", "2", "]", "<", "0.0", ":", "np", ".", "negative", "(", "v", ",", "v", ")", "v", "/=", "n", "return", "v", "if", "a", "[", "2", "]", "==", "1.0", ":", "return", "np", ".", "array", "(", "[", "1.0", ",", "0.0", ",", "0.0", "]", ")", "return", "unit_vector", "(", "[", "-", "a", "[", "1", "]", ",", "a", "[", "0", "]", ",", "0.0", "]", ")" ]
Returns a random string based on the provided arguments .
def create_random_string(length=7, chars='ABCDEFGHJKMNPQRSTUVWXYZ23456789', repetitions=False):
    """Return a random string based on the provided arguments.

    The default alphabet omits easily-confused characters (I, L, O, 0, 1).

    :param length: number of characters to generate
    :param chars: alphabet to draw from
    :param repetitions: when True characters may repeat; when False the
        result is a sample without replacement, so *length* must not
        exceed the alphabet size
    """
    if repetitions:
        picks = (random.choice(chars) for _ in range(length))
        return ''.join(picks)
    return ''.join(random.sample(chars, length))
1,733
https://github.com/bitlabstudio/django-libs/blob/2c5376cda084bf16edea540e0f6999f1d844afd0/django_libs/utils/text.py#L5-L16
[ "def", "on_session_end", "(", "self", ",", "session", ")", ":", "if", "not", "isinstance", "(", "session", ",", "ISession", ")", ":", "raise", "TypeError", "(", "\"session can only be an instance of type ISession\"", ")", "progress", "=", "self", ".", "_call", "(", "\"onSessionEnd\"", ",", "in_p", "=", "[", "session", "]", ")", "progress", "=", "IProgress", "(", "progress", ")", "return", "progress" ]
Loads and returns a class for a given fully qualified name .
def load_member(fqn):
    """Load and return a member (class, function, ...) for a fully qualified name.

    :param fqn: dotted path such as ``package.module.MemberName`` (or a
        callable producing one, as accepted by ``split_fqn``)
    """
    mod_path, attr_name = split_fqn(fqn)
    # Passing a fromlist makes __import__ return the leaf module itself
    # rather than the top-level package.
    module = __import__(mod_path, globals(), locals(), attr_name)
    return getattr(module, attr_name)
1,734
https://github.com/bitlabstudio/django-libs/blob/2c5376cda084bf16edea540e0f6999f1d844afd0/django_libs/loaders.py#L5-L9
[ "def", "render", "(", "self", ",", "data", ",", "accepted_media_type", "=", "None", ",", "renderer_context", "=", "None", ")", ":", "if", "'SWAGGER_JSON_PATH'", "in", "os", ".", "environ", ":", "with", "io", ".", "open", "(", "os", ".", "environ", "[", "'SWAGGER_JSON_PATH'", "]", ",", "'rb'", ")", "as", "f", ":", "return", "f", ".", "read", "(", ")", "else", ":", "return", "super", "(", "ConditionalOpenAPIRenderer", ",", "self", ")", ".", "render", "(", "data", ",", "accepted_media_type", ",", "renderer_context", ")" ]
Returns the left and right part of the import .
def split_fqn(fqn):
    """Return the left (module path) and right (member name) part of the import.

    :param fqn: a dotted-path string, or a callable returning one, in
        which case it is invoked first
    """
    fqn_string = fqn() if hasattr(fqn, '__call__') else fqn
    return fqn_string.rsplit('.', 1)
1,735
https://github.com/bitlabstudio/django-libs/blob/2c5376cda084bf16edea540e0f6999f1d844afd0/django_libs/loaders.py#L18-L30
[ "def", "DefaultAdapter", "(", "self", ")", ":", "default_adapter", "=", "None", "for", "obj", "in", "mockobject", ".", "objects", ".", "keys", "(", ")", ":", "if", "obj", ".", "startswith", "(", "'/org/bluez/'", ")", "and", "'dev_'", "not", "in", "obj", ":", "default_adapter", "=", "obj", "if", "default_adapter", ":", "return", "dbus", ".", "ObjectPath", "(", "default_adapter", ",", "variant_level", "=", "1", ")", "else", ":", "raise", "dbus", ".", "exceptions", ".", "DBusException", "(", "'No such adapter.'", ",", "name", "=", "'org.bluez.Error.NoSuchAdapter'", ")" ]
Sends data to the server .
def send(self, data):
    """Send data to the server as a JSON-encoded message.

    Logs and drops the message when the connection is not established.
    """
    self.logger.debug('Send data: {}'.format(data))
    if self.connected:
        self.websocket.send(json.dumps(data))
    else:
        self.logger.warning('Connection not established. Return...')
1,736
https://github.com/tobiasfeistmantl/python-actioncable-zwei/blob/04876b4425a295485af8976acceb0b46d2ef1c8d/actioncable/connection.py#L97-L107
[ "def", "materialize", "(", "self", ",", "ref", ",", "table_name", "=", "None", ",", "index_columns", "=", "None", ",", "logger", "=", "None", ")", ":", "from", "ambry", ".", "library", "import", "Library", "assert", "isinstance", "(", "self", ".", "_library", ",", "Library", ")", "logger", ".", "debug", "(", "'Materializing warehouse partition.\\n partition: {}'", ".", "format", "(", "ref", ")", ")", "partition", "=", "self", ".", "_library", ".", "partition", "(", "ref", ")", "connection", "=", "self", ".", "_backend", ".", "_get_connection", "(", ")", "return", "self", ".", "_backend", ".", "install", "(", "connection", ",", "partition", ",", "table_name", "=", "table_name", ",", "index_columns", "=", "index_columns", ",", "materialize", "=", "True", ",", "logger", "=", "logger", ")" ]
Called whenever a message arrives.
def _on_message ( self , socket , message ) : data = json . loads ( message ) message_type = None identifier = None subscription = None if 'type' in data : message_type = data [ 'type' ] if 'identifier' in data : identifier = json . loads ( data [ 'identifier' ] ) if identifier is not None : subscription = self . find_subscription ( identifier ) if subscription is not None : subscription . received ( data ) elif message_type == 'welcome' : self . logger . debug ( 'Welcome message received.' ) for subscription in self . subscriptions . values ( ) : if subscription . state == 'connection_pending' : subscription . create ( ) elif message_type == 'ping' : if self . log_ping : self . logger . debug ( 'Ping received.' ) else : self . logger . warning ( 'Message not supported. (Message: {})' . format ( message ) )
1,737
https://github.com/tobiasfeistmantl/python-actioncable-zwei/blob/04876b4425a295485af8976acceb0b46d2ef1c8d/actioncable/connection.py#L116-L147
[ "def", "create_index", "(", "config", ")", ":", "filename", "=", "pathlib", ".", "Path", "(", "config", ".", "cache_path", ")", "/", "\"index.json\"", "index", "=", "{", "\"version\"", ":", "__version__", "}", "with", "open", "(", "filename", ",", "\"w\"", ")", "as", "out", ":", "out", ".", "write", "(", "json", ".", "dumps", "(", "index", ",", "indent", "=", "2", ")", ")" ]
Called when the connection was closed .
def _on_close ( self , socket ) : self . logger . debug ( 'Connection closed.' ) for subscription in self . subscriptions . values ( ) : if subscription . state == 'subscribed' : subscription . state = 'connection_pending'
1,738
https://github.com/tobiasfeistmantl/python-actioncable-zwei/blob/04876b4425a295485af8976acceb0b46d2ef1c8d/actioncable/connection.py#L149-L157
[ "def", "Run", "(", "self", ",", "args", ")", ":", "with", "vfs", ".", "VFSOpen", "(", "args", ".", "pathspec", ",", "progress_callback", "=", "self", ".", "Progress", ")", "as", "file_obj", ":", "fingerprinter", "=", "Fingerprinter", "(", "self", ".", "Progress", ",", "file_obj", ")", "response", "=", "rdf_client_action", ".", "FingerprintResponse", "(", ")", "response", ".", "pathspec", "=", "file_obj", ".", "pathspec", "if", "args", ".", "tuples", ":", "tuples", "=", "args", ".", "tuples", "else", ":", "# There are none selected -- we will cover everything", "tuples", "=", "list", "(", ")", "for", "k", "in", "self", ".", "_fingerprint_types", ":", "tuples", ".", "append", "(", "rdf_client_action", ".", "FingerprintTuple", "(", "fp_type", "=", "k", ")", ")", "for", "finger", "in", "tuples", ":", "hashers", "=", "[", "self", ".", "_hash_types", "[", "h", "]", "for", "h", "in", "finger", ".", "hashers", "]", "or", "None", "if", "finger", ".", "fp_type", "in", "self", ".", "_fingerprint_types", ":", "invoke", "=", "self", ".", "_fingerprint_types", "[", "finger", ".", "fp_type", "]", "res", "=", "invoke", "(", "fingerprinter", ",", "hashers", ")", "if", "res", ":", "response", ".", "matching_types", ".", "append", "(", "finger", ".", "fp_type", ")", "else", ":", "raise", "RuntimeError", "(", "\"Encountered unknown fingerprint type. %s\"", "%", "finger", ".", "fp_type", ")", "# Structure of the results is a list of dicts, each containing the", "# name of the hashing method, hashes for enabled hash algorithms,", "# and auxilliary data where present (e.g. 
signature blobs).", "# Also see Fingerprint:HashIt()", "response", ".", "results", "=", "fingerprinter", ".", "HashIt", "(", ")", "# We now return data in a more structured form.", "for", "result", "in", "response", ".", "results", ":", "if", "result", ".", "GetItem", "(", "\"name\"", ")", "==", "\"generic\"", ":", "for", "hash_type", "in", "[", "\"md5\"", ",", "\"sha1\"", ",", "\"sha256\"", "]", ":", "value", "=", "result", ".", "GetItem", "(", "hash_type", ")", "if", "value", "is", "not", "None", ":", "setattr", "(", "response", ".", "hash", ",", "hash_type", ",", "value", ")", "if", "result", "[", "\"name\"", "]", "==", "\"pecoff\"", ":", "for", "hash_type", "in", "[", "\"md5\"", ",", "\"sha1\"", ",", "\"sha256\"", "]", ":", "value", "=", "result", ".", "GetItem", "(", "hash_type", ")", "if", "value", ":", "setattr", "(", "response", ".", "hash", ",", "\"pecoff_\"", "+", "hash_type", ",", "value", ")", "signed_data", "=", "result", ".", "GetItem", "(", "\"SignedData\"", ",", "[", "]", ")", "for", "data", "in", "signed_data", ":", "response", ".", "hash", ".", "signed_data", ".", "Append", "(", "revision", "=", "data", "[", "0", "]", ",", "cert_type", "=", "data", "[", "1", "]", ",", "certificate", "=", "data", "[", "2", "]", ")", "self", ".", "SendReply", "(", "response", ")" ]
Whether connected to the server.
def connected(self):
    """Whether a live websocket connection to the server exists."""
    ws = self.websocket
    if ws is None or ws.sock is None:
        return False
    return ws.sock.connected
1,739
https://github.com/tobiasfeistmantl/python-actioncable-zwei/blob/04876b4425a295485af8976acceb0b46d2ef1c8d/actioncable/connection.py#L167-L173
[ "def", "open_visa_library", "(", "specification", ")", ":", "if", "not", "specification", ":", "logger", ".", "debug", "(", "'No visa library specified, trying to find alternatives.'", ")", "try", ":", "specification", "=", "os", ".", "environ", "[", "'PYVISA_LIBRARY'", "]", "except", "KeyError", ":", "logger", ".", "debug", "(", "'Environment variable PYVISA_LIBRARY is unset.'", ")", "try", ":", "argument", ",", "wrapper", "=", "specification", ".", "split", "(", "'@'", ")", "except", "ValueError", ":", "argument", "=", "specification", "wrapper", "=", "None", "# Flag that we need a fallback, but avoid nested exceptions", "if", "wrapper", "is", "None", ":", "if", "argument", ":", "# some filename given", "wrapper", "=", "'ni'", "else", ":", "wrapper", "=", "_get_default_wrapper", "(", ")", "cls", "=", "get_wrapper_class", "(", "wrapper", ")", "try", ":", "return", "cls", "(", "argument", ")", "except", "Exception", "as", "e", ":", "logger", ".", "debug", "(", "'Could not open VISA wrapper %s: %s\\n%s'", ",", "cls", ",", "str", "(", "argument", ")", ",", "e", ")", "raise" ]
Finds a subscription by its identifier.
def find_subscription(self, identifier):
    """Find a subscription by its identifier; returns None when absent."""
    return next(
        (sub for sub in self.subscriptions.values() if sub.identifier == identifier),
        None)
1,740
https://github.com/tobiasfeistmantl/python-actioncable-zwei/blob/04876b4425a295485af8976acceb0b46d2ef1c8d/actioncable/connection.py#L175-L182
[ "def", "_index_local_ref", "(", "fasta_file", ",", "cortex_dir", ",", "stampy_dir", ",", "kmers", ")", ":", "base_out", "=", "os", ".", "path", ".", "splitext", "(", "fasta_file", ")", "[", "0", "]", "cindexes", "=", "[", "]", "for", "kmer", "in", "kmers", ":", "out_file", "=", "\"{0}.k{1}.ctx\"", ".", "format", "(", "base_out", ",", "kmer", ")", "if", "not", "file_exists", "(", "out_file", ")", ":", "file_list", "=", "\"{0}.se_list\"", ".", "format", "(", "base_out", ")", "with", "open", "(", "file_list", ",", "\"w\"", ")", "as", "out_handle", ":", "out_handle", ".", "write", "(", "fasta_file", "+", "\"\\n\"", ")", "subprocess", ".", "check_call", "(", "[", "_get_cortex_binary", "(", "kmer", ",", "cortex_dir", ")", ",", "\"--kmer_size\"", ",", "str", "(", "kmer", ")", ",", "\"--mem_height\"", ",", "\"17\"", ",", "\"--se_list\"", ",", "file_list", ",", "\"--format\"", ",", "\"FASTA\"", ",", "\"--max_read_len\"", ",", "\"30000\"", ",", "\"--sample_id\"", ",", "base_out", ",", "\"--dump_binary\"", ",", "out_file", "]", ")", "cindexes", ".", "append", "(", "out_file", ")", "if", "not", "file_exists", "(", "\"{0}.stidx\"", ".", "format", "(", "base_out", ")", ")", ":", "subprocess", ".", "check_call", "(", "[", "os", ".", "path", ".", "join", "(", "stampy_dir", ",", "\"stampy.py\"", ")", ",", "\"-G\"", ",", "base_out", ",", "fasta_file", "]", ")", "subprocess", ".", "check_call", "(", "[", "os", ".", "path", ".", "join", "(", "stampy_dir", ",", "\"stampy.py\"", ")", ",", "\"-g\"", ",", "base_out", ",", "\"-H\"", ",", "base_out", "]", ")", "return", "{", "\"stampy\"", ":", "base_out", ",", "\"cortex\"", ":", "cindexes", ",", "\"fasta\"", ":", "[", "fasta_file", "]", "}" ]
Subscribes on the server.
def create ( self ) : self . logger . debug ( 'Create subscription on server...' ) if not self . connection . connected : self . state = 'connection_pending' return data = { 'command' : 'subscribe' , 'identifier' : self . _identifier_string ( ) } self . connection . send ( data ) self . state = 'pending'
1,741
https://github.com/tobiasfeistmantl/python-actioncable-zwei/blob/04876b4425a295485af8976acceb0b46d2ef1c8d/actioncable/subscription.py#L33-L49
[ "def", "write_tables_to_fits", "(", "filepath", ",", "tablelist", ",", "clobber", "=", "False", ",", "namelist", "=", "None", ",", "cardslist", "=", "None", ",", "hdu_list", "=", "None", ")", ":", "outhdulist", "=", "[", "fits", ".", "PrimaryHDU", "(", ")", "]", "rmlist", "=", "[", "]", "for", "i", ",", "table", "in", "enumerate", "(", "tablelist", ")", ":", "ft_name", "=", "\"%s._%i\"", "%", "(", "filepath", ",", "i", ")", "rmlist", ".", "append", "(", "ft_name", ")", "try", ":", "os", ".", "unlink", "(", "ft_name", ")", "except", ":", "pass", "table", ".", "write", "(", "ft_name", ",", "format", "=", "\"fits\"", ")", "ft_in", "=", "fits", ".", "open", "(", "ft_name", ")", "if", "namelist", ":", "ft_in", "[", "1", "]", ".", "name", "=", "namelist", "[", "i", "]", "if", "cardslist", ":", "for", "k", ",", "v", "in", "cardslist", "[", "i", "]", ".", "items", "(", ")", ":", "ft_in", "[", "1", "]", ".", "header", "[", "k", "]", "=", "v", "ft_in", "[", "1", "]", ".", "update", "(", ")", "outhdulist", "+=", "[", "ft_in", "[", "1", "]", "]", "if", "hdu_list", "is", "not", "None", ":", "for", "h", "in", "hdu_list", ":", "outhdulist", ".", "append", "(", "h", ")", "fits", ".", "HDUList", "(", "outhdulist", ")", ".", "writeto", "(", "filepath", ",", "overwrite", "=", "clobber", ")", "for", "rm", "in", "rmlist", ":", "os", ".", "unlink", "(", "rm", ")" ]
Removes the subscription .
def remove ( self ) : self . logger . debug ( 'Remove subscription from server...' ) data = { 'command' : 'unsubscribe' , 'identifier' : self . _identifier_string ( ) } self . connection . send ( data ) self . state = 'unsubscribed'
1,742
https://github.com/tobiasfeistmantl/python-actioncable-zwei/blob/04876b4425a295485af8976acceb0b46d2ef1c8d/actioncable/subscription.py#L51-L63
[ "def", "delete", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "source_cache", "=", "self", ".", "get_source_cache", "(", ")", "# First, delete any related thumbnails.", "self", ".", "delete_thumbnails", "(", "source_cache", ")", "# Next, delete the source image.", "super", "(", "ThumbnailerFieldFile", ",", "self", ")", ".", "delete", "(", "*", "args", ",", "*", "*", "kwargs", ")", "# Finally, delete the source cache entry.", "if", "source_cache", "and", "source_cache", ".", "pk", "is", "not", "None", ":", "source_cache", ".", "delete", "(", ")" ]
Sends data to the server on the subscription channel .
def send ( self , message ) : self . logger . debug ( 'Send message: {}' . format ( message ) ) if self . state == 'pending' or self . state == 'connection_pending' : self . logger . info ( 'Connection not established. Add message to queue.' ) self . message_queue . append ( message ) return elif self . state == 'unsubscribed' or self . state == 'rejected' : self . logger . warning ( 'Not subscribed! Message discarded.' ) return data = { 'command' : 'message' , 'identifier' : self . _identifier_string ( ) , 'data' : message . raw_message ( ) } self . connection . send ( data )
1,743
https://github.com/tobiasfeistmantl/python-actioncable-zwei/blob/04876b4425a295485af8976acceb0b46d2ef1c8d/actioncable/subscription.py#L65-L88
[ "def", "read_binary", "(", "self", ",", "ba", ",", "param_groups", "=", "None", ")", ":", "if", "ba", "is", "None", ":", "return", "[", "]", "pgr", "=", "ba", ".", "find", "(", "'m:referenceableParamGroupRef'", ",", "namespaces", "=", "self", ".", "ns", ")", "if", "pgr", "is", "not", "None", "and", "param_groups", "is", "not", "None", ":", "q", "=", "'m:referenceableParamGroup[@id=\"'", "+", "pgr", ".", "get", "(", "'ref'", ")", "+", "'\"]'", "pg", "=", "param_groups", ".", "find", "(", "q", ",", "namespaces", "=", "self", ".", "ns", ")", "else", ":", "pg", "=", "ba", "if", "pg", ".", "find", "(", "'m:cvParam[@accession=\"MS:1000574\"]'", ",", "namespaces", "=", "self", ".", "ns", ")", "is", "not", "None", ":", "compress", "=", "True", "elif", "pg", ".", "find", "(", "'m:cvParam[@accession=\"MS:1000576\"]'", ",", "namespaces", "=", "self", ".", "ns", ")", "is", "not", "None", ":", "compress", "=", "False", "else", ":", "# TODO: no info? should check the other record?", "pass", "if", "pg", ".", "find", "(", "'m:cvParam[@accession=\"MS:1000521\"]'", ",", "namespaces", "=", "self", ".", "ns", ")", "is", "not", "None", ":", "dtype", "=", "'f'", "elif", "pg", ".", "find", "(", "'m:cvParam[@accession=\"MS:1000523\"]'", ",", "namespaces", "=", "self", ".", "ns", ")", "is", "not", "None", ":", "dtype", "=", "'d'", "else", ":", "# TODO: no info? should check the other record?", "pass", "datatext", "=", "ba", ".", "find", "(", "'m:binary'", ",", "namespaces", "=", "self", ".", "ns", ")", ".", "text", "if", "compress", ":", "rawdata", "=", "zlib", ".", "decompress", "(", "base64", ".", "b64decode", "(", "datatext", ")", ")", "else", ":", "rawdata", "=", "base64", ".", "b64decode", "(", "datatext", ")", "return", "np", ".", "fromstring", "(", "rawdata", ",", "dtype", "=", "dtype", ")" ]
API for the connection to forward information to this subscription instance .
def received ( self , data ) : self . logger . debug ( 'Data received: {}' . format ( data ) ) message_type = None if 'type' in data : message_type = data [ 'type' ] if message_type == 'confirm_subscription' : self . _subscribed ( ) elif message_type == 'reject_subscription' : self . _rejected ( ) elif self . receive_callback is not None and 'message' in data : self . receive_callback ( data [ 'message' ] ) else : self . logger . warning ( 'Message type unknown. ({})' . format ( message_type ) )
1,744
https://github.com/tobiasfeistmantl/python-actioncable-zwei/blob/04876b4425a295485af8976acceb0b46d2ef1c8d/actioncable/subscription.py#L101-L123
[ "def", "init_indexes", "(", "self", ")", ":", "state", "=", "self", ".", "app_state", "for", "name", ",", "schema", "in", "self", ".", "schemas", ".", "items", "(", ")", ":", "if", "current_app", ".", "testing", ":", "storage", "=", "TestingStorage", "(", ")", "else", ":", "index_path", "=", "(", "Path", "(", "state", ".", "whoosh_base", ")", "/", "name", ")", ".", "absolute", "(", ")", "if", "not", "index_path", ".", "exists", "(", ")", ":", "index_path", ".", "mkdir", "(", "parents", "=", "True", ")", "storage", "=", "FileStorage", "(", "str", "(", "index_path", ")", ")", "if", "storage", ".", "index_exists", "(", "name", ")", ":", "index", "=", "FileIndex", "(", "storage", ",", "schema", ",", "name", ")", "else", ":", "index", "=", "FileIndex", ".", "create", "(", "storage", ",", "schema", ",", "name", ")", "state", ".", "indexes", "[", "name", "]", "=", "index" ]
Called when the subscription was accepted successfully .
def _subscribed ( self ) : self . logger . debug ( 'Subscription confirmed.' ) self . state = 'subscribed' for message in self . message_queue : self . send ( message )
1,745
https://github.com/tobiasfeistmantl/python-actioncable-zwei/blob/04876b4425a295485af8976acceb0b46d2ef1c8d/actioncable/subscription.py#L125-L133
[ "def", "update", "(", "self", ",", "data", ")", ":", "self", ".", "_md", ".", "update", "(", "data", ")", "bufpos", "=", "self", ".", "_nbytes", "&", "63", "self", ".", "_nbytes", "+=", "len", "(", "data", ")", "if", "self", ".", "_rarbug", "and", "len", "(", "data", ")", ">", "64", ":", "dpos", "=", "self", ".", "block_size", "-", "bufpos", "while", "dpos", "+", "self", ".", "block_size", "<=", "len", "(", "data", ")", ":", "self", ".", "_corrupt", "(", "data", ",", "dpos", ")", "dpos", "+=", "self", ".", "block_size" ]
Handle the command line options
def cli_opts ( ) : parser = argparse . ArgumentParser ( ) parser . add_argument ( "--homeassistant-config" , type = str , required = False , dest = "config" , help = "Create configuration section for home assistant" , ) parser . add_argument ( "-f" , "--filter" , type = str , required = False , dest = "filter" , help = "Ignore events related with these devices" , ) parser . add_argument ( "-o" , "--output" , type = str , required = False , dest = "output" , help = "Send output to file" , ) parser . add_argument ( "-v" , "--verbose" , action = "store_true" , dest = "verbose" , help = "Verbose output" , ) parser . add_argument ( 'device' ) return parser . parse_args ( )
1,746
https://github.com/flavio/scsgate/blob/aad1d181eef4714ab475f4ff7fcfac4a6425fbb4/scsgate/monitor/__init__.py#L13-L44
[ "def", "reconcile", "(", "constraint", ")", ":", "if", "isinstance", "(", "constraint", ".", "subtype", ",", "NamedType", ")", ":", "if", "isinstance", "(", "constraint", ".", "supertype", ",", "NamedType", ")", ":", "if", "constraint", ".", "subtype", ".", "name", "==", "constraint", ".", "supertype", ".", "name", ":", "return", "{", "}", "else", ":", "return", "Refutation", "(", "'Cannot reconcile different atomic types: %s'", "%", "constraint", ")", "elif", "isinstance", "(", "constraint", ".", "supertype", ",", "Variable", ")", ":", "return", "{", "constraint", ".", "supertype", ".", "name", ":", "constraint", ".", "subtype", "}", "else", ":", "return", "Refutation", "(", "'Cannot reconcile atomic type with non-atomic type: %s'", "%", "constraint", ")", "elif", "isinstance", "(", "constraint", ".", "supertype", ",", "NamedType", ")", ":", "if", "isinstance", "(", "constraint", ".", "subtype", ",", "NamedType", ")", ":", "if", "constraint", ".", "subtype", ".", "name", "==", "constraint", ".", "supertype", ".", "name", ":", "return", "{", "}", "else", ":", "return", "Refutation", "(", "'Cannot reconcile different atomic types: %s'", "%", "constraint", ")", "elif", "isinstance", "(", "constraint", ".", "subtype", ",", "Variable", ")", ":", "return", "{", "constraint", ".", "subtype", ".", "name", ":", "constraint", ".", "supertype", "}", "else", ":", "return", "Refutation", "(", "'Cannot reconcile non-atomic type with atomic type: %s'", "%", "constraint", ")", "elif", "isinstance", "(", "constraint", ".", "supertype", ",", "Union", ")", ":", "# Lots of stuff could happen here; unsure if there's research to bring to bear", "if", "constraint", ".", "subtype", "in", "constraint", ".", "supertype", ".", "types", ":", "return", "{", "}", "return", "Stumper", "(", "constraint", ")" ]
Register signal handlers
def _setup_signal_handler ( self ) : signal . signal ( signal . SIGTERM , self . _signal_handler ) signal . signal ( signal . SIGINT , self . _signal_handler ) signal . signal ( signal . SIGQUIT , self . _signal_handler )
1,747
https://github.com/flavio/scsgate/blob/aad1d181eef4714ab475f4ff7fcfac4a6425fbb4/scsgate/monitor/__init__.py#L83-L87
[ "def", "next_unused_name_in_group", "(", "grp", ",", "length", ")", ":", "# While", "#", "# ltrs = string.ascii_letters + string.digits", "# name = ''.join([random.choice(ltrs) for i in range(length)])", "#", "# seems intuitive, its performance is abysmal compared to", "#", "# '%0{0}x'.format(length) % random.getrandbits(length * 4)", "#", "# The difference is a factor of 20. Idea from", "#", "# https://stackoverflow.com/questions/2782229/most-lightweight-way-", "# to-create-a-random-string-and-a-random-hexadecimal-number/", "# 35161595#35161595", "fmt", "=", "'%0{0}x'", ".", "format", "(", "length", ")", "name", "=", "fmt", "%", "random", ".", "getrandbits", "(", "length", "*", "4", ")", "while", "name", "in", "grp", ":", "name", "=", "fmt", "%", "random", ".", "getrandbits", "(", "length", "*", "4", ")", "return", "name" ]
Method called when handling signals
def _signal_handler ( self , signum , frame ) : if self . _options . config : with open ( self . _options . config , "w" ) as cfg : yaml . dump ( self . _home_assistant_config ( ) , cfg ) print ( "Dumped home assistant configuration at" , self . _options . config ) self . _connection . close ( ) sys . exit ( 0 )
1,748
https://github.com/flavio/scsgate/blob/aad1d181eef4714ab475f4ff7fcfac4a6425fbb4/scsgate/monitor/__init__.py#L89-L98
[ "def", "get_html_clear_filename", "(", "filename", ")", ":", "newFilename", "=", "filename", ".", "replace", "(", "\".html\"", ",", "\"\"", ")", "newFilename", "=", "newFilename", ".", "replace", "(", "\".md\"", ",", "\"\"", ")", "newFilename", "=", "newFilename", ".", "replace", "(", "\".txt\"", ",", "\"\"", ")", "newFilename", "=", "newFilename", ".", "replace", "(", "\".tile\"", ",", "\"\"", ")", "newFilename", "=", "newFilename", ".", "replace", "(", "\".jade\"", ",", "\"\"", ")", "newFilename", "=", "newFilename", ".", "replace", "(", "\".rst\"", ",", "\"\"", ")", "newFilename", "=", "newFilename", ".", "replace", "(", "\".docx\"", ",", "\"\"", ")", "newFilename", "=", "newFilename", ".", "replace", "(", "\"index\"", ",", "\"home\"", ")", "newFilename", "=", "newFilename", ".", "replace", "(", "\"-\"", ",", "\" \"", ")", "newFilename", "=", "newFilename", ".", "replace", "(", "\"_\"", ",", "\" \"", ")", "newFilename", "=", "newFilename", ".", "title", "(", ")", "return", "newFilename" ]
Monitor the bus for events and handle them
def start ( self ) : print ( "Entering monitoring mode, press CTRL-C to quit" ) serial = self . _connection . serial while True : serial . write ( b"@R" ) length = int ( serial . read ( ) , 16 ) data = serial . read ( length * 2 ) message = messages . parse ( data ) if not ( self . _options . filter and message . entity and message . entity in self . _devices ) : logging . debug ( " " . join ( message . bytes ) ) if not self . _options . config or message . entity is None or message . entity in self . _devices : continue print ( "New device found" ) ha_id = input ( "Enter home assistant unique ID: " ) name = input ( "Enter name: " ) self . _add_device ( scs_id = message . entity , ha_id = ha_id , name = name )
1,749
https://github.com/flavio/scsgate/blob/aad1d181eef4714ab475f4ff7fcfac4a6425fbb4/scsgate/monitor/__init__.py#L100-L122
[ "def", "get_minimum_score_metadata", "(", "self", ")", ":", "# Implemented from template for osid.resource.ResourceForm.get_group_metadata_template", "metadata", "=", "dict", "(", "self", ".", "_mdata", "[", "'minimum_score'", "]", ")", "metadata", ".", "update", "(", "{", "'existing_cardinal_values'", ":", "self", ".", "_my_map", "[", "'minimumScore'", "]", "}", ")", "return", "Metadata", "(", "*", "*", "metadata", ")" ]
Add device to the list of known ones
def _add_device ( self , scs_id , ha_id , name ) : if scs_id in self . _devices : return self . _devices [ scs_id ] = { 'name' : name , 'ha_id' : ha_id }
1,750
https://github.com/flavio/scsgate/blob/aad1d181eef4714ab475f4ff7fcfac4a6425fbb4/scsgate/monitor/__init__.py#L124-L132
[ "def", "getOverlayColor", "(", "self", ",", "ulOverlayHandle", ")", ":", "fn", "=", "self", ".", "function_table", ".", "getOverlayColor", "pfRed", "=", "c_float", "(", ")", "pfGreen", "=", "c_float", "(", ")", "pfBlue", "=", "c_float", "(", ")", "result", "=", "fn", "(", "ulOverlayHandle", ",", "byref", "(", "pfRed", ")", ",", "byref", "(", "pfGreen", ")", ",", "byref", "(", "pfBlue", ")", ")", "return", "result", ",", "pfRed", ".", "value", ",", "pfGreen", ".", "value", ",", "pfBlue", ".", "value" ]
Creates home assistant configuration for the known devices
def _home_assistant_config ( self ) : devices = { } for scs_id , dev in self . _devices . items ( ) : devices [ dev [ 'ha_id' ] ] = { 'name' : dev [ 'name' ] , 'scs_id' : scs_id } return { 'devices' : devices }
1,751
https://github.com/flavio/scsgate/blob/aad1d181eef4714ab475f4ff7fcfac4a6425fbb4/scsgate/monitor/__init__.py#L134-L142
[ "def", "process_response", "(", "self", ",", "request", ",", "response", ")", ":", "try", ":", "modified", "=", "request", ".", "session", ".", "modified", "except", "AttributeError", ":", "pass", "else", ":", "if", "modified", "or", "settings", ".", "SESSION_SAVE_EVERY_REQUEST", ":", "if", "request", ".", "session", ".", "get_expire_at_browser_close", "(", ")", ":", "max_age", "=", "None", "expires", "=", "None", "else", ":", "max_age", "=", "request", ".", "session", ".", "get_expiry_age", "(", ")", "expires_time", "=", "time", ".", "time", "(", ")", "+", "max_age", "expires", "=", "cookie_date", "(", "expires_time", ")", "# Save the session data and refresh the client cookie.", "request", ".", "session", ".", "save", "(", ")", "response", ".", "set_cookie", "(", "settings", ".", "SESSION_COOKIE_NAME", ",", "request", ".", "session", ".", "session_key", ",", "max_age", "=", "max_age", ",", "expires", "=", "expires", ",", "domain", "=", "settings", ".", "SESSION_COOKIE_DOMAIN", ",", "path", "=", "settings", ".", "SESSION_COOKIE_PATH", ",", "secure", "=", "settings", ".", "SESSION_COOKIE_SECURE", "or", "None", ")", "return", "response" ]
Load the filter file and populates self . _devices accordingly
def _load_filter ( self , config ) : path = pathlib . Path ( config ) if not path . is_file ( ) : return with open ( config , 'r' ) as conf : devices = yaml . load ( conf ) [ 'devices' ] for ha_id , dev in devices . items ( ) : self . _devices [ dev [ 'scs_id' ] ] = { ha_id : dev , 'name' : dev [ 'name' ] }
1,752
https://github.com/flavio/scsgate/blob/aad1d181eef4714ab475f4ff7fcfac4a6425fbb4/scsgate/monitor/__init__.py#L144-L155
[ "async", "def", "_wait_exponentially", "(", "self", ",", "exception", ",", "max_wait_time", "=", "300", ")", ":", "wait_time", "=", "min", "(", "(", "2", "**", "self", ".", "_connection_attempts", ")", "+", "random", ".", "random", "(", ")", ",", "max_wait_time", ")", "try", ":", "wait_time", "=", "exception", ".", "response", "[", "\"headers\"", "]", "[", "\"Retry-After\"", "]", "except", "(", "KeyError", ",", "AttributeError", ")", ":", "pass", "self", ".", "_logger", ".", "debug", "(", "\"Waiting %s seconds before reconnecting.\"", ",", "wait_time", ")", "await", "asyncio", ".", "sleep", "(", "float", "(", "wait_time", ")", ")" ]
Closes the connection to the serial port and ensure no pending operatoin are left
def close ( self ) : self . _serial . write ( b"@c" ) self . _serial . read ( ) self . _serial . close ( )
1,753
https://github.com/flavio/scsgate/blob/aad1d181eef4714ab475f4ff7fcfac4a6425fbb4/scsgate/connection.py#L48-L53
[ "def", "to_header_str", "(", "self", ")", ":", "h_parts", "=", "[", "]", "if", "self", ".", "root", ":", "h_parts", ".", "append", "(", "ROOT", "+", "'='", "+", "self", ".", "root", ")", "if", "self", ".", "parent", ":", "h_parts", ".", "append", "(", "PARENT", "+", "'='", "+", "self", ".", "parent", ")", "if", "self", ".", "sampled", "is", "not", "None", ":", "h_parts", ".", "append", "(", "SAMPLE", "+", "'='", "+", "str", "(", "self", ".", "sampled", ")", ")", "if", "self", ".", "data", ":", "for", "key", "in", "self", ".", "data", ":", "h_parts", ".", "append", "(", "key", "+", "'='", "+", "self", ".", "data", "[", "key", "]", ")", "return", "HEADER_DELIMITER", ".", "join", "(", "h_parts", ")" ]
Load a value converting it to the proper type if validation_type exists .
def load ( self , value ) : if self . property_type is None : return value elif not isinstance ( self . property_type , BaseType ) : raise TypeError ( 'property_type must be schematics BaseType' ) else : native_value = self . property_type . to_native ( value ) self . property_type . validate ( native_value ) return native_value
1,754
https://github.com/kensho-technologies/grift/blob/b8767d1604c1a0a25eace6cdd04b53b57afa9757/grift/config.py#L35-L44
[ "def", "add_string", "(", "self", ",", "data", ")", ":", "lines", "=", "[", "]", "while", "data", ":", "match", "=", "self", ".", "_line_end_re", ".", "search", "(", "data", ")", "if", "match", "is", "None", ":", "chunk", "=", "data", "else", ":", "chunk", "=", "data", "[", ":", "match", ".", "end", "(", ")", "]", "data", "=", "data", "[", "len", "(", "chunk", ")", ":", "]", "if", "self", ".", "_buf", "and", "self", ".", "_buf", "[", "-", "1", "]", ".", "endswith", "(", "b", "(", "'\\r'", ")", ")", "and", "not", "chunk", ".", "startswith", "(", "b", "(", "'\\n'", ")", ")", ":", "# if we get a carriage return followed by something other than", "# a newline then we assume that we're overwriting the current", "# line (ie. a progress bar)", "#", "# We don't terminate lines that end with a carriage return until", "# we see what's coming next so we can distinguish between a", "# progress bar situation and a Windows line terminator.", "#", "# TODO(adrian): some day these hacks should be replaced with", "# real terminal emulation", "lines", ".", "append", "(", "self", ".", "_finish_line", "(", ")", ")", "self", ".", "_buf", ".", "append", "(", "chunk", ")", "if", "chunk", ".", "endswith", "(", "b", "(", "'\\n'", ")", ")", ":", "lines", ".", "append", "(", "self", ".", "_finish_line", "(", ")", ")", "return", "lines" ]
Set unspecified property_keys for each ConfigProperty to the name of the class attr
def _update_property_keys ( cls ) : for attr_name , config_prop in cls . _iter_config_props ( ) : if config_prop . property_key is None : config_prop . property_key = attr_name
1,755
https://github.com/kensho-technologies/grift/blob/b8767d1604c1a0a25eace6cdd04b53b57afa9757/grift/config.py#L77-L81
[ "def", "upload", "(", "self", ",", "params", "=", "{", "}", ")", ":", "if", "self", ".", "upload_token", "is", "not", "None", ":", "# resume upload", "status", "=", "self", ".", "check", "(", ")", "if", "status", "[", "'status'", "]", "!=", "4", ":", "return", "self", ".", "commit", "(", ")", "else", ":", "self", ".", "new_slice", "(", ")", "while", "self", ".", "slice_task_id", "!=", "0", ":", "self", ".", "upload_slice", "(", ")", "return", "self", ".", "commit", "(", ")", "else", ":", "# new upload", "self", ".", "create", "(", "self", ".", "prepare_video_params", "(", "*", "*", "params", ")", ")", "self", ".", "create_file", "(", ")", "self", ".", "new_slice", "(", ")", "while", "self", ".", "slice_task_id", "!=", "0", ":", "self", ".", "upload_slice", "(", ")", "return", "self", ".", "commit", "(", ")" ]
Set instance property to a value and add it varz if needed
def _set_instance_prop ( self , attr_name , config_prop , value ) : setattr ( self , attr_name , value ) # add to varz if it is not private if not config_prop . exclude_from_varz : self . varz [ attr_name ] = value
1,756
https://github.com/kensho-technologies/grift/blob/b8767d1604c1a0a25eace6cdd04b53b57afa9757/grift/config.py#L83-L89
[ "def", "get_listing", "(", "self", ")", ":", "if", "not", "hasattr", "(", "self", ",", "'listing'", ")", ":", "allEvents", "=", "self", ".", "get_allEvents", "(", ")", "openEvents", "=", "allEvents", ".", "filter", "(", "registrationOpen", "=", "True", ")", "closedEvents", "=", "allEvents", ".", "filter", "(", "registrationOpen", "=", "False", ")", "publicEvents", "=", "allEvents", ".", "instance_of", "(", "PublicEvent", ")", "allSeries", "=", "allEvents", ".", "instance_of", "(", "Series", ")", "self", ".", "listing", "=", "{", "'allEvents'", ":", "allEvents", ",", "'openEvents'", ":", "openEvents", ",", "'closedEvents'", ":", "closedEvents", ",", "'publicEvents'", ":", "publicEvents", ",", "'allSeries'", ":", "allSeries", ",", "'regOpenEvents'", ":", "publicEvents", ".", "filter", "(", "registrationOpen", "=", "True", ")", ".", "filter", "(", "Q", "(", "publicevent__category__isnull", "=", "True", ")", "|", "Q", "(", "publicevent__category__separateOnRegistrationPage", "=", "False", ")", ")", ",", "'regClosedEvents'", ":", "publicEvents", ".", "filter", "(", "registrationOpen", "=", "False", ")", ".", "filter", "(", "Q", "(", "publicevent__category__isnull", "=", "True", ")", "|", "Q", "(", "publicevent__category__separateOnRegistrationPage", "=", "False", ")", ")", ",", "'categorySeparateEvents'", ":", "publicEvents", ".", "filter", "(", "publicevent__category__separateOnRegistrationPage", "=", "True", ")", ".", "order_by", "(", "'publicevent__category'", ")", ",", "'regOpenSeries'", ":", "allSeries", ".", "filter", "(", "registrationOpen", "=", "True", ")", ".", "filter", "(", "Q", "(", "series__category__isnull", "=", "True", ")", "|", "Q", "(", "series__category__separateOnRegistrationPage", "=", "False", ")", ")", ",", "'regClosedSeries'", ":", "allSeries", ".", "filter", "(", "registrationOpen", "=", "False", ")", ".", "filter", "(", "Q", "(", "series__category__isnull", "=", "True", ")", "|", "Q", "(", "series__category__separateOnRegistrationPage", "=", 
"False", ")", ")", ",", "'categorySeparateSeries'", ":", "allSeries", ".", "filter", "(", "series__category__separateOnRegistrationPage", "=", "True", ")", ".", "order_by", "(", "'series__category'", ")", ",", "}", "return", "self", ".", "listing" ]
Load values for all ConfigProperty attributes
def _load ( self ) : for attr_name , config_prop in self . _iter_config_props ( ) : found = False for loader in self . _loaders : if loader . exists ( config_prop . property_key ) : raw_value = loader . get ( config_prop . property_key ) converted_value = config_prop . load ( raw_value ) self . _set_instance_prop ( attr_name , config_prop , converted_value ) found = True break if not found : if not config_prop . required or config_prop . default is not None : self . _set_instance_prop ( attr_name , config_prop , config_prop . default ) else : raise ValueError ( 'Missing required ConfigProperty {}' . format ( attr_name ) )
1,757
https://github.com/kensho-technologies/grift/blob/b8767d1604c1a0a25eace6cdd04b53b57afa9757/grift/config.py#L91-L108
[ "def", "compress_pdf", "(", "pdf_fpath", ",", "output_fname", "=", "None", ")", ":", "import", "utool", "as", "ut", "ut", ".", "assertpath", "(", "pdf_fpath", ")", "suffix", "=", "'_'", "+", "ut", ".", "get_datestamp", "(", "False", ")", "+", "'_compressed'", "print", "(", "'pdf_fpath = %r'", "%", "(", "pdf_fpath", ",", ")", ")", "output_pdf_fpath", "=", "ut", ".", "augpath", "(", "pdf_fpath", ",", "suffix", ",", "newfname", "=", "output_fname", ")", "print", "(", "'output_pdf_fpath = %r'", "%", "(", "output_pdf_fpath", ",", ")", ")", "gs_exe", "=", "find_ghostscript_exe", "(", ")", "cmd_list", "=", "(", "gs_exe", ",", "'-sDEVICE=pdfwrite'", ",", "'-dCompatibilityLevel=1.4'", ",", "'-dNOPAUSE'", ",", "'-dQUIET'", ",", "'-dBATCH'", ",", "'-sOutputFile='", "+", "output_pdf_fpath", ",", "pdf_fpath", ")", "ut", ".", "cmd", "(", "*", "cmd_list", ")", "return", "output_pdf_fpath" ]
Return an absolute path to a target file that is located in the same directory as as_file
def in_same_dir ( as_file , target_file ) : return os . path . abspath ( os . path . join ( os . path . dirname ( as_file ) , target_file ) )
1,758
https://github.com/kensho-technologies/grift/blob/b8767d1604c1a0a25eace6cdd04b53b57afa9757/grift/utils.py#L5-L13
[ "def", "get_records", "(", "self", ")", ":", "form", "=", "self", ".", "request", ".", "form", "ar_count", "=", "self", ".", "get_ar_count", "(", ")", "records", "=", "[", "]", "# Group belonging AR fields together", "for", "arnum", "in", "range", "(", "ar_count", ")", ":", "record", "=", "{", "}", "s1", "=", "\"-{}\"", ".", "format", "(", "arnum", ")", "keys", "=", "filter", "(", "lambda", "key", ":", "s1", "in", "key", ",", "form", ".", "keys", "(", ")", ")", "for", "key", "in", "keys", ":", "new_key", "=", "key", ".", "replace", "(", "s1", ",", "\"\"", ")", "value", "=", "form", ".", "get", "(", "key", ")", "record", "[", "new_key", "]", "=", "value", "records", ".", "append", "(", "record", ")", "return", "records" ]
Compares a name in question to a specified name separated into given and family .
def compare_name ( given_name , family_name , question_name ) : # lowercase everything given_name = given_name . lower ( ) family_name = family_name . lower ( ) question_name = question_name . lower ( ) # rearrange names given as "last, first middle" if ',' in question_name : name_split = question_name . split ( ',' ) name_split . reverse ( ) question_name = ' ' . join ( name_split ) . strip ( ) # remove periods question_name = question_name . replace ( '.' , '' ) given_name = given_name . replace ( '.' , '' ) family_name = family_name . replace ( '.' , '' ) # split names by , <space> - . given_name = list ( filter ( None , re . split ( r"[, \-.]+" , given_name ) ) ) num_family_names = len ( list ( filter ( None , re . split ( "[, .]+" , family_name ) ) ) ) # split name in question by , <space> - . name_split = list ( filter ( None , re . split ( r"[, \-.]+" , question_name ) ) ) first_name = [ name_split [ 0 ] ] if len ( name_split ) > 2 : first_name += [ n for n in name_split [ 1 : - num_family_names ] ] if len ( first_name ) > 1 and len ( given_name ) == len ( first_name ) : # both have same number of first and middle names/initials for i in range ( 1 , len ( first_name ) ) : first_name [ i ] = first_name [ i ] [ 0 ] given_name [ i ] = given_name [ i ] [ 0 ] elif len ( given_name ) != len ( first_name ) : min_names = min ( len ( given_name ) , len ( first_name ) ) first_name = first_name [ : min_names ] given_name = given_name [ : min_names ] # first initial if len ( first_name [ 0 ] ) == 1 or len ( given_name [ 0 ] ) == 1 : given_name [ 0 ] = given_name [ 0 ] [ 0 ] first_name [ 0 ] = first_name [ 0 ] [ 0 ] # first and middle initials combined if len ( first_name [ 0 ] ) > 1 or len ( given_name [ 0 ] ) > 1 : given_name [ 0 ] = given_name [ 0 ] [ 0 ] first_name [ 0 ] = name_split [ 0 ] [ 0 ] # Hyphenated last name may need to be reconnected if num_family_names == 1 and '-' in family_name : num_hyphen = family_name . count ( '-' ) family_name_compare = '-' . 
join ( name_split [ - ( num_hyphen + 1 ) : ] ) else : family_name_compare = ' ' . join ( name_split [ - num_family_names : ] ) return given_name == first_name and family_name == family_name_compare
1,759
https://github.com/pr-omethe-us/PyKED/blob/d9341a068c1099049a3f1de41c512591f342bf64/pyked/validation.py#L91-L166
[ "def", "start", "(", "self", ")", ":", "self", ".", "publish_properties", "(", ")", "self", ".", "subscribe_topics", "(", ")", "gc", ".", "collect", "(", ")", "self", ".", "set_state", "(", "\"ready\"", ")", "while", "True", ":", "try", ":", "if", "not", "utils", ".", "wlan", ".", "isconnected", "(", ")", ":", "utils", ".", "wifi_connect", "(", ")", "# publish device data", "self", ".", "publish_data", "(", ")", "# check for new mqtt messages", "self", ".", "mqtt", ".", "check_msg", "(", ")", "idle", "(", ")", "sleep", "(", "1", ")", "except", "KeyboardInterrupt", ":", "self", ".", "set_state", "(", "\"disconnected\"", ")", "self", ".", "mqtt", ".", "disconnect", "(", ")" ]
Checks that the given time history is properly formatted .
def _validate_isvalid_history ( self , isvalid_history , field , value ) : # Check the type has appropriate units history_type = value [ 'type' ] if history_type . endswith ( 'emission' ) : history_type = 'emission' elif history_type . endswith ( 'absorption' ) : history_type = 'absorption' quantity = 1.0 * ( units ( value [ 'quantity' ] [ 'units' ] ) ) try : quantity . to ( property_units [ history_type ] ) except pint . DimensionalityError : self . _error ( field , 'incompatible units; should be consistent ' 'with ' + property_units [ history_type ] ) # Check that time has appropriate units time = 1.0 * ( units ( value [ 'time' ] [ 'units' ] ) ) try : time . to ( property_units [ 'time' ] ) except pint . DimensionalityError : self . _error ( field , 'incompatible units; should be consistent ' 'with ' + property_units [ 'time' ] ) # Check that the values have the right number of columns n_cols = len ( value [ 'values' ] [ 0 ] ) max_cols = max ( value [ 'time' ] [ 'column' ] , value [ 'quantity' ] [ 'column' ] , value . get ( 'uncertainty' , { } ) . get ( 'column' , 0 ) ) + 1 if n_cols > max_cols : self . _error ( field , 'too many columns in the values' ) elif n_cols < max_cols : self . _error ( field , 'not enough columns in the values' )
1,760
https://github.com/pr-omethe-us/PyKED/blob/d9341a068c1099049a3f1de41c512591f342bf64/pyked/validation.py#L221-L262
[ "def", "unbind", "(", "self", ",", "devices_to_unbind", ")", ":", "if", "self", ".", "entity_api_key", "==", "\"\"", ":", "return", "{", "'status'", ":", "'failure'", ",", "'response'", ":", "'No API key found in request'", "}", "url", "=", "self", ".", "base_url", "+", "\"api/0.1.0/subscribe/unbind\"", "headers", "=", "{", "\"apikey\"", ":", "self", ".", "entity_api_key", "}", "data", "=", "{", "\"exchange\"", ":", "\"amq.topic\"", ",", "\"keys\"", ":", "devices_to_unbind", ",", "\"queue\"", ":", "self", ".", "entity_id", "}", "with", "self", ".", "no_ssl_verification", "(", ")", ":", "r", "=", "requests", ".", "delete", "(", "url", ",", "json", "=", "data", ",", "headers", "=", "headers", ")", "print", "(", "r", ")", "response", "=", "dict", "(", ")", "if", "\"No API key\"", "in", "str", "(", "r", ".", "content", ".", "decode", "(", "\"utf-8\"", ")", ")", ":", "response", "[", "\"status\"", "]", "=", "\"failure\"", "r", "=", "json", ".", "loads", "(", "r", ".", "content", ".", "decode", "(", "\"utf-8\"", ")", ")", "[", "'message'", "]", "elif", "'unbind'", "in", "str", "(", "r", ".", "content", ".", "decode", "(", "\"utf-8\"", ")", ")", ":", "response", "[", "\"status\"", "]", "=", "\"success\"", "r", "=", "r", ".", "content", ".", "decode", "(", "\"utf-8\"", ")", "else", ":", "response", "[", "\"status\"", "]", "=", "\"failure\"", "r", "=", "r", ".", "content", ".", "decode", "(", "\"utf-8\"", ")", "response", "[", "\"response\"", "]", "=", "str", "(", "r", ")", "return", "response" ]
Checks for valid given value and appropriate units .
def _validate_isvalid_quantity(self, isvalid_quantity, field, value):
    """Check that the given value is positive and carries appropriate units.

    The comparison against a zero quantity in the field's canonical units
    doubles as a dimensionality check: pint raises when the units disagree.
    """
    expected_units = property_units[field]
    zero = 0.0 * units(expected_units)
    try:
        if Q_(value[0]) <= zero:
            self._error(field, 'value must be greater than 0.0 {}'.format(expected_units))
    except pint.DimensionalityError:
        self._error(field, 'incompatible units; should be consistent '
                    'with ' + expected_units)
1,761
https://github.com/pr-omethe-us/PyKED/blob/d9341a068c1099049a3f1de41c512591f342bf64/pyked/validation.py#L264-L287
[ "def", "_replace_auth_key", "(", "user", ",", "key", ",", "enc", "=", "'ssh-rsa'", ",", "comment", "=", "''", ",", "options", "=", "None", ",", "config", "=", "'.ssh/authorized_keys'", ")", ":", "auth_line", "=", "_format_auth_line", "(", "key", ",", "enc", ",", "comment", ",", "options", "or", "[", "]", ")", "lines", "=", "[", "]", "full", "=", "_get_config_file", "(", "user", ",", "config", ")", "try", ":", "# open the file for both reading AND writing", "with", "salt", ".", "utils", ".", "files", ".", "fopen", "(", "full", ",", "'r'", ")", "as", "_fh", ":", "for", "line", "in", "_fh", ":", "# We don't need any whitespace-only containing lines or arbitrary doubled newlines", "line", "=", "salt", ".", "utils", ".", "stringutils", ".", "to_unicode", "(", "line", ".", "strip", "(", ")", ")", "if", "line", "==", "''", ":", "continue", "line", "+=", "'\\n'", "if", "line", ".", "startswith", "(", "'#'", ")", ":", "# Commented Line", "lines", ".", "append", "(", "line", ")", "continue", "comps", "=", "re", ".", "findall", "(", "r'((.*)\\s)?(ssh-[a-z0-9-]+|ecdsa-[a-z0-9-]+)\\s([a-zA-Z0-9+/]+={0,2})(\\s(.*))?'", ",", "line", ")", "if", "comps", "and", "len", "(", "comps", "[", "0", "]", ")", ">", "3", "and", "comps", "[", "0", "]", "[", "3", "]", "==", "key", ":", "# Found our key, replace it", "lines", ".", "append", "(", "auth_line", ")", "else", ":", "lines", ".", "append", "(", "line", ")", "_fh", ".", "close", "(", ")", "# Re-open the file writable after properly closing it", "with", "salt", ".", "utils", ".", "files", ".", "fopen", "(", "full", ",", "'wb'", ")", "as", "_fh", ":", "# Write out any changes", "_fh", ".", "writelines", "(", "salt", ".", "utils", ".", "data", ".", "encode", "(", "lines", ")", ")", "except", "(", "IOError", ",", "OSError", ")", "as", "exc", ":", "raise", "CommandExecutionError", "(", "'Problem reading or writing to key file: {0}'", ".", "format", "(", "exc", ")", ")" ]
Checks for valid given value and appropriate units with uncertainty .
def _validate_isvalid_uncertainty ( self , isvalid_uncertainty , field , value ) : self . _validate_isvalid_quantity ( True , field , value ) # This len check is necessary for reasons that aren't quite clear to me # Cerberus calls this validation method even when lists have only one element # and should therefore be validated only by isvalid_quantity if len ( value ) > 1 and value [ 1 ] [ 'uncertainty-type' ] != 'relative' : if value [ 1 ] . get ( 'uncertainty' ) is not None : self . _validate_isvalid_quantity ( True , field , [ value [ 1 ] [ 'uncertainty' ] ] ) if value [ 1 ] . get ( 'upper-uncertainty' ) is not None : self . _validate_isvalid_quantity ( True , field , [ value [ 1 ] [ 'upper-uncertainty' ] ] ) if value [ 1 ] . get ( 'lower-uncertainty' ) is not None : self . _validate_isvalid_quantity ( True , field , [ value [ 1 ] [ 'lower-uncertainty' ] ] )
1,762
https://github.com/pr-omethe-us/PyKED/blob/d9341a068c1099049a3f1de41c512591f342bf64/pyked/validation.py#L289-L315
[ "def", "destroy_page", "(", "self", ",", "tab_dict", ")", ":", "# logger.info(\"destroy page %s\" % tab_dict['controller'].model.state.get_path())", "if", "tab_dict", "[", "'source_code_changed_handler_id'", "]", "is", "not", "None", ":", "handler_id", "=", "tab_dict", "[", "'source_code_changed_handler_id'", "]", "if", "tab_dict", "[", "'controller'", "]", ".", "view", ".", "source_view", ".", "get_buffer", "(", ")", ".", "handler_is_connected", "(", "handler_id", ")", ":", "tab_dict", "[", "'controller'", "]", ".", "view", ".", "source_view", ".", "get_buffer", "(", ")", ".", "disconnect", "(", "handler_id", ")", "else", ":", "logger", ".", "warning", "(", "\"Source code changed handler of state {0} was already removed.\"", ".", "format", "(", "tab_dict", "[", "'state_m'", "]", ")", ")", "self", ".", "remove_controller", "(", "tab_dict", "[", "'controller'", "]", ")" ]
Checks for valid ORCID if given .
def _validate_isvalid_orcid(self, isvalid_orcid, field, value):
    """Check that an author's ORCID, when present, resolves and matches the name.

    Network failures are tolerated with a warning; an HTTP error from the
    ORCID service is reported as an invalid identifier.
    """
    if not (isvalid_orcid and 'ORCID' in value):
        return
    try:
        record = search_orcid(value['ORCID'])
    except ConnectionError:
        warn('network not available, ORCID not validated.')
        return
    except HTTPError:
        self._error(field, 'ORCID incorrect or invalid for ' + value['name'])
        return
    surname = record['name']['family-name']['value']
    forename = record['name']['given-names']['value']
    if not compare_name(forename, surname, value['name']):
        self._error(field, 'Name and ORCID do not match. Name supplied: ' +
                    value['name'] + '. Name associated with ORCID: ' +
                    ' '.join([forename, surname]))
1,763
https://github.com/pr-omethe-us/PyKED/blob/d9341a068c1099049a3f1de41c512591f342bf64/pyked/validation.py#L420-L451
[ "def", "join_keys", "(", "x", ",", "y", ",", "by", "=", "None", ")", ":", "if", "by", "is", "None", ":", "by", "=", "slice", "(", "None", ",", "None", ",", "None", ")", "if", "isinstance", "(", "by", ",", "tuple", ")", ":", "by", "=", "list", "(", "by", ")", "joint", "=", "x", "[", "by", "]", ".", "append", "(", "y", "[", "by", "]", ",", "ignore_index", "=", "True", ")", "keys", "=", "ninteraction", "(", "joint", ",", "drop", "=", "True", ")", "keys", "=", "np", ".", "asarray", "(", "keys", ")", "nx", ",", "ny", "=", "len", "(", "x", ")", ",", "len", "(", "y", ")", "return", "{", "'x'", ":", "keys", "[", "np", ".", "arange", "(", "nx", ")", "]", ",", "'y'", ":", "keys", "[", "nx", "+", "np", ".", "arange", "(", "ny", ")", "]", "}" ]
Checks for valid specification of composition .
def _validate_isvalid_composition ( self , isvalid_composition , field , value ) : sum_amount = 0.0 if value [ 'kind' ] in [ 'mass fraction' , 'mole fraction' ] : low_lim = 0.0 up_lim = 1.0 total_amount = 1.0 elif value [ 'kind' ] in [ 'mole percent' ] : low_lim = 0.0 up_lim = 100.0 total_amount = 100.0 else : self . _error ( field , 'composition kind must be "mole percent", "mass fraction", or ' '"mole fraction"' ) return False for sp in value [ 'species' ] : amount = sp [ 'amount' ] [ 0 ] sum_amount += amount # Check that amount within bounds, based on kind specified if amount < low_lim : self . _error ( field , 'Species ' + sp [ 'species-name' ] + ' ' + value [ 'kind' ] + ' must be greater than {:.1f}' . format ( low_lim ) ) elif amount > up_lim : self . _error ( field , 'Species ' + sp [ 'species-name' ] + ' ' + value [ 'kind' ] + ' must be less than {:.1f}' . format ( up_lim ) ) # Make sure mole/mass fraction sum to 1 if not np . isclose ( total_amount , sum_amount ) : self . _error ( field , 'Species ' + value [ 'kind' ] + 's do not sum to {:.1f}: ' . format ( total_amount ) + '{:f}' . format ( sum_amount ) )
1,764
https://github.com/pr-omethe-us/PyKED/blob/d9341a068c1099049a3f1de41c512591f342bf64/pyked/validation.py#L453-L499
[ "def", "check_update", "(", "from_currency", ",", "to_currency", ")", ":", "if", "from_currency", "not", "in", "ccache", ":", "# if currency never get converted before", "ccache", "[", "from_currency", "]", "=", "{", "}", "if", "ccache", "[", "from_currency", "]", ".", "get", "(", "to_currency", ")", "is", "None", ":", "ccache", "[", "from_currency", "]", "[", "to_currency", "]", "=", "{", "'last_update'", ":", "0", "}", "last_update", "=", "float", "(", "ccache", "[", "from_currency", "]", "[", "to_currency", "]", "[", "'last_update'", "]", ")", "if", "time", ".", "time", "(", ")", "-", "last_update", ">=", "30", "*", "60", ":", "# if last update is more than 30 min ago", "return", "True", "return", "False" ]
Convert the dataframe's value types row by row. This is a slow operation.
def convert_types_slow(df):
    """Convert the value types of a dataframe, row by row. This is a slow operation.

    NOTE(review): the loop below assigns into a filtered copy of ``df``, so it
    appears to have no effect on the returned frame -- confirm before removing.
    """
    for key, dtype in get_types(df).items():
        subset = df[df['key'] == key]
        subset['value'] = subset['value'].astype(dtype)
    return df.apply(convert_row, axis=1)
1,765
https://github.com/gsi-upm/soil/blob/a3ea434f237f039c3cadbc2e0a83ae626d77b818/soil/analysis.py#L63-L70
[ "def", "extract_notebook_metatab", "(", "nb_path", ":", "Path", ")", ":", "from", "metatab", ".", "rowgenerators", "import", "TextRowGenerator", "import", "nbformat", "with", "nb_path", ".", "open", "(", ")", "as", "f", ":", "nb", "=", "nbformat", ".", "read", "(", "f", ",", "as_version", "=", "4", ")", "lines", "=", "'\\n'", ".", "join", "(", "[", "'Declare: metatab-latest'", "]", "+", "[", "get_cell_source", "(", "nb", ",", "tag", ")", "for", "tag", "in", "[", "'metadata'", ",", "'resources'", ",", "'schema'", "]", "]", ")", "doc", "=", "MetapackDoc", "(", "TextRowGenerator", "(", "lines", ")", ")", "doc", "[", "'Root'", "]", ".", "get_or_new_term", "(", "'Root.Title'", ")", ".", "value", "=", "get_cell_source", "(", "nb", ",", "'Title'", ")", ".", "strip", "(", "'#'", ")", ".", "strip", "(", ")", "doc", "[", "'Root'", "]", ".", "get_or_new_term", "(", "'Root.Description'", ")", ".", "value", "=", "get_cell_source", "(", "nb", ",", "'Description'", ")", "doc", "[", "'Documentation'", "]", ".", "get_or_new_term", "(", "'Root.Readme'", ")", ".", "value", "=", "get_cell_source", "(", "nb", ",", "'readme'", ")", "return", "doc" ]
Read all the trial data and plot the result of applying a function on them .
def plot_all(*args, **kwargs):
    """Read all the trial data, plot each trial's dataframe, and return the frames.

    Arguments are forwarded verbatim to ``do_all``; every plot is titled with
    the trial's configured name.
    """
    plotted = []
    for func, frame, config in do_all(*args, **kwargs):
        frame.plot(title=config['name'])
        plotted.append(frame)
    return plotted
1,766
https://github.com/gsi-upm/soil/blob/a3ea434f237f039c3cadbc2e0a83ae626d77b818/soil/analysis.py#L139-L149
[ "def", "humanize", "(", "self", ",", "hexdigest", ",", "words", "=", "4", ",", "separator", "=", "'-'", ")", ":", "# Gets a list of byte values between 0-255.", "bytes", "=", "[", "int", "(", "x", ",", "16", ")", "for", "x", "in", "list", "(", "map", "(", "''", ".", "join", ",", "list", "(", "zip", "(", "hexdigest", "[", ":", ":", "2", "]", ",", "hexdigest", "[", "1", ":", ":", "2", "]", ")", ")", ")", ")", "]", "# Compress an arbitrary number of bytes to `words`.", "compressed", "=", "self", ".", "compress", "(", "bytes", ",", "words", ")", "# Map the compressed byte values through the word list.", "return", "separator", ".", "join", "(", "self", ".", "wordlist", "[", "byte", "]", "for", "byte", "in", "compressed", ")" ]
Get a text representation of an object .
def serialize(v, known_modules=None):
    """Get a text representation of an object.

    Returns a ``(serialized_value, type_name)`` tuple. ``known_modules``
    restricts how the type name is qualified and defaults to no modules.
    """
    # Fix: avoid a mutable default argument; None stands in for an empty list.
    if known_modules is None:
        known_modules = []
    tname = name(v, known_modules=known_modules)
    func = serializer(tname)
    return func(v), tname
1,767
https://github.com/gsi-upm/soil/blob/a3ea434f237f039c3cadbc2e0a83ae626d77b818/soil/utils.py#L116-L120
[ "def", "unbind", "(", "self", ",", "devices_to_unbind", ")", ":", "if", "self", ".", "entity_api_key", "==", "\"\"", ":", "return", "{", "'status'", ":", "'failure'", ",", "'response'", ":", "'No API key found in request'", "}", "url", "=", "self", ".", "base_url", "+", "\"api/0.1.0/subscribe/unbind\"", "headers", "=", "{", "\"apikey\"", ":", "self", ".", "entity_api_key", "}", "data", "=", "{", "\"exchange\"", ":", "\"amq.topic\"", ",", "\"keys\"", ":", "devices_to_unbind", ",", "\"queue\"", ":", "self", ".", "entity_id", "}", "with", "self", ".", "no_ssl_verification", "(", ")", ":", "r", "=", "requests", ".", "delete", "(", "url", ",", "json", "=", "data", ",", "headers", "=", "headers", ")", "print", "(", "r", ")", "response", "=", "dict", "(", ")", "if", "\"No API key\"", "in", "str", "(", "r", ".", "content", ".", "decode", "(", "\"utf-8\"", ")", ")", ":", "response", "[", "\"status\"", "]", "=", "\"failure\"", "r", "=", "json", ".", "loads", "(", "r", ".", "content", ".", "decode", "(", "\"utf-8\"", ")", ")", "[", "'message'", "]", "elif", "'unbind'", "in", "str", "(", "r", ".", "content", ".", "decode", "(", "\"utf-8\"", ")", ")", ":", "response", "[", "\"status\"", "]", "=", "\"success\"", "r", "=", "r", ".", "content", ".", "decode", "(", "\"utf-8\"", ")", "else", ":", "response", "[", "\"status\"", "]", "=", "\"failure\"", "r", "=", "r", ".", "content", ".", "decode", "(", "\"utf-8\"", ")", "response", "[", "\"response\"", "]", "=", "str", "(", "r", ")", "return", "response" ]
Get an object from a text representation
def deserialize(type_, value=None, **kwargs):
    """Get an object back from its text representation.

    A non-string ``type_`` is assumed to already be deserialized and is
    returned untouched. Without a ``value``, the deserializer callable
    itself is returned so it can be applied later.
    """
    if isinstance(type_, str):
        decode = deserializer(type_, **kwargs)
        return decode if value is None else decode(value)
    return type_
1,768
https://github.com/gsi-upm/soil/blob/a3ea434f237f039c3cadbc2e0a83ae626d77b818/soil/utils.py#L155-L162
[ "def", "wind_direction", "(", "self", ",", "value", "=", "999.0", ")", ":", "if", "value", "is", "not", "None", ":", "try", ":", "value", "=", "float", "(", "value", ")", "except", "ValueError", ":", "raise", "ValueError", "(", "'value {} need to be of type float '", "'for field `wind_direction`'", ".", "format", "(", "value", ")", ")", "if", "value", "<", "0.0", ":", "raise", "ValueError", "(", "'value need to be greater or equal 0.0 '", "'for field `wind_direction`'", ")", "if", "value", ">", "360.0", ":", "raise", "ValueError", "(", "'value need to be smaller 360.0 '", "'for field `wind_direction`'", ")", "self", ".", "_wind_direction", "=", "value" ]
Return parsed data . Parse it if not already parsed .
def content(self):
    """Return the parsed data, parsing the files lazily on first access."""
    # Cache the parse result so repeated accesses don't re-read the files.
    cached = self._content
    if cached is None:
        cached = self.parse_files()
        self._content = cached
    return cached
1,769
https://github.com/Genida/django-meerkat/blob/486502a75bb0800266db785fd32717d8c0eb8deb/src/meerkat/logs/parsers.py#L48-L57
[ "def", "_delete_collection", "(", "self", ",", "*", "*", "kwargs", ")", ":", "error_message", "=", "\"The request must include \\\"requests_params\\\": {\\\"params\\\": \\\"options=<glob pattern>\\\"} as kwarg\"", "try", ":", "if", "kwargs", "[", "'requests_params'", "]", "[", "'params'", "]", ".", "split", "(", "'='", ")", "[", "0", "]", "!=", "'options'", ":", "raise", "MissingRequiredRequestsParameter", "(", "error_message", ")", "except", "KeyError", ":", "raise", "requests_params", "=", "self", ".", "_handle_requests_params", "(", "kwargs", ")", "delete_uri", "=", "self", ".", "_meta_data", "[", "'uri'", "]", "session", "=", "self", ".", "_meta_data", "[", "'bigip'", "]", ".", "_meta_data", "[", "'icr_session'", "]", "session", ".", "delete", "(", "delete_uri", ",", "*", "*", "requests_params", ")" ]
Find the files and parse them .
def parse_files(self):
    """Find the matching log files and parse them.

    Returns a list of dicts, one per regex match, keyed by the named
    groups of ``log_format_regex``.
    """
    pattern = self.log_format_regex
    parsed = []
    for path in self.matching_files():
        with open(path) as handle:
            text = handle.read()
        # Each named-group match becomes one parsed record.
        parsed.extend(m.groupdict() for m in re.finditer(pattern, text))
    return parsed
1,770
https://github.com/Genida/django-meerkat/blob/486502a75bb0800266db785fd32717d8c0eb8deb/src/meerkat/logs/parsers.py#L86-L100
[ "def", "db_get", "(", "name", ",", "*", "*", "connection_args", ")", ":", "dbc", "=", "_connect", "(", "*", "*", "connection_args", ")", "if", "dbc", "is", "None", ":", "return", "[", "]", "cur", "=", "dbc", ".", "cursor", "(", ")", "qry", "=", "(", "'SELECT DEFAULT_CHARACTER_SET_NAME, DEFAULT_COLLATION_NAME FROM '", "'INFORMATION_SCHEMA.SCHEMATA WHERE SCHEMA_NAME=%(dbname)s;'", ")", "args", "=", "{", "\"dbname\"", ":", "name", "}", "_execute", "(", "cur", ",", "qry", ",", "args", ")", "if", "cur", ".", "rowcount", ":", "rows", "=", "cur", ".", "fetchall", "(", ")", "return", "{", "'character_set'", ":", "rows", "[", "0", "]", "[", "0", "]", ",", "'collate'", ":", "rows", "[", "0", "]", "[", "1", "]", "}", "return", "{", "}" ]
When serializing an agent distribution remove the thresholds in order to avoid cluttering the YAML definition file .
def serialize_distribution(network_agents, known_modules=None):
    """Make an agent distribution serializable.

    When serializing an agent distribution, remove the computed thresholds in
    order to avoid cluttering the YAML definition file, and replace each
    agent type with its serialized name.
    """
    # Fix: avoid a mutable default argument; None stands in for an empty list.
    if known_modules is None:
        known_modules = []
    # Work on a deep copy so the live distribution is left untouched.
    distribution = deepcopy(list(network_agents))
    for entry in distribution:
        entry.pop('threshold', None)
        entry['agent_type'] = serialize_type(entry['agent_type'],
                                             known_modules=known_modules)
    return distribution
1,771
https://github.com/gsi-upm/soil/blob/a3ea434f237f039c3cadbc2e0a83ae626d77b818/soil/agents/__init__.py#L394-L405
[ "def", "i4_bit_hi1", "(", "n", ")", ":", "i", "=", "np", ".", "floor", "(", "n", ")", "bit", "=", "0", "while", "i", ">", "0", ":", "bit", "+=", "1", "i", "//=", "2", "return", "bit" ]
Validate states to avoid ignoring states during initialization
def _validate_states ( states , topology ) : states = states or [ ] if isinstance ( states , dict ) : for x in states : assert x in topology . node else : assert len ( states ) <= len ( topology ) return states
1,772
https://github.com/gsi-upm/soil/blob/a3ea434f237f039c3cadbc2e0a83ae626d77b818/soil/agents/__init__.py#L423-L431
[ "def", "print_info", "(", "self", ")", ":", "table", "=", "PrettyTable", "(", ")", "start_string", "=", "\"DutSerial {} \\n\"", ".", "format", "(", "self", ".", "name", ")", "row", "=", "[", "]", "info_string", "=", "\"\"", "if", "self", ".", "config", ":", "info_string", "=", "info_string", "+", "\"Configuration for this DUT:\\n\\n {} \\n\"", ".", "format", "(", "self", ".", "config", ")", "if", "self", ".", "comport", ":", "table", ".", "add_column", "(", "\"COM port\"", ",", "[", "]", ")", "row", ".", "append", "(", "self", ".", "comport", ")", "if", "self", ".", "port", ":", "if", "hasattr", "(", "self", ".", "port", ",", "\"baudrate\"", ")", ":", "table", ".", "add_column", "(", "\"Baudrate\"", ",", "[", "]", ")", "row", ".", "append", "(", "self", ".", "port", ".", "baudrate", ")", "if", "hasattr", "(", "self", ".", "port", ",", "\"xonxoff\"", ")", ":", "table", ".", "add_column", "(", "\"XON/XOFF\"", ",", "[", "]", ")", "row", ".", "append", "(", "self", ".", "port", ".", "xonxoff", ")", "if", "hasattr", "(", "self", ".", "port", ",", "\"timeout\"", ")", ":", "table", ".", "add_column", "(", "\"Timeout\"", ",", "[", "]", ")", "row", ".", "append", "(", "self", ".", "port", ".", "timeout", ")", "if", "hasattr", "(", "self", ".", "port", ",", "\"rtscts\"", ")", ":", "table", ".", "add_column", "(", "\"RTSCTS\"", ",", "[", "]", ")", "row", ".", "append", "(", "self", ".", "port", ".", "rtscts", ")", "if", "self", ".", "location", ":", "table", ".", "add_column", "(", "\"Location\"", ",", "[", "]", ")", "row", ".", "append", "(", "\"X = {}, Y = {}\"", ".", "format", "(", "self", ".", "location", ".", "x_coord", ",", "self", ".", "location", ".", "y_coord", ")", ")", "self", ".", "logger", ".", "info", "(", "start_string", ")", "self", ".", "logger", ".", "debug", "(", "info_string", ")", "table", ".", "add_row", "(", "row", ")", "print", "(", "table", ")" ]
Convenience method to allow specifying agents by class or class name .
def _convert_agent_types(ind, to_string=False, **kwargs):
    """Convenience method to allow specifying agents by class or class name.

    Dispatches to serialization or deserialization of the distribution
    depending on the requested direction.
    """
    converter = serialize_distribution if to_string else deserialize_distribution
    return converter(ind, **kwargs)
1,773
https://github.com/gsi-upm/soil/blob/a3ea434f237f039c3cadbc2e0a83ae626d77b818/soil/agents/__init__.py#L434-L438
[ "def", "video_bitwise_bottom", "(", "x", ",", "model_hparams", ",", "vocab_size", ")", ":", "pixel_embedding_size", "=", "64", "inputs", "=", "x", "with", "tf", ".", "variable_scope", "(", "\"video_modality_bitwise\"", ",", "reuse", "=", "tf", ".", "AUTO_REUSE", ")", ":", "common_layers", ".", "summarize_video", "(", "inputs", ",", "\"bottom\"", ")", "# Embed bitwise.", "assert", "vocab_size", "==", "256", "embedded", "=", "discretization", ".", "int_to_bit_embed", "(", "inputs", ",", "8", ",", "pixel_embedding_size", ")", "# Project.", "return", "tf", ".", "layers", ".", "dense", "(", "embedded", ",", "model_hparams", ".", "hidden_size", ",", "name", "=", "\"merge_pixel_embedded_frames\"", ")" ]
Used in the initialization of agents given an agent distribution .
def _agent_from_distribution ( distribution , value = - 1 , agent_id = None ) : if value < 0 : value = random . random ( ) for d in sorted ( distribution , key = lambda x : x [ 'threshold' ] ) : threshold = d [ 'threshold' ] # Check if the definition matches by id (first) or by threshold if not ( ( agent_id is not None and threshold == STATIC_THRESHOLD and agent_id in d [ 'ids' ] ) or ( value >= threshold [ 0 ] and value < threshold [ 1 ] ) ) : continue state = { } if 'state' in d : state = deepcopy ( d [ 'state' ] ) return d [ 'agent_type' ] , state raise Exception ( 'Distribution for value {} not found in: {}' . format ( value , distribution ) )
1,774
https://github.com/gsi-upm/soil/blob/a3ea434f237f039c3cadbc2e0a83ae626d77b818/soil/agents/__init__.py#L441-L456
[ "def", "pop_focus", "(", "self", ",", "cli", ")", ":", "if", "len", "(", "self", ".", "focus_stack", ")", ">", "1", ":", "self", ".", "focus_stack", ".", "pop", "(", ")", "else", ":", "raise", "IndexError", "(", "'Cannot pop last item from the focus stack.'", ")" ]
Run the app .
def launch(self, port=None):
    """Run the app, serving the web interface until the IO loop stops.

    An explicit ``port`` overrides the previously configured one.
    """
    if port is not None:
        self.port = port
    address = 'http://127.0.0.1:{PORT}'.format(PORT=self.port)
    print('Interface starting at {url}'.format(url=address))
    self.listen(self.port)
    # webbrowser.open(address)
    tornado.ioloop.IOLoop.instance().start()
1,775
https://github.com/gsi-upm/soil/blob/a3ea434f237f039c3cadbc2e0a83ae626d77b818/soil/web/__init__.py#L245-L254
[ "def", "user_deleted_from_site_event", "(", "event", ")", ":", "userid", "=", "event", ".", "principal", "catalog", "=", "api", ".", "portal", ".", "get_tool", "(", "'portal_catalog'", ")", "query", "=", "{", "'object_provides'", ":", "WORKSPACE_INTERFACE", "}", "query", "[", "'workspace_members'", "]", "=", "userid", "workspaces", "=", "[", "IWorkspace", "(", "b", ".", "_unrestrictedGetObject", "(", ")", ")", "for", "b", "in", "catalog", ".", "unrestrictedSearchResults", "(", "query", ")", "]", "for", "workspace", "in", "workspaces", ":", "workspace", ".", "remove_from_team", "(", "userid", ")" ]
Get stats for status codes by date .
def status_codes_by_date_stats():
    """Get stats for status codes grouped by date.

    Returns a date-sorted list of ``(timestamp_ms, counters)`` tuples, where
    each counters dict tallies 2xx/3xx/4xx/5xx responses plus likely attacks.
    """
    def date_counter(queryset):
        # Collapse each datetime to midnight (naive) and count hits per day,
        # keyed by milliseconds since the epoch.
        days = [ms_since_epoch(datetime.combine(make_naive(dt), datetime.min.time()))
                for dt in queryset.values_list('datetime', flat=True)]
        return dict(Counter(days))

    codes = {
        200: date_counter(RequestLog.objects.filter(status_code__gte=200, status_code__lt=300)),
        300: date_counter(RequestLog.objects.filter(status_code__gte=300, status_code__lt=400)),
        400: date_counter(RequestLog.objects.filter(status_code__gte=400, status_code__lt=500)),
        500: date_counter(RequestLog.objects.filter(status_code__gte=500)),
        # Status codes commonly produced by blocked/malicious requests.
        'attacks': date_counter(RequestLog.objects.filter(status_code__in=(400, 444, 502))),
    }

    stats = {}
    for code in (200, 300, 400, 500, 'attacks'):
        for day, count in codes[code].items():
            stats.setdefault(day, {200: 0, 300: 0, 400: 0, 500: 0, 'attacks': 0})
            stats[day][code] += count
    return sorted(stats.items(), key=lambda item: item[0])
1,776
https://github.com/Genida/django-meerkat/blob/486502a75bb0800266db785fd32717d8c0eb8deb/src/meerkat/logs/stats.py#L38-L67
[ "def", "create_container", "(", "container_name", ",", "profile", ",", "*", "*", "libcloud_kwargs", ")", ":", "conn", "=", "_get_driver", "(", "profile", "=", "profile", ")", "libcloud_kwargs", "=", "salt", ".", "utils", ".", "args", ".", "clean_kwargs", "(", "*", "*", "libcloud_kwargs", ")", "container", "=", "conn", ".", "create_container", "(", "container_name", ",", "*", "*", "libcloud_kwargs", ")", "return", "{", "'name'", ":", "container", ".", "name", ",", "'extra'", ":", "container", ".", "extra", "}" ]
Agents will try to enter the pub. The pub checks whether entry is possible.
def enter(self, pub_id, *nodes):
    """Agents try to enter the pub; the pub checks whether the whole group fits.

    Returns True and records the group when admitted, False otherwise.
    Raises ValueError for an unknown pub id.
    """
    try:
        pub = self['pubs'][pub_id]
    except KeyError:
        raise ValueError('Pub {} is not available'.format(pub_id))
    group_size = len(nodes)
    # The whole group enters together or not at all.
    if not (pub['open'] and pub['capacity'] >= group_size + pub['occupancy']):
        return False
    pub['occupancy'] += group_size
    for node in nodes:
        node['pub'] = pub_id
    return True
1,777
https://github.com/gsi-upm/soil/blob/a3ea434f237f039c3cadbc2e0a83ae626d77b818/examples/pubcrawl/pubcrawl.py#L25-L36
[ "def", "_db_filename_from_dataframe", "(", "base_filename", ",", "df", ")", ":", "db_filename", "=", "base_filename", "+", "(", "\"_nrows%d\"", "%", "len", "(", "df", ")", ")", "for", "column_name", "in", "df", ".", "columns", ":", "column_db_type", "=", "db_type", "(", "df", "[", "column_name", "]", ".", "dtype", ")", "column_name", "=", "column_name", ".", "replace", "(", "\" \"", ",", "\"_\"", ")", "db_filename", "+=", "\".%s_%s\"", "%", "(", "column_name", ",", "column_db_type", ")", "return", "db_filename", "+", "\".db\"" ]
Agents will notify the pub they want to leave
def exit(self, pub_id, *node_ids):
    """Agents notify the pub that they want to leave.

    Only agents actually registered in this pub are removed; each removal
    frees one unit of occupancy. Raises ValueError for an unknown pub id.
    """
    try:
        pub = self['pubs'][pub_id]
    except KeyError:
        raise ValueError('Pub {} is not available'.format(pub_id))
    for leaver_id in node_ids:
        leaver = self.get_agent(leaver_id)
        # Ignore agents that are not in this particular pub.
        if leaver['pub'] == pub_id:
            del leaver['pub']
            pub['occupancy'] -= 1
1,778
https://github.com/gsi-upm/soil/blob/a3ea434f237f039c3cadbc2e0a83ae626d77b818/examples/pubcrawl/pubcrawl.py#L43-L53
[ "def", "populateFromDirectory", "(", "self", ",", "vcfDirectory", ")", ":", "pattern", "=", "os", ".", "path", ".", "join", "(", "vcfDirectory", ",", "\"*.vcf.gz\"", ")", "dataFiles", "=", "[", "]", "indexFiles", "=", "[", "]", "for", "vcfFile", "in", "glob", ".", "glob", "(", "pattern", ")", ":", "dataFiles", ".", "append", "(", "vcfFile", ")", "indexFiles", ".", "append", "(", "vcfFile", "+", "\".tbi\"", ")", "self", ".", "populateFromFile", "(", "dataFiles", ",", "indexFiles", ")" ]
Look for friends to drink with
def looking_for_friends(self):
    """Look for friends to drink with.

    Returns the next state: stay home when nobody is available, head to a
    pub once new friendships are made, or remain in this state otherwise.
    """
    self.info('I am looking for friends')
    candidates = list(self.get_agents(drunk=False, pub=None,
                                      state_id=self.looking_for_friends.id))
    if not candidates:
        self.info('Life sucks and I\'m alone!')
        return self.at_home
    if self.try_friends(candidates):
        return self.looking_for_pub
1,779
https://github.com/gsi-upm/soil/blob/a3ea434f237f039c3cadbc2e0a83ae626d77b818/examples/pubcrawl/pubcrawl.py#L73-L84
[ "def", "generate_version_file", "(", "self", ",", "schema_filename", ",", "binding_filename", ")", ":", "version_filename", "=", "binding_filename", "+", "'_version.txt'", "version_path", "=", "os", ".", "path", ".", "join", "(", "self", ".", "binding_dir", ",", "version_filename", ")", "schema_path", "=", "os", ".", "path", ".", "join", "(", "self", ".", "schema_dir", ",", "schema_filename", ")", "try", ":", "tstamp", ",", "svnpath", ",", "svnrev", ",", "version", "=", "self", ".", "get_version_info_from_svn", "(", "schema_path", ")", "except", "TypeError", ":", "pass", "else", ":", "self", ".", "write_version_file", "(", "version_path", ",", "tstamp", ",", "svnpath", ",", "svnrev", ",", "version", ")" ]
Look for a pub that accepts me and my friends
def looking_for_pub(self):
    """Look for a pub that accepts me and my friends.

    If already in a pub, start drinking; otherwise try to get the whole
    group of neighbors into one of the available pubs.
    """
    # Fix: identity comparison with None instead of '!= None'.
    if self['pub'] is not None:
        return self.sober_in_pub
    self.debug('I am looking for a pub')
    group = list(self.get_neighboring_agents())
    for pub in self.env.available_pubs():
        self.debug('We\'re trying to get into {}: total: {}'.format(pub, len(group)))
        if self.env.enter(pub, self, *group):
            self.info('We\'re all {} getting in {}!'.format(len(group), pub))
            return self.sober_in_pub
1,780
https://github.com/gsi-upm/soil/blob/a3ea434f237f039c3cadbc2e0a83ae626d77b818/examples/pubcrawl/pubcrawl.py#L87-L97
[ "def", "create_run_logfile", "(", "folder", ")", ":", "with", "open", "(", "os", ".", "path", ".", "join", "(", "folder", ",", "\"run.log\"", ")", ",", "\"w\"", ")", "as", "f", ":", "datestring", "=", "datetime", ".", "datetime", ".", "utcnow", "(", ")", ".", "strftime", "(", "\"%Y-%m-%d %H:%M:%S\"", ")", "f", ".", "write", "(", "\"timestamp: '%s'\"", "%", "datestring", ")" ]
Try to become friends with another agent . The chances of success depend on both agents openness .
def befriend(self, other_agent, force=False):
    """Try to become friends with another agent.

    The chances of success depend on this agent's openness, unless the
    friendship is forced. Returns True when the edge was created.
    """
    accepted = force or self['openness'] > random()
    if not accepted:
        return False
    self.env.add_edge(self, other_agent)
    self.info('Made some friend {}'.format(other_agent))
    return True
1,781
https://github.com/gsi-upm/soil/blob/a3ea434f237f039c3cadbc2e0a83ae626d77b818/examples/pubcrawl/pubcrawl.py#L125-L134
[ "def", "clear_cached", "(", "self", ")", ":", "x", "=", "_COLUMN_CACHE", ".", "pop", "(", "(", "self", ".", "table_name", ",", "self", ".", "name", ")", ",", "None", ")", "if", "x", "is", "not", "None", ":", "logger", ".", "debug", "(", "'cleared cached value for column {!r} in table {!r}'", ".", "format", "(", "self", ".", "name", ",", "self", ".", "table_name", ")", ")" ]
Look for random agents around me and try to befriend them
def try_friends(self, others):
    """Look for random agents around me and try to befriend them.

    Shuffles the candidates and approaches up to ``10 * openness`` of them;
    a mutual friendship is recorded whenever the other agent accepts.
    Returns True if at least one new friend was made.
    """
    made_friend = False
    attempts = int(10 * self['openness'])
    shuffle(others)  # mutates the caller's list, as before
    for candidate in islice(others, attempts):
        if candidate == self:
            continue
        if candidate.befriend(self):
            # Friendship is mutual: force the reverse edge too.
            self.befriend(candidate, force=True)
            self.debug('Hooray! new friend: {}'.format(candidate.id))
            made_friend = True
        else:
            self.debug('{} does not want to be friends'.format(candidate.id))
    return made_friend
1,782
https://github.com/gsi-upm/soil/blob/a3ea434f237f039c3cadbc2e0a83ae626d77b818/examples/pubcrawl/pubcrawl.py#L136-L150
[ "def", "cudnnSetPooling2dDescriptor", "(", "poolingDesc", ",", "mode", ",", "windowHeight", ",", "windowWidth", ",", "verticalPadding", ",", "horizontalPadding", ",", "verticalStride", ",", "horizontalStride", ")", ":", "status", "=", "_libcudnn", ".", "cudnnSetPooling2dDescriptor", "(", "poolingDesc", ",", "mode", ",", "windowHeight", ",", "windowWidth", ",", "verticalPadding", ",", "horizontalPadding", ",", "verticalStride", ",", "horizontalStride", ")", "cudnnCheckStatus", "(", "status", ")" ]
Compute the mean standard deviation min quartile1 quartile2 quartile3 and max of a vector
def profile_distribution(data):
    """Compute summary statistics of a vector.

    Returns a 10-tuple: (data, mean, stdev, skew, kurtosis, min, quartile1,
    quartile2, quartile3, max). All statistics are NaN for empty input.
    """
    if len(data) == 0:
        nan = np.nan
        return (data, nan, nan, nan, nan, nan, nan, nan, nan, nan)
    # Sample standard deviation when possible, population otherwise.
    ddof = 1 if len(data) > 1 else 0
    quantiles = np.percentile(data, [0, 25, 50, 75, 100])
    return (data, np.mean(data), np.std(data, ddof=ddof), skew(data),
            kurtosis(data), quantiles[0], quantiles[1], quantiles[2],
            quantiles[3], quantiles[4])
1,783
https://github.com/byu-dml/metalearn/blob/0a3b7cb339250144f6d2f70977f74fe457cecee3/metalearn/metafeatures/common_operations.py#L6-L27
[ "def", "addslashes", "(", "s", ",", "escaped_chars", "=", "None", ")", ":", "if", "escaped_chars", "is", "None", ":", "escaped_chars", "=", "[", "\"\\\\\"", ",", "\"'\"", ",", "]", "# l = [\"\\\\\", '\"', \"'\", \"\\0\", ]", "for", "i", "in", "escaped_chars", ":", "if", "i", "in", "s", ":", "s", "=", "s", ".", "replace", "(", "i", ",", "'\\\\'", "+", "i", ")", "return", "s" ]
Return the value as a dict raising error if conversion to dict is not possible
def to_native(self, value):
    """Return the value as a dict, raising ConversionError if conversion
    to a dict is not possible.

    Accepts either a dict (returned as-is) or a JSON string that decodes
    to a dict.
    """
    if isinstance(value, dict):
        return value
    if isinstance(value, six.string_types):
        native_value = json.loads(value)
        if isinstance(native_value, dict):
            return native_value
        # Fix: previously a string decoding to a non-dict fell through and
        # returned None silently; treat it as a conversion failure instead.
    raise ConversionError(u'Cannot load value as a dict: {}'.format(value))
1,784
https://github.com/kensho-technologies/grift/blob/b8767d1604c1a0a25eace6cdd04b53b57afa9757/grift/property_types.py#L14-L23
[ "def", "project_activity", "(", "index", ",", "start", ",", "end", ")", ":", "results", "=", "{", "\"metrics\"", ":", "[", "OpenedIssues", "(", "index", ",", "start", ",", "end", ")", ",", "ClosedIssues", "(", "index", ",", "start", ",", "end", ")", "]", "}", "return", "results" ]
Load a value as a list converting items if necessary
def to_native ( self , value ) : if isinstance ( value , six . string_types ) : value_list = value . split ( self . string_delim ) else : value_list = value to_native = self . member_type . to_native if self . member_type is not None else lambda x : x return [ to_native ( item ) for item in value_list ]
1,785
https://github.com/kensho-technologies/grift/blob/b8767d1604c1a0a25eace6cdd04b53b57afa9757/grift/property_types.py#L47-L55
[ "def", "stop", "(", "self", ")", ":", "self", ".", "_flush", "(", ")", "filesize", "=", "self", ".", "file", ".", "tell", "(", ")", "super", "(", "BLFWriter", ",", "self", ")", ".", "stop", "(", ")", "# Write header in the beginning of the file", "header", "=", "[", "b\"LOGG\"", ",", "FILE_HEADER_SIZE", ",", "APPLICATION_ID", ",", "0", ",", "0", ",", "0", ",", "2", ",", "6", ",", "8", ",", "1", "]", "# The meaning of \"count of objects read\" is unknown", "header", ".", "extend", "(", "[", "filesize", ",", "self", ".", "uncompressed_size", ",", "self", ".", "count_of_objects", ",", "0", "]", ")", "header", ".", "extend", "(", "timestamp_to_systemtime", "(", "self", ".", "start_timestamp", ")", ")", "header", ".", "extend", "(", "timestamp_to_systemtime", "(", "self", ".", "stop_timestamp", ")", ")", "with", "open", "(", "self", ".", "file", ".", "name", ",", "\"r+b\"", ")", "as", "f", ":", "f", ".", "write", "(", "FILE_HEADER_STRUCT", ".", "pack", "(", "*", "header", ")", ")" ]
Validate each member of the list if member_type exists
def validate_member_type ( self , value ) : if self . member_type : for item in value : self . member_type . validate ( item )
1,786
https://github.com/kensho-technologies/grift/blob/b8767d1604c1a0a25eace6cdd04b53b57afa9757/grift/property_types.py#L57-L61
[ "def", "spkopa", "(", "filename", ")", ":", "filename", "=", "stypes", ".", "stringToCharP", "(", "filename", ")", "handle", "=", "ctypes", ".", "c_int", "(", ")", "libspice", ".", "spkopa_c", "(", "filename", ",", "ctypes", ".", "byref", "(", "handle", ")", ")", "return", "handle", ".", "value" ]
Validate the length of value if min_length or max_length was specified
def validate_length ( self , value ) : list_len = len ( value ) if value else 0 if self . max_length is not None and list_len > self . max_length : raise ValidationError ( u'List has {} values; max length is {}' . format ( list_len , self . max_length ) ) if self . min_length is not None and list_len < self . min_length : raise ValidationError ( u'List has {} values; min length is {}' . format ( list_len , self . min_length ) )
1,787
https://github.com/kensho-technologies/grift/blob/b8767d1604c1a0a25eace6cdd04b53b57afa9757/grift/property_types.py#L63-L73
[ "def", "_linux_disks", "(", ")", ":", "ret", "=", "{", "'disks'", ":", "[", "]", ",", "'SSDs'", ":", "[", "]", "}", "for", "entry", "in", "glob", ".", "glob", "(", "'/sys/block/*/queue/rotational'", ")", ":", "try", ":", "with", "salt", ".", "utils", ".", "files", ".", "fopen", "(", "entry", ")", "as", "entry_fp", ":", "device", "=", "entry", ".", "split", "(", "'/'", ")", "[", "3", "]", "flag", "=", "entry_fp", ".", "read", "(", "1", ")", "if", "flag", "==", "'0'", ":", "ret", "[", "'SSDs'", "]", ".", "append", "(", "device", ")", "log", ".", "trace", "(", "'Device %s reports itself as an SSD'", ",", "device", ")", "elif", "flag", "==", "'1'", ":", "ret", "[", "'disks'", "]", ".", "append", "(", "device", ")", "log", ".", "trace", "(", "'Device %s reports itself as an HDD'", ",", "device", ")", "else", ":", "log", ".", "trace", "(", "'Unable to identify device %s as an SSD or HDD. It does '", "'not report 0 or 1'", ",", "device", ")", "except", "IOError", ":", "pass", "return", "ret" ]
Validate the network resource with exponential backoff
def validate_resource ( self , value ) : def do_backoff ( * args , * * kwargs ) : """Call self._test_connection with exponential backoff, for self._max_tries attempts""" attempts = 0 while True : try : self . _test_connection ( * args , * * kwargs ) break except ValidationError : wait_secs = min ( self . _max_wait , 2 ** attempts ) attempts += 1 if attempts < self . _max_tries : time . sleep ( wait_secs ) else : raise do_backoff ( value )
1,788
https://github.com/kensho-technologies/grift/blob/b8767d1604c1a0a25eace6cdd04b53b57afa9757/grift/property_types.py#L101-L119
[ "def", "list_adb_devices_by_usb_id", "(", ")", ":", "out", "=", "adb", ".", "AdbProxy", "(", ")", ".", "devices", "(", "[", "'-l'", "]", ")", "clean_lines", "=", "new_str", "(", "out", ",", "'utf-8'", ")", ".", "strip", "(", ")", ".", "split", "(", "'\\n'", ")", "results", "=", "[", "]", "for", "line", "in", "clean_lines", ":", "tokens", "=", "line", ".", "strip", "(", ")", ".", "split", "(", ")", "if", "len", "(", "tokens", ")", ">", "2", "and", "tokens", "[", "1", "]", "==", "'device'", ":", "results", ".", "append", "(", "tokens", "[", "2", "]", ")", "return", "results" ]
Returns a list of metafeatures computable by the Metafeatures class .
def list_metafeatures ( cls , group = "all" ) : # todo make group for intractable metafeatures for wide datasets or # datasets with high cardinality categorical columns: # PredPCA1, PredPCA2, PredPCA3, PredEigen1, PredEigen2, PredEigen3, # PredDet, kNN1NErrRate, kNN1NKappa, LinearDiscriminantAnalysisKappa, # LinearDiscriminantAnalysisErrRate if group == "all" : return copy . deepcopy ( cls . IDS ) elif group == "landmarking" : return list ( filter ( lambda mf_id : "ErrRate" in mf_id or "Kappa" in mf_id , cls . IDS ) ) elif group == "target_dependent" : return list ( filter ( cls . _resource_is_target_dependent , cls . IDS ) ) else : raise ValueError ( f"Unknown group {group}" )
1,789
https://github.com/byu-dml/metalearn/blob/0a3b7cb339250144f6d2f70977f74fe457cecee3/metalearn/metafeatures/metafeatures.py#L49-L69
[ "def", "fill_sampling", "(", "slice_list", ",", "N", ")", ":", "A", "=", "[", "len", "(", "s", ".", "inliers", ")", "for", "s", "in", "slice_list", "]", "N_max", "=", "np", ".", "sum", "(", "A", ")", "if", "N", ">", "N_max", ":", "raise", "ValueError", "(", "\"Tried to draw {:d} samples from a pool of only {:d} items\"", ".", "format", "(", "N", ",", "N_max", ")", ")", "samples_from", "=", "np", ".", "zeros", "(", "(", "len", "(", "A", ")", ",", ")", ",", "dtype", "=", "'int'", ")", "# Number of samples to draw from each group", "remaining", "=", "N", "while", "remaining", ">", "0", ":", "remaining_groups", "=", "np", ".", "flatnonzero", "(", "samples_from", "-", "np", ".", "array", "(", "A", ")", ")", "if", "remaining", "<", "len", "(", "remaining_groups", ")", ":", "np", ".", "random", ".", "shuffle", "(", "remaining_groups", ")", "for", "g", "in", "remaining_groups", "[", ":", "remaining", "]", ":", "samples_from", "[", "g", "]", "+=", "1", "else", ":", "# Give each group the allowed number of samples. Constrain to their max size.", "to_each", "=", "max", "(", "1", ",", "int", "(", "remaining", "/", "len", "(", "remaining_groups", ")", ")", ")", "samples_from", "=", "np", ".", "min", "(", "np", ".", "vstack", "(", "(", "samples_from", "+", "to_each", ",", "A", ")", ")", ",", "axis", "=", "0", ")", "# Update remaining count", "remaining", "=", "int", "(", "N", "-", "np", ".", "sum", "(", "samples_from", ")", ")", "if", "not", "remaining", "==", "0", ":", "raise", "ValueError", "(", "\"Still {:d} samples left! 
This is an error in the selection.\"", ")", "# Construct index list of selected samples", "samples", "=", "[", "]", "for", "s", ",", "a", ",", "n", "in", "zip", "(", "slice_list", ",", "A", ",", "samples_from", ")", ":", "if", "a", "==", "n", ":", "samples", ".", "append", "(", "np", ".", "array", "(", "s", ".", "inliers", ")", ")", "# all", "elif", "a", "==", "0", ":", "samples", ".", "append", "(", "np", ".", "arange", "(", "[", "]", ")", ")", "else", ":", "chosen", "=", "np", ".", "random", ".", "choice", "(", "s", ".", "inliers", ",", "n", ",", "replace", "=", "False", ")", "samples", ".", "append", "(", "np", ".", "array", "(", "chosen", ")", ")", "return", "samples" ]
Stratified uniform sampling of rows according to the classes in Y . Ensures there are enough samples from each class in Y for cross validation .
def _sample_rows ( self , X , Y , sample_shape , seed ) : if sample_shape [ 0 ] is None or X . shape [ 0 ] <= sample_shape [ 0 ] : X_sample , Y_sample = X , Y elif Y is None : np . random . seed ( seed ) row_indices = np . random . choice ( X . shape [ 0 ] , size = sample_shape [ 0 ] , replace = False ) X_sample , Y_sample = X . iloc [ row_indices ] , Y else : drop_size = X . shape [ 0 ] - sample_shape [ 0 ] sample_size = sample_shape [ 0 ] sss = StratifiedShuffleSplit ( n_splits = 2 , test_size = drop_size , train_size = sample_size , random_state = seed ) row_indices , _ = next ( sss . split ( X , Y ) ) X_sample , Y_sample = X . iloc [ row_indices ] , Y . iloc [ row_indices ] return ( X_sample , Y_sample )
1,790
https://github.com/byu-dml/metalearn/blob/0a3b7cb339250144f6d2f70977f74fe457cecee3/metalearn/metafeatures/metafeatures.py#L466-L488
[ "def", "add", "(", "self", ",", "interface_id", ",", "virtual_mapping", "=", "None", ",", "virtual_resource_name", "=", "None", ",", "zone_ref", "=", "None", ",", "comment", "=", "None", ")", ":", "interface", "=", "Layer3PhysicalInterface", "(", "engine", "=", "self", ".", "_engine", ",", "interface_id", "=", "interface_id", ",", "zone_ref", "=", "zone_ref", ",", "comment", "=", "comment", ",", "virtual_resource_name", "=", "virtual_resource_name", ",", "virtual_mapping", "=", "virtual_mapping", ")", "return", "self", ".", "_engine", ".", "add_interface", "(", "interface", ")" ]
Read data from the vault path
def _fetch_secrets ( vault_url , path , token ) : url = _url_joiner ( vault_url , 'v1' , path ) resp = requests . get ( url , headers = VaultLoader . _get_headers ( token ) ) resp . raise_for_status ( ) data = resp . json ( ) if data . get ( 'errors' ) : raise VaultException ( u'Error fetching Vault secrets from path {}: {}' . format ( path , data [ 'errors' ] ) ) return data [ 'data' ]
1,791
https://github.com/kensho-technologies/grift/blob/b8767d1604c1a0a25eace6cdd04b53b57afa9757/grift/loaders.py#L140-L149
[ "def", "isrot", "(", "m", ",", "ntol", ",", "dtol", ")", ":", "m", "=", "stypes", ".", "toDoubleMatrix", "(", "m", ")", "ntol", "=", "ctypes", ".", "c_double", "(", "ntol", ")", "dtol", "=", "ctypes", ".", "c_double", "(", "dtol", ")", "return", "bool", "(", "libspice", ".", "isrot_c", "(", "m", ",", "ntol", ",", "dtol", ")", ")" ]
Get a Vault token using the RoleID and SecretID
def _fetch_app_role_token ( vault_url , role_id , secret_id ) : url = _url_joiner ( vault_url , 'v1/auth/approle/login' ) resp = requests . post ( url , data = { 'role_id' : role_id , 'secret_id' : secret_id } ) resp . raise_for_status ( ) data = resp . json ( ) if data . get ( 'errors' ) : raise VaultException ( u'Error fetching Vault token: {}' . format ( data [ 'errors' ] ) ) return data [ 'auth' ] [ 'client_token' ]
1,792
https://github.com/kensho-technologies/grift/blob/b8767d1604c1a0a25eace6cdd04b53b57afa9757/grift/loaders.py#L152-L160
[ "def", "keep_types_s", "(", "s", ",", "types", ")", ":", "patt", "=", "'|'", ".", "join", "(", "'(?<=\\n)'", "+", "s", "+", "'\\n(?s).+?\\n(?=\\S+|$)'", "for", "s", "in", "types", ")", "return", "''", ".", "join", "(", "re", ".", "findall", "(", "patt", ",", "'\\n'", "+", "s", ".", "strip", "(", ")", "+", "'\\n'", ")", ")", ".", "rstrip", "(", ")" ]
Reread secrets from the vault path
def reload ( self ) : self . _source = self . _fetch_secrets ( self . _vault_url , self . _path , self . _token )
1,793
https://github.com/kensho-technologies/grift/blob/b8767d1604c1a0a25eace6cdd04b53b57afa9757/grift/loaders.py#L162-L164
[ "def", "rtt_write", "(", "self", ",", "buffer_index", ",", "data", ")", ":", "buf_size", "=", "len", "(", "data", ")", "buf", "=", "(", "ctypes", ".", "c_ubyte", "*", "buf_size", ")", "(", "*", "bytearray", "(", "data", ")", ")", "bytes_written", "=", "self", ".", "_dll", ".", "JLINK_RTTERMINAL_Write", "(", "buffer_index", ",", "buf", ",", "buf_size", ")", "if", "bytes_written", "<", "0", ":", "raise", "errors", ".", "JLinkRTTException", "(", "bytes_written", ")", "return", "bytes_written" ]
Sort sort options for display .
def sorted_options ( sort_options ) : return [ { 'title' : v [ 'title' ] , 'value' : ( '-{0}' . format ( k ) if v . get ( 'default_order' , 'asc' ) == 'desc' else k ) , } for k , v in sorted ( sort_options . items ( ) , key = lambda x : x [ 1 ] . get ( 'order' , 0 ) ) ]
1,794
https://github.com/inveniosoftware/invenio-search-ui/blob/4b61737f938cbfdc1aad6602a73f3a24d53b3312/invenio_search_ui/views.py#L29-L44
[ "def", "_tail_temp_file", "(", "self", ",", "temp_file", ",", "num_lines", ",", "seek_offset", "=", "10000", ")", ":", "if", "not", "isinstance", "(", "num_lines", ",", "int", ")", ":", "raise", "DagobahError", "(", "'num_lines must be an integer'", ")", "temp_file", ".", "seek", "(", "0", ",", "os", ".", "SEEK_END", ")", "size", "=", "temp_file", ".", "tell", "(", ")", "temp_file", ".", "seek", "(", "-", "1", "*", "min", "(", "size", ",", "seek_offset", ")", ",", "os", ".", "SEEK_END", ")", "result", "=", "[", "]", "while", "True", ":", "this_line", "=", "temp_file", ".", "readline", "(", ")", "if", "this_line", "==", "''", ":", "break", "result", ".", "append", "(", "this_line", ".", "strip", "(", ")", ")", "if", "len", "(", "result", ")", ">", "num_lines", ":", "result", ".", "pop", "(", "0", ")", "return", "result" ]
Converts html code into formatted plain text .
def html_to_plain_text ( html ) : # Use BeautifulSoup to normalize the html soup = BeautifulSoup ( html , "html.parser" ) # Init the parser parser = HTML2PlainParser ( ) parser . feed ( str ( soup . encode ( 'utf-8' ) ) ) # Strip the end of the plain text result = parser . text . rstrip ( ) # Add footnotes if parser . links : result += '\n\n' for link in parser . links : result += '[{}]: {}\n' . format ( link [ 0 ] , link [ 1 ] ) return result
1,795
https://github.com/bitlabstudio/django-libs/blob/2c5376cda084bf16edea540e0f6999f1d844afd0/django_libs/utils/converter.py#L112-L126
[ "def", "stop", "(", "self", ",", "*", "args", ")", ":", "if", "self", ".", "volumes", ":", "volumes", "=", "\" --bind \"", "+", "\" --bind \"", ".", "join", "(", "self", ".", "volumes", ")", "else", ":", "volumes", "=", "\"\"", "self", ".", "_print", "(", "\"Stopping container [{}]. The container ID is printed below.\"", ".", "format", "(", "self", ".", "name", ")", ")", "utils", ".", "xrun", "(", "\"singularity\"", ",", "[", "\"instance.stop {0:s}\"", ".", "format", "(", "self", ".", "name", ")", "]", ")", "self", ".", "status", "=", "\"exited\"", "return", "0" ]
Handles data between tags .
def handle_data ( self , data ) : # Only proceed with unignored elements if self . lasttag not in self . ignored_elements : # Remove any predefined linebreaks text = data . replace ( '\n' , '' ) # If there's some text left, proceed! if text : if self . lasttag == 'li' : # Use a special prefix for list elements self . text += ' * ' self . text += text if self . lasttag in self . newline_after_elements : # Add a linebreak at the end of the content self . text += '\n'
1,796
https://github.com/bitlabstudio/django-libs/blob/2c5376cda084bf16edea540e0f6999f1d844afd0/django_libs/utils/converter.py#L70-L84
[ "def", "catalogFactory", "(", "name", ",", "*", "*", "kwargs", ")", ":", "fn", "=", "lambda", "member", ":", "inspect", ".", "isclass", "(", "member", ")", "and", "member", ".", "__module__", "==", "__name__", "catalogs", "=", "odict", "(", "inspect", ".", "getmembers", "(", "sys", ".", "modules", "[", "__name__", "]", ",", "fn", ")", ")", "if", "name", "not", "in", "list", "(", "catalogs", ".", "keys", "(", ")", ")", ":", "msg", "=", "\"%s not found in catalogs:\\n %s\"", "%", "(", "name", ",", "list", "(", "kernels", ".", "keys", "(", ")", ")", ")", "logger", ".", "error", "(", "msg", ")", "msg", "=", "\"Unrecognized catalog: %s\"", "%", "name", "raise", "Exception", "(", "msg", ")", "return", "catalogs", "[", "name", "]", "(", "*", "*", "kwargs", ")" ]
Starts the thread
def run ( self ) : task = None monitor_task = MonitorTask ( notification_endpoint = self . _handle_message ) while True : if self . _terminate : self . _logger . info ( "scsgate.Reactor exiting" ) self . _connection . close ( ) break try : task = self . _request_queue . get_nowait ( ) self . _logger . debug ( "scsgate.Reactor: got task {}" . format ( task ) ) except queue . Empty : task = monitor_task try : task . execute ( connection = self . _connection ) except ExecutionError as err : self . _logger . error ( err )
1,797
https://github.com/flavio/scsgate/blob/aad1d181eef4714ab475f4ff7fcfac4a6425fbb4/scsgate/reactor.py#L31-L52
[ "def", "strip_uri", "(", "repo", ")", ":", "splits", "=", "repo", ".", "split", "(", ")", "for", "idx", "in", "range", "(", "len", "(", "splits", ")", ")", ":", "if", "any", "(", "splits", "[", "idx", "]", ".", "startswith", "(", "x", ")", "for", "x", "in", "(", "'http://'", ",", "'https://'", ",", "'ftp://'", ")", ")", ":", "splits", "[", "idx", "]", "=", "splits", "[", "idx", "]", ".", "rstrip", "(", "'/'", ")", "return", "' '", ".", "join", "(", "splits", ")" ]
Just a simple generator that creates a network with n nodes and n_edges edges . Edges are assigned randomly only avoiding self loops .
def mygenerator ( n = 5 , n_edges = 5 ) : G = nx . Graph ( ) for i in range ( n ) : G . add_node ( i ) for i in range ( n_edges ) : nodes = list ( G . nodes ) n_in = choice ( nodes ) nodes . remove ( n_in ) # Avoid loops n_out = choice ( nodes ) G . add_edge ( n_in , n_out ) return G
1,798
https://github.com/gsi-upm/soil/blob/a3ea434f237f039c3cadbc2e0a83ae626d77b818/examples/custom_generator/mymodule.py#L5-L21
[ "async", "def", "abbreviate_verkey", "(", "did", ":", "str", ",", "full_verkey", ":", "str", ")", "->", "str", ":", "logger", "=", "logging", ".", "getLogger", "(", "__name__", ")", "logger", ".", "debug", "(", "\"abbreviate_verkey: >>> did: %r, full_verkey: %r\"", ",", "did", ",", "full_verkey", ")", "if", "not", "hasattr", "(", "abbreviate_verkey", ",", "\"cb\"", ")", ":", "logger", ".", "debug", "(", "\"abbreviate_verkey: Creating callback\"", ")", "abbreviate_verkey", ".", "cb", "=", "create_cb", "(", "CFUNCTYPE", "(", "None", ",", "c_int32", ",", "c_int32", ",", "c_char_p", ")", ")", "c_did", "=", "c_char_p", "(", "did", ".", "encode", "(", "'utf-8'", ")", ")", "c_full_verkey", "=", "c_char_p", "(", "full_verkey", ".", "encode", "(", "'utf-8'", ")", ")", "metadata", "=", "await", "do_call", "(", "'indy_abbreviate_verkey'", ",", "c_did", ",", "c_full_verkey", ",", "abbreviate_verkey", ".", "cb", ")", "res", "=", "metadata", ".", "decode", "(", ")", "logger", ".", "debug", "(", "\"abbreviate_verkey: <<< res: %r\"", ",", "res", ")", "return", "res" ]
Inserts files by recursive traversing the rootpath and inserting files according the addition filter parameters .
def insert_files ( self , rootpath , directoryInFilter = None , directoryExFilter = None , compileInFilter = None , compileExFilter = None , contentInFilter = None , contentExFilter = None ) : # Overrides directoryInFilter = self . DirectoryInFilter if directoryInFilter is None else directoryInFilter directoryExFilter = self . DirectoryExFilter if directoryExFilter is None else directoryExFilter compileInFilter = self . CompileInFilter if compileInFilter is None else compileInFilter compileExFilter = self . CompileExFilter if compileExFilter is None else compileExFilter contentInFilter = self . ContentInFilter if contentInFilter is None else contentInFilter contentExFilter = self . ContentExFilter if contentExFilter is None else contentExFilter def filter ( text , filters , explicit ) : """ Convience filter function :param text text: The target text. :param list filters: The collection of fnmatch expressions :param bool explicit: Flag denoting an the empty filter collection return match failure. """ if explicit : return any ( fnmatch . fnmatch ( text , f ) for f in filters ) return not filters or any ( fnmatch . fnmatch ( text , f ) for f in filters ) for root , dirnames , filenames in os . walk ( rootpath ) : searchdir = os . path . normpath ( os . path . normcase ( root ) ) # If the root dir matches an excluded directory, stop any further searches if filter ( searchdir , directoryExFilter , True ) : dirnames [ : ] = [ ] elif filter ( searchdir , directoryInFilter , False ) : for filepath in [ os . path . join ( root , filename ) for filename in filenames ] : if filter ( filepath , compileInFilter , False ) and not filter ( filepath , compileExFilter , True ) : self . CompileFiles . append ( filepath ) elif filter ( filepath , contentInFilter , False ) and not filter ( filepath , contentExFilter , True ) : self . ContentFiles . append ( filepath )
1,799
https://github.com/dbarsam/python-vsgen/blob/640191bb018a1ff7d7b7a4982e0d3c1a423ba878/vsgen/project.py#L165-L209
[ "def", "reset", "(", "self", ")", ":", "self", ".", "reset_bars", "(", ")", "self", ".", "url_progressbar", ".", "reset", "(", ")", "# reset all resetable components", "for", "prop", "in", "dir", "(", "self", ")", ":", "if", "prop", ".", "startswith", "(", "\"__\"", ")", ":", "continue", "prop_obj", "=", "getattr", "(", "self", ",", "prop", ")", "if", "prop_obj", "is", "not", "None", "and", "hasattr", "(", "prop_obj", ",", "\"reset\"", ")", ":", "prop_obj", ".", "reset", "(", ")", "# reset descriptors", "properties", "=", "(", "getattr", "(", "self", ".", "__class__", ",", "prop", ")", "for", "prop", "in", "self", ".", "_property_list", "if", "hasattr", "(", "self", ".", "__class__", ",", "prop", ")", ")", "for", "prop", "in", "properties", ":", "if", "hasattr", "(", "prop", ",", "\"reset\"", ")", ":", "prop", ".", "reset", "(", ")", "elif", "hasattr", "(", "prop", ",", "\"__set__\"", ")", ":", "prop", ".", "__set__", "(", "None", ",", "\"\"", ")", "self", ".", "additional_info", "=", "None" ]