query
stringlengths
5
1.23k
positive
stringlengths
53
15.2k
id_
int64
0
252k
task_name
stringlengths
87
242
negative
listlengths
20
553
Add a quantity modifier. Consider using the high-level function add_derived_quantity instead!
def add_quantity_modifier(self, quantity, modifier, overwrite=False):
    """Register *modifier* under the name *quantity*.

    Raises ValueError when the quantity is already registered and
    *overwrite* is False.  Consider the higher-level helper
    ``add_derived_quantity`` instead.
    """
    already_known = quantity in self._quantity_modifiers
    if already_known and not overwrite:
        raise ValueError('quantity `{}` already exists'.format(quantity))
    self._quantity_modifiers[quantity] = modifier
    # warn (but do not raise) if the referenced quantities are unknown
    self._check_quantities_exist([quantity], raise_exception=False)
12,100
https://github.com/yymao/generic-catalog-reader/blob/bc6267ac41b9f68106ed6065184469ac13fdc0b6/GCR/base.py#L194-L217
[ "def", "_init_cycle_dict", "(", "self", ")", ":", "dict_arr", "=", "np", ".", "zeros", "(", "self", ".", "epochs", ",", "dtype", "=", "int", ")", "length_arr", "=", "np", ".", "zeros", "(", "self", ".", "epochs", ",", "dtype", "=", "int", ")", "start_arr", "=", "np", ".", "zeros", "(", "self", ".", "epochs", ",", "dtype", "=", "int", ")", "c_len", "=", "self", ".", "cycle_len", "idx", "=", "0", "for", "i", "in", "range", "(", "self", ".", "cycles", ")", ":", "current_start", "=", "idx", "for", "j", "in", "range", "(", "c_len", ")", ":", "dict_arr", "[", "idx", "]", "=", "i", "length_arr", "[", "idx", "]", "=", "c_len", "start_arr", "[", "idx", "]", "=", "current_start", "idx", "+=", "1", "c_len", "*=", "self", ".", "cycle_mult", "return", "dict_arr", ",", "length_arr", ",", "start_arr" ]
Retrieve a quantity modifier, normalized. This function returns a tuple with the first item a callable and the rest native quantity names.
def get_normalized_quantity_modifier(self, quantity):
    """Retrieve a quantity modifier in normalized form.

    Always returns a tuple whose first item is a callable and whose
    remaining items are native quantity names.
    """
    mod = self._quantity_modifiers.get(quantity, self._default_quantity_modifier)
    if mod is None:
        # no modifier at all: the quantity maps to itself
        return (trivial_callable, quantity)
    if callable(mod):
        # bare callable, applied to the same-named native quantity
        return (mod, quantity)
    is_sequence = isinstance(mod, (tuple, list))
    if is_sequence and len(mod) > 1 and callable(mod[0]):
        # already in normalized (callable, *names) form
        return mod
    # plain rename: `quantity` is an alias for the native name `mod`
    return (trivial_callable, mod)
12,101
https://github.com/yymao/generic-catalog-reader/blob/bc6267ac41b9f68106ed6065184469ac13fdc0b6/GCR/base.py#L234-L259
[ "def", "create_new_page", "(", "self", ",", "section_id", ",", "new_page_style", "=", "0", ")", ":", "try", ":", "self", ".", "process", ".", "CreateNewPage", "(", "section_id", ",", "\"\"", ",", "new_page_style", ")", "except", "Exception", "as", "e", ":", "print", "(", "e", ")", "print", "(", "\"Unable to create the page\"", ")" ]
Add a derived quantity modifier.
def add_derived_quantity(self, derived_quantity, func, *quantities):
    """Add a derived quantity modifier: derived_quantity = func(*quantities).

    Each input quantity may itself be a derived quantity; in that case the
    chain is flattened so the stored modifier only references native
    quantities.
    """
    if derived_quantity in self._quantity_modifiers:
        raise ValueError('quantity name `{}` already exists'.format(derived_quantity))
    if set(quantities).issubset(self._native_quantities):
        # fast path: every input is native, store (func, *names) directly
        new_modifier = (func,) + quantities
    else:
        # resolve each input to its normalized (callable, *native_names) form
        functions = []
        quantities_needed = []
        quantity_count = []
        for q in quantities:
            modifier = self.get_normalized_quantity_modifier(q)
            functions.append(modifier[0])
            quantities_needed.extend(modifier[1:])
            quantity_count.append(len(modifier) - 1)

        def _new_func(*x):
            # x holds native values in the order of quantities_needed;
            # slice them per input quantity, evaluate each sub-modifier,
            # then apply the user-supplied func to the results
            assert len(x) == sum(quantity_count)
            count_current = 0
            new_args = []
            for func_this, count in zip(functions, quantity_count):
                new_args.append(func_this(*x[count_current:count_current + count]))
                count_current += count
            return func(*new_args)

        new_modifier = (_new_func,) + tuple(quantities_needed)
    self.add_quantity_modifier(derived_quantity, new_modifier)
12,102
https://github.com/yymao/generic-catalog-reader/blob/bc6267ac41b9f68106ed6065184469ac13fdc0b6/GCR/base.py#L261-L304
[ "def", "close", "(", "self", ")", ":", "from", "neobolt", ".", "exceptions", "import", "ConnectionExpired", ",", "CypherError", ",", "ServiceUnavailable", "try", ":", "if", "self", ".", "has_transaction", "(", ")", ":", "try", ":", "self", ".", "rollback_transaction", "(", ")", "except", "(", "CypherError", ",", "TransactionError", ",", "SessionError", ",", "ConnectionExpired", ",", "ServiceUnavailable", ")", ":", "pass", "finally", ":", "self", ".", "_closed", "=", "True", "self", ".", "_disconnect", "(", "sync", "=", "True", ")" ]
Deprecated . Use add_derived_quantity instead .
def add_modifier_on_derived_quantities(self, new_quantity, func, *quantities):
    """Deprecated alias for :meth:`add_derived_quantity`."""
    warnings.warn("Use `add_derived_quantity` instead.", DeprecationWarning)
    self.add_derived_quantity(new_quantity, func, *quantities)
12,103
https://github.com/yymao/generic-catalog-reader/blob/bc6267ac41b9f68106ed6065184469ac13fdc0b6/GCR/base.py#L306-L311
[ "def", "find_best_frametype", "(", "channel", ",", "start", ",", "end", ",", "frametype_match", "=", "None", ",", "allow_tape", "=", "True", ",", "connection", "=", "None", ",", "host", "=", "None", ",", "port", "=", "None", ")", ":", "try", ":", "return", "find_frametype", "(", "channel", ",", "gpstime", "=", "(", "start", ",", "end", ")", ",", "frametype_match", "=", "frametype_match", ",", "allow_tape", "=", "allow_tape", ",", "on_gaps", "=", "'error'", ",", "connection", "=", "connection", ",", "host", "=", "host", ",", "port", "=", "port", ")", "except", "RuntimeError", ":", "# gaps (or something else went wrong)", "ftout", "=", "find_frametype", "(", "channel", ",", "gpstime", "=", "(", "start", ",", "end", ")", ",", "frametype_match", "=", "frametype_match", ",", "return_all", "=", "True", ",", "allow_tape", "=", "allow_tape", ",", "on_gaps", "=", "'ignore'", ",", "connection", "=", "connection", ",", "host", "=", "host", ",", "port", "=", "port", ")", "try", ":", "if", "isinstance", "(", "ftout", ",", "dict", ")", ":", "return", "{", "key", ":", "ftout", "[", "key", "]", "[", "0", "]", "for", "key", "in", "ftout", "}", "return", "ftout", "[", "0", "]", "except", "IndexError", ":", "raise", "ValueError", "(", "\"Cannot find any valid frametypes for channel(s)\"", ")" ]
Checks if the wdiff command can be found .
def check_for_wdiff():
    """Check that the ``wdiff`` command can be found.

    Raises:
        WdiffNotFoundError: if `which` cannot locate ``CMD_WDIFF``.
    """
    cmd = ['which', CMD_WDIFF]
    # `sub.DEVNULL` replaces a manually opened os.devnull handle; the old
    # handle leaked if Popen raised before the explicit close().
    proc = sub.Popen(cmd, stdout=sub.DEVNULL)
    proc.wait()
    if proc.returncode != 0:
        msg = "the `{}` command can't be found".format(CMD_WDIFF)
        raise WdiffNotFoundError(msg)
12,104
https://github.com/brutus/wdiffhtml/blob/e97b524a7945f7a626e33ec141343120c524d9fa/wdiffhtml/utils.py#L36-L52
[ "def", "dropout", "(", "x", ",", "keep_prob", ",", "noise_shape", "=", "None", ",", "name", "=", "None", ")", ":", "noise_shape", "=", "convert_to_shape", "(", "noise_shape", ")", "if", "noise_shape", "is", "None", ":", "noise_shape", "=", "x", ".", "shape", "with", "tf", ".", "variable_scope", "(", "name", ",", "default_name", "=", "\"dropout\"", ")", ":", "if", "keep_prob", "==", "1.0", ":", "return", "x", "noise", "=", "cast", "(", "less", "(", "random_uniform", "(", "x", ".", "mesh", ",", "noise_shape", ",", "dtype", "=", "x", ".", "dtype", ")", ",", "keep_prob", ")", ",", "x", ".", "dtype", ")", "noise", "/=", "keep_prob", "return", "x", "*", "noise" ]
Returns the results from the wdiff command as a string .
def generate_wdiff(org_file, new_file, fold_tags=False, html=True):
    """Return the results from the wdiff command as a string.

    Runs ``wdiff`` on *org_file* and *new_file*; with *html* the output is
    wrapped in markup, and unless *fold_tags* is set, line-break options
    are added as well.
    """
    check_for_wdiff()
    cmd = [CMD_WDIFF]
    if html:
        cmd.extend(OPTIONS_OUTPUT)
        # NOTE(review): the linebreak options are assumed to apply only to
        # HTML output (nested under `if html`) — confirm against upstream.
        if not fold_tags:
            cmd.extend(OPTIONS_LINEBREAK)
    cmd.extend([org_file, new_file])
    proc = sub.Popen(cmd, stdout=sub.PIPE)
    diff, _ = proc.communicate()
    return diff.decode('utf-8')
12,105
https://github.com/brutus/wdiffhtml/blob/e97b524a7945f7a626e33ec141343120c524d9fa/wdiffhtml/utils.py#L55-L79
[ "def", "on_startup_error", "(", "self", ",", "error", ")", ":", "LOGGER", ".", "critical", "(", "'Could not start %s: %s'", ",", "self", ".", "consumer_name", ",", "error", ")", "self", ".", "set_state", "(", "self", ".", "STATE_STOPPED", ")" ]
Create dialog body . Return widget that should have initial focus .
def body(self, master):
    """Create dialog body. Return widget that should have initial focus."""
    # build the widgets
    self.frame = ttk.Frame(master, padding=(5, 5, 10, 10))
    self.lbl_message = ttk.Label(
        self.frame,
        text='Select User Type: ',
    )
    self.rb_student = ttk.Radiobutton(
        self.frame,
        text='Student',
        variable=self.rb_choice,
        value='student',
    )
    self.rb_tutor = ttk.Radiobutton(
        self.frame,
        text='Tutor',
        variable=self.rb_choice,
        value='tutor',
    )
    self.btn_ok = ttk.Button(
        self.frame,
        text='OK',
        command=self.ok,
    )
    self.btn_cancel = ttk.Button(
        self.frame,
        text='Cancel',
        command=self.cancel,
    )
    # assemble grid
    self.frame.grid(column=0, row=0, sticky=(N, S, E, W))
    self.lbl_message.grid(column=0, row=0, columnspan=2, sticky=(W, E))
    self.rb_student.grid(column=0, row=1, columnspan=2, sticky=W)
    self.rb_tutor.grid(column=0, row=2, columnspan=2, sticky=W)
    self.btn_ok.grid(column=0, row=3)
    self.btn_cancel.grid(column=1, row=3)
    # key bindings
    self.bind('<Return>', self.ok)
    self.bind('<KP_Enter>', self.ok)
    self.bind('<Escape>', self.cancel)
    # pre-select the "Tutor" radio button
    self.rb_tutor.invoke()
    return self.btn_ok
12,106
https://github.com/mesbahamin/chronophore/blob/ee140c61b4dfada966f078de8304bac737cec6f7/chronophore/tkview.py#L240-L289
[ "def", "remove_stale_javascripts", "(", "portal", ")", ":", "logger", ".", "info", "(", "\"Removing stale javascripts ...\"", ")", "for", "js", "in", "JAVASCRIPTS_TO_REMOVE", ":", "logger", ".", "info", "(", "\"Unregistering JS %s\"", "%", "js", ")", "portal", ".", "portal_javascripts", ".", "unregisterResource", "(", "js", ")" ]
Inherited from tkinter . simpledialog . Dialog
def apply(self):
    """Inherited from tkinter.simpledialog.Dialog; store the selection."""
    user_type = self.rb_choice.get()
    # only accept the two known choices
    if user_type in ('student', 'tutor'):
        self.result = user_type
12,107
https://github.com/mesbahamin/chronophore/blob/ee140c61b4dfada966f078de8304bac737cec6f7/chronophore/tkview.py#L321-L325
[ "def", "release", "(", "cls", ",", "entity", ",", "unit_of_work", ")", ":", "if", "not", "hasattr", "(", "entity", ",", "'__everest__'", ")", ":", "raise", "ValueError", "(", "'Trying to unregister an entity that has not '", "'been registered yet!'", ")", "elif", "not", "unit_of_work", "is", "entity", ".", "__everest__", ".", "unit_of_work", ":", "raise", "ValueError", "(", "'Trying to unregister an entity that has been '", "'registered with another session!'", ")", "delattr", "(", "entity", ",", "'__everest__'", ")" ]
Flag any entries from previous days where users forgot to sign out .
def flag_forgotten_entries(session, today=None):  # noqa
    """Flag any entries from previous days where users forgot to sign out."""
    if today is None:
        today = date.today()
    # open entries from before today that are not yet flagged
    query = session.query(Entry)
    query = query.filter(Entry.time_out.is_(None))
    query = query.filter(Entry.forgot_sign_out.is_(False))
    query = query.filter(Entry.date < today)
    for entry in query:
        flagged = sign_out(entry, forgot=True)
        logger.debug('Signing out forgotten entry: {}'.format(flagged))
        session.add(flagged)
    session.commit()
12,108
https://github.com/mesbahamin/chronophore/blob/ee140c61b4dfada966f078de8304bac737cec6f7/chronophore/controller.py#L65-L87
[ "def", "_is_compatible_with", "(", "self", ",", "other", ")", ":", "title", "=", "self", ".", "_compare_title", "(", "other", ")", "suffix", "=", "self", ".", "_compare_suffix", "(", "other", ")", "return", "title", "and", "suffix" ]
Return list of names of currently signed in users .
def signed_in_users(session=None, today=None, full_name=True):  # noqa
    """Return list of currently signed in users."""
    if session is None:
        session = Session()
    if today is None:
        today = date.today()
    # users joined to their open (no time_out) entries for today
    users = (
        session.query(User)
        .filter(Entry.date == today)
        .filter(Entry.time_out.is_(None))
        .filter(User.user_id == Entry.user_id)
        .all()
    )
    session.close()
    return users
12,109
https://github.com/mesbahamin/chronophore/blob/ee140c61b4dfada966f078de8304bac737cec6f7/chronophore/controller.py#L90-L118
[ "def", "evpn_instance_mac_timer_max_count", "(", "self", ",", "*", "*", "kwargs", ")", ":", "evpn_instance_name", "=", "kwargs", ".", "pop", "(", "'evpn_instance_name'", ",", "''", ")", "max_count", "=", "kwargs", ".", "pop", "(", "'max_count'", ",", "'5'", ")", "enable", "=", "kwargs", ".", "pop", "(", "'enable'", ",", "True", ")", "get", "=", "kwargs", ".", "pop", "(", "'get'", ",", "False", ")", "rbridge_id", "=", "kwargs", ".", "pop", "(", "'rbridge_id'", ",", "'1'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "evpn_args", "=", "dict", "(", "instance_name", "=", "evpn_instance_name", ",", "max_count", "=", "max_count", ")", "if", "get", ":", "enable", "=", "None", "method_name", "=", "'rbridge_id_evpn_instance_duplicate_'", "'mac_timer_max_count'", "method_class", "=", "self", ".", "_rbridge", "evpn_args", "[", "'rbridge_id'", "]", "=", "rbridge_id", "evpn_instance_mac_timer_max_count", "=", "getattr", "(", "method_class", ",", "method_name", ")", "config", "=", "evpn_instance_mac_timer_max_count", "(", "*", "*", "evpn_args", ")", "if", "get", ":", "return", "callback", "(", "config", ",", "handler", "=", "'get_config'", ")", "if", "not", "enable", ":", "config", ".", "find", "(", "'.//*duplicate-mac-timer'", ")", ".", "set", "(", "'operation'", ",", "'delete'", ")", "return", "callback", "(", "config", ")" ]
Return the user's name as a string.
def get_user_name(user, full_name=True):  # noqa
    """Return the user's name as a string, or None if unavailable."""
    try:
        first = user.first_name
        if not full_name:
            return first
        return ' '.join([first, user.last_name])
    except AttributeError:
        # user object lacks name attributes (e.g. user is None)
        return None
12,110
https://github.com/mesbahamin/chronophore/blob/ee140c61b4dfada966f078de8304bac737cec6f7/chronophore/controller.py#L121-L136
[ "def", "delete_attachments", "(", "self", ",", "volumeID", ",", "attachmentsID", ")", ":", "log", ".", "debug", "(", "\"deleting attachments from volume '{}': {}\"", ".", "format", "(", "volumeID", ",", "attachmentsID", ")", ")", "rawVolume", "=", "self", ".", "_req_raw_volume", "(", "volumeID", ")", "insID", "=", "[", "a", "[", "'id'", "]", "for", "a", "in", "rawVolume", "[", "'_source'", "]", "[", "'_attachments'", "]", "]", "# check that all requested file are present", "for", "id", "in", "attachmentsID", ":", "if", "id", "not", "in", "insID", ":", "raise", "NotFoundException", "(", "\"could not found attachment '{}' of the volume '{}'\"", ".", "format", "(", "id", ",", "volumeID", ")", ")", "for", "index", ",", "id", "in", "enumerate", "(", "attachmentsID", ")", ":", "rawVolume", "[", "'_source'", "]", "[", "'_attachments'", "]", ".", "pop", "(", "insID", ".", "index", "(", "id", ")", ")", "self", ".", "_db", ".", "modify_book", "(", "volumeID", ",", "rawVolume", "[", "'_source'", "]", ",", "version", "=", "rawVolume", "[", "'_version'", "]", ")" ]
Add a new entry to the timesheet .
def sign_in(user, user_type=None, date=None, time_in=None):  # noqa
    """Add a new entry to the timesheet and return it (not committed here)."""
    now = datetime.today()
    if date is None:
        date = now.date()
    if time_in is None:
        time_in = now.time()
    if user_type is None:
        # infer the type from the user record
        if user.is_student and user.is_tutor:
            raise AmbiguousUserType('User is both a student and a tutor.')
        elif user.is_student:
            user_type = 'student'
        elif user.is_tutor:
            user_type = 'tutor'
        else:
            raise ValueError('Unknown user type.')
    new_entry = Entry(
        uuid=str(uuid.uuid4()),
        date=date,
        time_in=time_in,
        time_out=None,
        user_id=user.user_id,
        user_type=user_type,
        user=user,
    )
    logger.info('{} ({}) signed in.'.format(new_entry.user_id, new_entry.user_type))
    return new_entry
12,111
https://github.com/mesbahamin/chronophore/blob/ee140c61b4dfada966f078de8304bac737cec6f7/chronophore/controller.py#L139-L174
[ "def", "parse_oxi_states", "(", "self", ",", "data", ")", ":", "try", ":", "oxi_states", "=", "{", "data", "[", "\"_atom_type_symbol\"", "]", "[", "i", "]", ":", "str2float", "(", "data", "[", "\"_atom_type_oxidation_number\"", "]", "[", "i", "]", ")", "for", "i", "in", "range", "(", "len", "(", "data", "[", "\"_atom_type_symbol\"", "]", ")", ")", "}", "# attempt to strip oxidation state from _atom_type_symbol", "# in case the label does not contain an oxidation state", "for", "i", ",", "symbol", "in", "enumerate", "(", "data", "[", "\"_atom_type_symbol\"", "]", ")", ":", "oxi_states", "[", "re", ".", "sub", "(", "r\"\\d?[\\+,\\-]?$\"", ",", "\"\"", ",", "symbol", ")", "]", "=", "str2float", "(", "data", "[", "\"_atom_type_oxidation_number\"", "]", "[", "i", "]", ")", "except", "(", "ValueError", ",", "KeyError", ")", ":", "oxi_states", "=", "None", "return", "oxi_states" ]
Sign out of an existing entry in the timesheet . If the user forgot to sign out flag the entry .
def sign_out(entry, time_out=None, forgot=False):  # noqa
    """Sign out of an existing timesheet entry.

    If the user forgot to sign out, flag the entry instead of setting a
    sign-out time.
    """
    if time_out is None:
        time_out = datetime.today().time()
    if not forgot:
        entry.time_out = time_out
        logger.info('{} ({}) signed out.'.format(entry.user_id, entry.user_type))
        return entry
    # forgotten: leave time_out unset, just mark the entry
    entry.forgot_sign_out = True
    logger.info('{} forgot to sign out on {}.'.format(entry.user_id, entry.date))
    return entry
12,112
https://github.com/mesbahamin/chronophore/blob/ee140c61b4dfada966f078de8304bac737cec6f7/chronophore/controller.py#L177-L199
[ "async", "def", "services", "(", "self", ",", "*", ",", "dc", "=", "None", ",", "watch", "=", "None", ",", "consistency", "=", "None", ")", ":", "params", "=", "{", "\"dc\"", ":", "dc", "}", "response", "=", "await", "self", ".", "_api", ".", "get", "(", "\"/v1/catalog/services\"", ",", "params", "=", "params", ",", "watch", "=", "watch", ",", "consistency", "=", "consistency", ")", "return", "consul", "(", "response", ")" ]
Delete a signed in entry .
def undo_sign_in(entry, session=None):  # noqa
    """Delete a signed-in entry from the database.

    Raises ValueError if the entry cannot be found.
    """
    if session is None:
        session = Session()
    match = (
        session.query(Entry)
        .filter(Entry.uuid == entry.uuid)
        .one_or_none()
    )
    if not match:
        error_message = 'Entry not found: {}'.format(entry)
        logger.error(error_message)
        raise ValueError(error_message)
    logger.info('Undo sign in: {}'.format(match.user_id))
    logger.debug('Undo sign in: {}'.format(match))
    session.delete(match)
    session.commit()
12,113
https://github.com/mesbahamin/chronophore/blob/ee140c61b4dfada966f078de8304bac737cec6f7/chronophore/controller.py#L202-L228
[ "def", "parse_torrent_properties", "(", "table_datas", ")", ":", "output", "=", "{", "'category'", ":", "table_datas", "[", "0", "]", ".", "text", ",", "'subcategory'", ":", "None", ",", "'quality'", ":", "None", ",", "'language'", ":", "None", "}", "for", "i", "in", "range", "(", "1", ",", "len", "(", "table_datas", ")", ")", ":", "td", "=", "table_datas", "[", "i", "]", "url", "=", "td", ".", "get", "(", "'href'", ")", "params", "=", "Parser", ".", "get_params", "(", "url", ")", "if", "Parser", ".", "is_subcategory", "(", "params", ")", "and", "not", "output", "[", "'subcategory'", "]", ":", "output", "[", "'subcategory'", "]", "=", "td", ".", "text", "elif", "Parser", ".", "is_quality", "(", "params", ")", "and", "not", "output", "[", "'quality'", "]", ":", "output", "[", "'quality'", "]", "=", "td", ".", "text", "elif", "Parser", ".", "is_language", "(", "params", ")", "and", "not", "output", "[", "'language'", "]", ":", "output", "[", "'language'", "]", "=", "td", ".", "text", "return", "output" ]
Sign in a signed out entry .
def undo_sign_out(entry, session=None):  # noqa
    """Sign a signed-out entry back in by clearing its time_out.

    Raises ValueError if the entry cannot be found.
    """
    if session is None:
        session = Session()
    match = (
        session.query(Entry)
        .filter(Entry.uuid == entry.uuid)
        .one_or_none()
    )
    if not match:
        error_message = 'Entry not found: {}'.format(entry)
        logger.error(error_message)
        raise ValueError(error_message)
    logger.info('Undo sign out: {}'.format(match.user_id))
    logger.debug('Undo sign out: {}'.format(match))
    match.time_out = None
    session.add(match)
    session.commit()
12,114
https://github.com/mesbahamin/chronophore/blob/ee140c61b4dfada966f078de8304bac737cec6f7/chronophore/controller.py#L231-L258
[ "def", "initialize", "(", "*", "*", "kwargs", ")", ":", "global", "config", "config_opts", "=", "kwargs", ".", "setdefault", "(", "'config'", ",", "{", "}", ")", "if", "isinstance", "(", "config_opts", ",", "basestring", ")", ":", "config_opts", "=", "{", "'config_filename'", ":", "config_opts", "}", "kwargs", "[", "'config'", "]", "=", "config_opts", "if", "'environment'", "in", "kwargs", ":", "config_opts", "[", "'environment'", "]", "=", "kwargs", "[", "'environment'", "]", "config", ".", "load_config", "(", "*", "*", "config_opts", ")", "# Overlay the subconfig", "if", "kwargs", ".", "get", "(", "'name'", ")", ":", "subconfig", "=", "config", ".", "get", "(", "kwargs", ".", "get", "(", "'name'", ")", ",", "{", "}", ")", "config", ".", "overlay_add", "(", "subconfig", ")", "config", ".", "overlay_add", "(", "app_config", ")" ]
Check user id for validity then sign user in if they are signed out or out if they are signed in .
def sign(user_id, user_type=None, today=None, session=None):  # noqa
    """Check user id for validity, then sign the user in if they are
    signed out, or out if they are signed in.

    Returns a Status describing what happened; raises UnregisteredUser
    if user_id is not in the database.
    """
    if session is None:
        session = Session()
    else:
        session = session
    if today is None:
        today = date.today()
    else:
        today = today
    user = (
        session.query(User)
        .filter(User.user_id == user_id)
        .one_or_none()
    )
    if user:
        # the user's open (no time_out) entries for today
        signed_in_entries = (
            user.entries
            .filter(Entry.date == today)
            .filter(Entry.time_out.is_(None))
            .all()
        )
        if not signed_in_entries:
            # nothing open: sign in with a fresh entry
            new_entry = sign_in(user, user_type=user_type)
            session.add(new_entry)
            status = Status(
                valid=True,
                in_or_out='in',
                user_name=get_user_name(user),
                user_type=new_entry.user_type,
                entry=new_entry,
            )
        else:
            # close every open entry; status reflects the last one closed
            for entry in signed_in_entries:
                signed_out_entry = sign_out(entry)
                session.add(signed_out_entry)
                status = Status(
                    valid=True,
                    in_or_out='out',
                    user_name=get_user_name(user),
                    user_type=signed_out_entry.user_type,
                    entry=signed_out_entry,
                )
        session.commit()
    else:
        raise UnregisteredUser(
            '{} not registered. Please register at the front desk.'.format(user_id)
        )
    logger.debug(status)
    return status
12,115
https://github.com/mesbahamin/chronophore/blob/ee140c61b4dfada966f078de8304bac737cec6f7/chronophore/controller.py#L261-L330
[ "def", "find_threads_by_name", "(", "self", ",", "name", ",", "bExactMatch", "=", "True", ")", ":", "found_threads", "=", "list", "(", ")", "# Find threads with no name.", "if", "name", "is", "None", ":", "for", "aThread", "in", "self", ".", "iter_threads", "(", ")", ":", "if", "aThread", ".", "get_name", "(", ")", "is", "None", ":", "found_threads", ".", "append", "(", "aThread", ")", "# Find threads matching the given name exactly.", "elif", "bExactMatch", ":", "for", "aThread", "in", "self", ".", "iter_threads", "(", ")", ":", "if", "aThread", ".", "get_name", "(", ")", "==", "name", ":", "found_threads", ".", "append", "(", "aThread", ")", "# Find threads whose names match the given substring.", "else", ":", "for", "aThread", "in", "self", ".", "iter_threads", "(", ")", ":", "t_name", "=", "aThread", ".", "get_name", "(", ")", "if", "t_name", "is", "not", "None", "and", "name", "in", "t_name", ":", "found_threads", ".", "append", "(", "aThread", ")", "return", "found_threads" ]
Override for better log format
def format_request(self):
    """Override for a better log line format."""
    template = '{now} {status} {requestline} ({client_address}) {response_length} {delta}ms'
    requestline = getattr(self, 'requestline')
    # Original "GET / HTTP/1.1" -> "GET /" (drop the trailing protocol token)
    requestline = ' '.join(requestline.split(' ')[:-1]) if requestline else '???'
    # elapsed time in milliseconds, '-' while the request is still running
    delta = '%.2f' % ((self.time_finish - self.time_start) * 1000) if self.time_finish else '-'
    addr = self.client_address
    if isinstance(addr, tuple):
        addr = addr[0]
    return template.format(
        now=datetime.datetime.now().replace(microsecond=0),
        status=str(self._get_status_int()),
        requestline=requestline,
        client_address=addr,
        response_length=self.response_length or '-',
        delta=delta,
    )
12,116
https://github.com/reorx/torext/blob/84c4300ebc7fab0dbd11cf8b020bc7d4d1570171/torext/gevent_wsgi.py#L21-L52
[ "async", "def", "_wait_exponentially", "(", "self", ",", "exception", ",", "max_wait_time", "=", "300", ")", ":", "wait_time", "=", "min", "(", "(", "2", "**", "self", ".", "_connection_attempts", ")", "+", "random", ".", "random", "(", ")", ",", "max_wait_time", ")", "try", ":", "wait_time", "=", "exception", ".", "response", "[", "\"headers\"", "]", "[", "\"Retry-After\"", "]", "except", "(", "KeyError", ",", "AttributeError", ")", ":", "pass", "self", ".", "_logger", ".", "debug", "(", "\"Waiting %s seconds before reconnecting.\"", ",", "wait_time", ")", "await", "asyncio", ".", "sleep", "(", "float", "(", "wait_time", ")", ")" ]
This method copies the code from pywsgi . WSGIHandler . handle_error change the write part to be a reflection of traceback and environ
def handle_error(self, type_, value, tb):
    """Copied from pywsgi.WSGIHandler.handle_error, with the write part
    changed to emit a reflection of the traceback and environ.
    """
    if not issubclass(type_, pywsgi.GreenletExit):
        # let the event loop's error handler record the exception
        self.server.loop.handle_error(self.environ, type_, value, tb)
    if self.response_length:
        # a response was already (partly) sent; just drop the connection
        self.close_connection = True
    else:
        tb_stream = traceback.format_exception(type_, value, tb)
        # break the reference cycle created by holding the traceback
        del tb
        tb_stream.append('\n')
        tb_stream.append(pprint.pformat(self.environ))
        body = ''.join(tb_stream)
        headers = pywsgi._INTERNAL_ERROR_HEADERS[:]
        # index 2 is assumed to be the Content-Length header in
        # pywsgi._INTERNAL_ERROR_HEADERS -- TODO confirm against gevent version
        headers[2] = ('Content-Length', str(len(body)))
        self.start_response(pywsgi._INTERNAL_ERROR_STATUS, headers)
        self.write(body)
12,117
https://github.com/reorx/torext/blob/84c4300ebc7fab0dbd11cf8b020bc7d4d1570171/torext/gevent_wsgi.py#L82-L100
[ "def", "addSubsumableToGroups", "(", "self", ",", "proteinIds", ",", "groupIds", ")", ":", "for", "groupId", "in", "AUX", ".", "toList", "(", "groupIds", ")", ":", "self", ".", "groups", "[", "groupId", "]", ".", "addSubsumableProteins", "(", "proteinIds", ")", "self", ".", "_addProteinIdsToGroupMapping", "(", "proteinIds", ",", "groupId", ")" ]
Remove all measurements from self.measurements. Reset the measurement counter. All IDs are invalidated.
def clear_measurements(self):
    """Remove all measurements from self.measurements and reset the
    measurement counter. All previously returned IDs are invalidated."""
    for key in list(self.measurements.keys()):
        del self.measurements[key]
    self.meas_counter = -1
12,118
https://github.com/geophysics-ubonn/crtomo_tools/blob/27c3e21a557f8df1c12455b96c4c2e00e08a5b4a/lib/crtomo/configManager.py#L56-L63
[ "def", "_get_cursor", "(", "self", ")", ":", "_options", "=", "self", ".", "_get_options", "(", ")", "conn", "=", "psycopg2", ".", "connect", "(", "host", "=", "_options", "[", "'host'", "]", ",", "user", "=", "_options", "[", "'user'", "]", ",", "password", "=", "_options", "[", "'pass'", "]", ",", "dbname", "=", "_options", "[", "'db'", "]", ",", "port", "=", "_options", "[", "'port'", "]", ")", "cursor", "=", "conn", ".", "cursor", "(", ")", "try", ":", "yield", "cursor", "log", ".", "debug", "(", "'Connected to POSTGRES DB'", ")", "except", "psycopg2", ".", "DatabaseError", "as", "err", ":", "log", ".", "exception", "(", "'Error in ext_pillar POSTGRES: %s'", ",", "err", ".", "args", ")", "finally", ":", "conn", ".", "close", "(", ")" ]
Add new measurements to this instance
def add_measurements(self, measurements):
    """Store one or more measurement vectors and return the new id(s).

    A single id is returned for one dataset, a list of ids otherwise.
    """
    data = np.atleast_2d(measurements)
    if self.configs is None:
        raise Exception('must read in configuration before measurements can be stored')
    nr_configs = self.configs.shape[0]
    # accommodate transposed input
    if data.shape[1] != nr_configs:
        if data.shape[0] != nr_configs:
            raise Exception('Number of measurements does not match number of configs')
        data = data.T
    new_ids = []
    for row in data:
        mid = self._get_next_index()
        self.measurements[mid] = row.copy()
        new_ids.append(mid)
    return new_ids[0] if len(new_ids) == 1 else new_ids
12,119
https://github.com/geophysics-ubonn/crtomo_tools/blob/27c3e21a557f8df1c12455b96c4c2e00e08a5b4a/lib/crtomo/configManager.py#L120-L173
[ "def", "diag_ksl", "(", "A", ",", "y0", ",", "tau", ",", "verb", "=", "1", ",", "scheme", "=", "'symm'", ",", "space", "=", "8", ",", "rmax", "=", "2000", ")", ":", "y0", "=", "y0", ".", "round", "(", "1e-14", ")", "# This will fix ranks", "# to be no more than maximal reasonable.", "# Fortran part doesn't handle excessive ranks", "ry", "=", "y0", ".", "r", ".", "copy", "(", ")", "if", "scheme", "is", "'symm'", ":", "tp", "=", "2", "else", ":", "tp", "=", "1", "# Check for dtype", "y", "=", "tt", ".", "vector", "(", ")", "if", "np", ".", "iscomplex", "(", "A", ".", "core", ")", ".", "any", "(", ")", "or", "np", ".", "iscomplex", "(", "y0", ".", "core", ")", ".", "any", "(", ")", ":", "dyn_tt", ".", "dyn_diag_tt", ".", "ztt_diag_ksl", "(", "y0", ".", "d", ",", "A", ".", "n", ",", "A", ".", "r", ",", "A", ".", "core", "+", "0j", ",", "y0", ".", "core", "+", "0j", ",", "ry", ",", "tau", ",", "rmax", ",", "0", ",", "10", ",", "verb", ",", "tp", ",", "space", ")", "y", ".", "core", "=", "dyn_tt", ".", "dyn_diag_tt", ".", "zresult_core", ".", "copy", "(", ")", "else", ":", "A", ".", "core", "=", "np", ".", "real", "(", "A", ".", "core", ")", "y0", ".", "core", "=", "np", ".", "real", "(", "y0", ".", "core", ")", "dyn_tt", ".", "dyn_diag_tt", ".", "dtt_diag_ksl", "(", "y0", ".", "d", ",", "A", ".", "n", ",", "A", ".", "r", ",", "A", ".", "core", ",", "y0", ".", "core", ",", "ry", ",", "tau", ",", "rmax", ",", "0", ",", "10", ",", "verb", ",", "tp", ",", "space", ")", "y", ".", "core", "=", "dyn_tt", ".", "dyn_diag_tt", ".", "dresult_core", ".", "copy", "(", ")", "dyn_tt", ".", "dyn_diag_tt", ".", "deallocate_result", "(", ")", "y", ".", "d", "=", "y0", ".", "d", "y", ".", "n", "=", "A", ".", "n", ".", "copy", "(", ")", "y", ".", "r", "=", "ry", "y", ".", "get_ps", "(", ")", "return", "y" ]
For a given set of current injections AB generate all possible unique potential measurements .
def gen_all_voltages_for_injections(self, injections_raw):
    """For a given set of current injections (AB), generate all possible
    unique potential measurements (MN) and return the resulting configs."""
    injections = injections_raw.astype(int)
    nr_elecs = self.nr_electrodes
    quadpoles = []
    for ab in injections:
        # current electrodes as sorted, zero-based indices
        current = np.sort(ab) - 1
        # voltage candidates: every electrode except the current pair
        velecs = list(range(1, nr_elecs + 1))
        del velecs[current[1]]
        del velecs[current[0]]
        for m, n in itertools.permutations(velecs, 2):
            quadpoles.append((ab[0], ab[1], m, n))
    configs_unsorted = np.array(quadpoles)
    # normalize ordering within the AB pair and within the MN pair
    configs_sorted = np.hstack((
        np.sort(configs_unsorted[:, 0:2], axis=1),
        np.sort(configs_unsorted[:, 2:4], axis=1),
    ))
    configs = self.remove_duplicates(configs_sorted)
    self.add_to_configs(configs)
    self.remove_duplicates()
    return configs
12,120
https://github.com/geophysics-ubonn/crtomo_tools/blob/27c3e21a557f8df1c12455b96c4c2e00e08a5b4a/lib/crtomo/configManager.py#L893-L954
[ "def", "disconnect", "(", "self", ",", "receiver", ")", ":", "if", "receiver", "not", "in", "self", ".", "receivers", ".", "keys", "(", ")", ":", "raise", "Exception", "(", "\"No receiver %s was registered\"", "%", "receiver", ")", "self", ".", "receivers", "[", "receiver", "]", ".", "disconnect", "(", ")", "del", "(", "self", ".", "receivers", "[", "receiver", "]", ")", "self", ".", "__log", ".", "debug", "(", "\"Receiver %s disconnected\"", "%", "receiver", ")" ]
Generate Wenner measurement configurations .
def gen_wenner(self, a):
    """Generate Wenner measurement configurations with spacing *a*.

    Builds all configurations (i, i+a, i+2a, i+3a) that fit on the
    electrode layout, stores them internally via ``add_to_configs``
    and returns them as an array.
    """
    quads = [
        (start, start + a, start + 2 * a, start + 3 * a)
        for start in range(1, self.nr_electrodes - 3 * a + 1)
    ]
    configs = np.array(quads)
    self.add_to_configs(configs)
    return configs
12,121
https://github.com/geophysics-ubonn/crtomo_tools/blob/27c3e21a557f8df1c12455b96c4c2e00e08a5b4a/lib/crtomo/configManager.py#L1108-L1129
[ "async", "def", "copy_to_table", "(", "self", ",", "table_name", ",", "*", ",", "source", ",", "columns", "=", "None", ",", "schema_name", "=", "None", ",", "timeout", "=", "None", ",", "format", "=", "None", ",", "oids", "=", "None", ",", "freeze", "=", "None", ",", "delimiter", "=", "None", ",", "null", "=", "None", ",", "header", "=", "None", ",", "quote", "=", "None", ",", "escape", "=", "None", ",", "force_quote", "=", "None", ",", "force_not_null", "=", "None", ",", "force_null", "=", "None", ",", "encoding", "=", "None", ")", ":", "tabname", "=", "utils", ".", "_quote_ident", "(", "table_name", ")", "if", "schema_name", ":", "tabname", "=", "utils", ".", "_quote_ident", "(", "schema_name", ")", "+", "'.'", "+", "tabname", "if", "columns", ":", "cols", "=", "'({})'", ".", "format", "(", "', '", ".", "join", "(", "utils", ".", "_quote_ident", "(", "c", ")", "for", "c", "in", "columns", ")", ")", "else", ":", "cols", "=", "''", "opts", "=", "self", ".", "_format_copy_opts", "(", "format", "=", "format", ",", "oids", "=", "oids", ",", "freeze", "=", "freeze", ",", "delimiter", "=", "delimiter", ",", "null", "=", "null", ",", "header", "=", "header", ",", "quote", "=", "quote", ",", "escape", "=", "escape", ",", "force_not_null", "=", "force_not_null", ",", "force_null", "=", "force_null", ",", "encoding", "=", "encoding", ")", "copy_stmt", "=", "'COPY {tab}{cols} FROM STDIN {opts}'", ".", "format", "(", "tab", "=", "tabname", ",", "cols", "=", "cols", ",", "opts", "=", "opts", ")", "return", "await", "self", ".", "_copy_in", "(", "copy_stmt", ",", "source", ",", "timeout", ")" ]
For a given set of quadrupoles generate and return reciprocals
def gen_reciprocals(self, quadrupoles):
    """Return the reciprocal configurations for the given quadrupoles.

    Each ABMN quadrupole is reversed to MNAB; the electrode indices
    are then sorted in ascending order within the new AB pair and
    within the new MN pair.
    """
    flipped = quadrupoles[:, ::-1].copy()
    flipped[:, 0:2] = np.sort(flipped[:, 0:2], axis=1)
    flipped[:, 2:4] = np.sort(flipped[:, 2:4], axis=1)
    return flipped
12,122
https://github.com/geophysics-ubonn/crtomo_tools/blob/27c3e21a557f8df1c12455b96c4c2e00e08a5b4a/lib/crtomo/configManager.py#L1253-L1259
[ "def", "duration", "(", "self", ")", ":", "if", "self", ".", "completion_ts", ":", "end", "=", "self", ".", "completed", "else", ":", "end", "=", "datetime", ".", "utcnow", "(", ")", "return", "end", "-", "self", ".", "started" ]
Compute analytical geometrical factors .
def compute_K_factors(self, spacing=None, configs=None, numerical=False,
                      elem_file=None, elec_file=None):
    """Compute geometric factors for the stored (or given) configurations.

    Parameters
    ----------
    spacing : float, optional
        Electrode spacing used by the analytical computation.
    configs : numpy.ndarray, optional
        ABMN configurations; defaults to ``self.configs``.
    numerical : bool, optional
        If True, compute K numerically from the given grid files;
        otherwise compute it analytically from the spacing.
    elem_file, elec_file : str, optional
        Grid files used only for the numerical computation.

    Returns
    -------
    K : geometric factors as returned by the edfK helpers.
    """
    abmn = self.configs if configs is None else configs
    if numerical:
        # NOTE(review): rho=100 appears to be a fixed background
        # resistivity used for the numerical forward run — confirm.
        settings = {
            'elem': elem_file,
            'elec': elec_file,
            'rho': 100,
        }
        return edfK.compute_K_numerical(abmn, settings)
    return edfK.compute_K_analytical(abmn, spacing=spacing)
12,123
https://github.com/geophysics-ubonn/crtomo_tools/blob/27c3e21a557f8df1c12455b96c4c2e00e08a5b4a/lib/crtomo/configManager.py#L1299-L1319
[ "def", "run_all", "(", ")", ":", "DATA_DIR", "=", "\"/home/annaho/TheCannon/code/apogee_lamost/xcalib_4labels\"", "dates", "=", "os", ".", "listdir", "(", "\"/home/share/LAMOST/DR2/DR2_release\"", ")", "dates", "=", "np", ".", "array", "(", "dates", ")", "dates", "=", "np", ".", "delete", "(", "dates", ",", "np", ".", "where", "(", "dates", "==", "'.directory'", ")", "[", "0", "]", "[", "0", "]", ")", "dates", "=", "np", ".", "delete", "(", "dates", ",", "np", ".", "where", "(", "dates", "==", "'all_folders.list'", ")", "[", "0", "]", "[", "0", "]", ")", "dates", "=", "np", ".", "delete", "(", "dates", ",", "np", ".", "where", "(", "dates", "==", "'dr2.lis'", ")", "[", "0", "]", "[", "0", "]", ")", "for", "date", "in", "dates", ":", "if", "glob", ".", "glob", "(", "\"*%s*.txt\"", "%", "date", ")", ":", "print", "(", "\"%s done\"", "%", "date", ")", "else", ":", "print", "(", "\"running %s\"", "%", "date", ")", "run_one_date", "(", "date", ")" ]
Checks if this rule applies to the given src and dst paths based on the src pattern and dst pattern given in the constructor .
def applies(self, src, dst):
    """Check whether this rule applies to the given src and dst paths.

    A configured source pattern must match *src*, and a configured
    destination pattern must match *dst*.  An unset (falsy) pattern
    imposes no constraint on its path.
    """
    checks = (
        (self._src_pattern, src),
        (self._dst_pattern, dst),
    )
    for pattern, path in checks:
        if pattern and (path is None or re.search(pattern, path) is None):
            return False
    return True
12,124
https://github.com/albert12132/templar/blob/39851c89730ab69e5c73d0a46adca2a44ecc4165/templar/api/rules/core.py#L33-L43
[ "def", "mark_job_as_canceling", "(", "self", ",", "job_id", ")", ":", "job", ",", "_", "=", "self", ".", "_update_job_state", "(", "job_id", ",", "State", ".", "CANCELING", ")", "return", "job" ]
Create a new UnboundNode representing a given class .
def _createunbound(kls, **info):
    """Create a new UnboundNode instance representing the class *kls*.

    Bitfields, ctypes structures (anything exposing ``_fields_``),
    ctypes arrays and plain simple types each map to their own
    unbound node type.
    """
    if issubclass(kls, Bitfield):
        node_cls = UnboundBitfieldNode
    elif hasattr(kls, '_fields_'):
        node_cls = UnboundStructureNode
    elif issubclass(kls, ctypes.Array):
        node_cls = UnboundArrayNode
    else:
        node_cls = UnboundSimpleNode
    return node_cls(type=kls, **info)
12,125
https://github.com/NJDFan/ctypes-bitfield/blob/ae76b1dcfef7ecc90bd1900735b94ddee41a6376/bitfield/walk.py#L240-L251
[ "def", "competition_list_files_cli", "(", "self", ",", "competition", ",", "competition_opt", "=", "None", ",", "csv_display", "=", "False", ",", "quiet", "=", "False", ")", ":", "competition", "=", "competition", "or", "competition_opt", "if", "competition", "is", "None", ":", "competition", "=", "self", ".", "get_config_value", "(", "self", ".", "CONFIG_NAME_COMPETITION", ")", "if", "competition", "is", "not", "None", "and", "not", "quiet", ":", "print", "(", "'Using competition: '", "+", "competition", ")", "if", "competition", "is", "None", ":", "raise", "ValueError", "(", "'No competition specified'", ")", "else", ":", "files", "=", "self", ".", "competition_list_files", "(", "competition", ")", "fields", "=", "[", "'name'", ",", "'size'", ",", "'creationDate'", "]", "if", "files", ":", "if", "csv_display", ":", "self", ".", "print_csv", "(", "files", ",", "fields", ")", "else", ":", "self", ".", "print_table", "(", "files", ",", "fields", ")", "else", ":", "print", "(", "'No files found'", ")" ]
Create a new BoundNode representing a given object .
def _createbound(obj):
    """Create a new BoundNode representing the live object *obj*."""
    # Objects may define a custom hook for their unbound reference class.
    try:
        ref_cls = obj._unboundreference_()
    except AttributeError:
        ref_cls = type(obj)
    unbound = _createunbound(ref_cls)

    def valueget():
        return obj

    # Pick the bound node type whose unbound counterpart matches.
    for candidate in (BoundBitfieldNode, BoundStructureNode, BoundArrayNode):
        if isinstance(unbound, candidate._unboundtype):
            node_cls = candidate
            break
    else:
        node_cls = BoundSimpleNode
    return node_cls(unbound, valueget)
12,126
https://github.com/NJDFan/ctypes-bitfield/blob/ae76b1dcfef7ecc90bd1900735b94ddee41a6376/bitfield/walk.py#L361-L380
[ "def", "get_measurement_responses", "(", "self", ")", ":", "# take configurations from first tomodir", "configs", "=", "self", ".", "tds", "[", "sorted", "(", "self", ".", "tds", ".", "keys", "(", ")", ")", "[", "0", "]", "]", ".", "configs", ".", "configs", "measurements", "=", "self", ".", "measurements", "(", ")", "responses", "=", "{", "}", "for", "config", ",", "sip_measurement", "in", "zip", "(", "configs", ",", "np", ".", "rollaxis", "(", "measurements", ",", "1", ")", ")", ":", "sip", "=", "sip_response", "(", "frequencies", "=", "self", ".", "frequencies", ",", "rmag", "=", "sip_measurement", "[", ":", ",", "0", "]", ",", "rpha", "=", "sip_measurement", "[", ":", ",", "1", "]", ")", "responses", "[", "tuple", "(", "config", ")", "]", "=", "sip", "return", "responses" ]
Print a view of obj where obj is either a ctypes - derived class or an instance of such a class . Any additional keyword arguments are passed directly to the print function . This is mostly useful to introspect structures from an interactive session .
def display(obj, skiphidden=True, **printargs):
    """Print a tabular view of *obj*, a ctypes-derived class or an
    instance of such a class.

    Walks the node tree rooted at ``findnode(obj)`` and prints one row
    per node: the indented path, the address/offset, and either the
    type name (unbound/class view) or the current value (bound/instance
    view).  Any additional keyword arguments are passed directly to the
    ``print`` function.  Mostly useful to introspect structures from an
    interactive session.
    """
    top = findnode(obj)
    #-------------------------------------------------------------------
    # Iterate through the entire structure turning all the nodes into
    # tuples of strings for display.
    # Width of the widest possible hex offset for this structure.
    maxhex = len(hex(ctypes.sizeof(top.type))) - 2

    def addrformat(addr):
        if isinstance(addr, int):
            return "0x{0:0{1}X}".format(addr, maxhex)
        else:
            # Non-integer offsets carry a fractional byte, i.e. a bit
            # offset (bitfield members): render as byte'bit.
            intpart = int(addr)
            fracbits = int((addr - intpart) * 8)
            return "0x{0:0{1}X}'{2}".format(intpart, maxhex, fracbits)

    def formatval(here):
        # Simple leaf nodes show Type(value); aggregates show the value.
        if isinstance(here, BoundSimpleNode):
            return "{0}({1})".format(here.type.__name__, here.value)
        else:
            return str(here.value)

    if isinstance(top, UnboundNode):
        # Class view: no live values, show the member types instead.
        headers = ['Path', 'Addr', 'Type']
        results = [
            ((' ' * n.depth) + n.name, addrformat(n.baseoffset), n.type.__name__)
            for n in walknode(top, skiphidden)
        ]
    else:
        # Instance view: show the current member values.
        headers = ['Path', 'Addr', 'Value']
        results = [
            ((' ' * n.depth) + n.name, addrformat(n.baseoffset), formatval(n))
            for n in walknode(top, skiphidden)
        ]

    #-------------------------------------------------------------------
    # Determine the maximum width of the text in each column, make the
    # column always that wide.
    widths = [
        max(max(len(d[col]) for d in results), len(h))
        for col, h in enumerate(headers)
    ]

    #-------------------------------------------------------------------
    # Print out the tabular data.
    def lp(args):
        print(*args, **printargs)

    lp(d.center(w) for d, w in zip(headers, widths))
    lp('-' * w for w in widths)
    for r in results:
        lp(d.ljust(w) for d, w in zip(r, widths))
12,127
https://github.com/NJDFan/ctypes-bitfield/blob/ae76b1dcfef7ecc90bd1900735b94ddee41a6376/bitfield/walk.py#L507-L567
[ "def", "delete_persistent_data", "(", "role", ",", "zk_node", ")", ":", "if", "role", ":", "destroy_volumes", "(", "role", ")", "unreserve_resources", "(", "role", ")", "if", "zk_node", ":", "delete_zk_node", "(", "zk_node", ")" ]
A list of the parts of the path with the root node returning an empty list .
def pathparts(self):
    """Return the list of path components from the root to this node.

    The root node (which has no parent) yields an empty list.
    """
    try:
        return self.parent.pathparts() + [self.name]
    except AttributeError:
        # No parent attribute: this is the root node.
        return []
12,128
https://github.com/NJDFan/ctypes-bitfield/blob/ae76b1dcfef7ecc90bd1900735b94ddee41a6376/bitfield/walk.py#L166-L175
[ "def", "set_USRdict", "(", "self", ",", "USRdict", "=", "{", "}", ")", ":", "self", ".", "_check_inputs", "(", "USRdict", "=", "USRdict", ")", "self", ".", "_USRdict", "=", "USRdict" ]
The offset of this node from the root node .
def baseoffset(self):
    """Return the offset of this node from the root node.

    Computed as the parent's base offset plus the local offset; the
    root node (no parent) simply reports its own offset.
    """
    try:
        parent_base = self.parent.baseoffset
    except AttributeError:
        # Root node: no parent to accumulate from.
        return self.offset
    return parent_base + self.offset
12,129
https://github.com/NJDFan/ctypes-bitfield/blob/ae76b1dcfef7ecc90bd1900735b94ddee41a6376/bitfield/walk.py#L192-L197
[ "def", "generate_http_manifest", "(", "self", ")", ":", "base_path", "=", "os", ".", "path", ".", "dirname", "(", "self", ".", "translate_path", "(", "self", ".", "path", ")", ")", "self", ".", "dataset", "=", "dtoolcore", ".", "DataSet", ".", "from_uri", "(", "base_path", ")", "admin_metadata_fpath", "=", "os", ".", "path", ".", "join", "(", "base_path", ",", "\".dtool\"", ",", "\"dtool\"", ")", "with", "open", "(", "admin_metadata_fpath", ")", "as", "fh", ":", "admin_metadata", "=", "json", ".", "load", "(", "fh", ")", "http_manifest", "=", "{", "\"admin_metadata\"", ":", "admin_metadata", ",", "\"manifest_url\"", ":", "self", ".", "generate_url", "(", "\".dtool/manifest.json\"", ")", ",", "\"readme_url\"", ":", "self", ".", "generate_url", "(", "\"README.yml\"", ")", ",", "\"overlays\"", ":", "self", ".", "generate_overlay_urls", "(", ")", ",", "\"item_urls\"", ":", "self", ".", "generate_item_urls", "(", ")", "}", "return", "bytes", "(", "json", ".", "dumps", "(", "http_manifest", ")", ",", "\"utf-8\"", ")" ]
Check if the two numbers are almost equal
def _almost_equal ( a , b ) : # arbitrary small number!!! threshold = 1e-9 diff = np . abs ( a - b ) return ( diff < threshold )
12,130
https://github.com/geophysics-ubonn/crtomo_tools/blob/27c3e21a557f8df1c12455b96c4c2e00e08a5b4a/src/grid_translate_model.py#L62-L68
[ "def", "exec_start", "(", "self", ",", "exec_id", ",", "detach", "=", "False", ",", "tty", "=", "False", ",", "stream", "=", "False", ",", "socket", "=", "False", ",", "demux", "=", "False", ")", ":", "# we want opened socket if socket == True", "data", "=", "{", "'Tty'", ":", "tty", ",", "'Detach'", ":", "detach", "}", "headers", "=", "{", "}", "if", "detach", "else", "{", "'Connection'", ":", "'Upgrade'", ",", "'Upgrade'", ":", "'tcp'", "}", "res", "=", "self", ".", "_post_json", "(", "self", ".", "_url", "(", "'/exec/{0}/start'", ",", "exec_id", ")", ",", "headers", "=", "headers", ",", "data", "=", "data", ",", "stream", "=", "True", ")", "if", "detach", ":", "return", "self", ".", "_result", "(", "res", ")", "if", "socket", ":", "return", "self", ".", "_get_raw_response_socket", "(", "res", ")", "return", "self", ".", "_read_from_socket", "(", "res", ",", "stream", ",", "tty", "=", "tty", ",", "demux", "=", "demux", ")" ]
Complement the alleles of this variant .
def complement_alleles(self):
    """Complement the alleles of this variant in place.

    Each allele is complemented via the module-level
    ``complement_alleles`` helper, and the complemented list is
    re-encoded through ``_encode_alleles``.
    """
    complemented = [complement_alleles(allele) for allele in self.alleles]
    self.alleles = self._encode_alleles(complemented)
12,131
https://github.com/pgxcentre/geneparse/blob/f698f9708af4c7962d384a70a5a14006b1cb7108/geneparse/core.py#L139-L150
[ "def", "process_msg", "(", "self", ",", "msg", ")", ":", "jmsg", "=", "json", ".", "loads", "(", "msg", ")", "msgtype", "=", "jmsg", "[", "'MessageType'", "]", "msgdata", "=", "jmsg", "[", "'Data'", "]", "_LOGGER", ".", "debug", "(", "'New websocket message recieved of type: %s'", ",", "msgtype", ")", "if", "msgtype", "==", "'Sessions'", ":", "self", ".", "_sessions", "=", "msgdata", "# Check for new devices and update as needed.", "self", ".", "update_device_list", "(", "self", ".", "_sessions", ")", "\"\"\"\n May process other message types in the future.\n Other known types are:\n - PlaybackStarted\n - PlaybackStopped\n - SessionEnded\n \"\"\"" ]
Flips the coding of the alleles .
def flip_coded(self):
    """Flip the allele coding of these genotypes in place.

    Dosages are mirrored (g becomes 2 - g) and the reference and
    coded alleles are exchanged to match.
    """
    # Mirror the dosage values around 1.
    self.genotypes = 2 - self.genotypes
    # Swap which allele is considered coded.
    self.coded, self.reference = self.reference, self.coded
12,132
https://github.com/pgxcentre/geneparse/blob/f698f9708af4c7962d384a70a5a14006b1cb7108/geneparse/core.py#L229-L232
[ "def", "pull_session", "(", "session_id", "=", "None", ",", "url", "=", "'default'", ",", "io_loop", "=", "None", ",", "arguments", "=", "None", ")", ":", "coords", "=", "_SessionCoordinates", "(", "session_id", "=", "session_id", ",", "url", "=", "url", ")", "session", "=", "ClientSession", "(", "session_id", "=", "session_id", ",", "websocket_url", "=", "websocket_url_for_server_url", "(", "coords", ".", "url", ")", ",", "io_loop", "=", "io_loop", ",", "arguments", "=", "arguments", ")", "session", ".", "pull", "(", ")", "return", "session" ]
Flips the strand of the alleles .
def flip_strand(self):
    """Flip the strand of the alleles in place.

    Both the reference and the coded allele are complemented, and
    the underlying variant's alleles are complemented as well.
    """
    for attr in ('reference', 'coded'):
        setattr(self, attr, complement_alleles(getattr(self, attr)))
    self.variant.complement_alleles()
12,133
https://github.com/pgxcentre/geneparse/blob/f698f9708af4c7962d384a70a5a14006b1cb7108/geneparse/core.py#L234-L238
[ "def", "bootstrap_unihan", "(", "metadata", ",", "options", "=", "{", "}", ")", ":", "options", "=", "merge_dict", "(", "UNIHAN_ETL_DEFAULT_OPTIONS", ".", "copy", "(", ")", ",", "options", ")", "p", "=", "unihan", ".", "Packager", "(", "options", ")", "p", ".", "download", "(", ")", "data", "=", "p", ".", "export", "(", ")", "table", "=", "create_unihan_table", "(", "UNIHAN_FIELDS", ",", "metadata", ")", "metadata", ".", "create_all", "(", ")", "metadata", ".", "bind", ".", "execute", "(", "table", ".", "insert", "(", ")", ",", "data", ")" ]
Convert rotation from axis and angle to matrix representation
def rotvec2mat(u, phi):
    """Convert a rotation given as axis and angle to a 3x3 matrix.

    Uses Rodrigues' rotation formula
    (http://en.wikipedia.org/wiki/Rotation_matrix).

    Parameters
    ----------
    u : array_like, shape (3,)
        Rotation axis; it is normalized internally (np.asarray now
        also accepts plain lists/tuples).
    phi : float
        Rotation angle in radians.

    Returns
    -------
    numpy.ndarray, shape (3, 3)
        The rotation matrix.

    Raises
    ------
    ValueError
        If the rotation axis has (near-)zero length.  (Previously a
        bare ``Exception``; ValueError is a subclass, so existing
        ``except Exception`` handlers still work.)
    """
    phi = np.squeeze(phi)
    norm_u = np.linalg.norm(u)
    if norm_u < 1e-12:
        # A zero-length axis does not define a rotation.
        raise ValueError("the rotation vector is equal to zero")
    u = np.asarray(u) / norm_u

    s = np.sin(phi)
    c = np.cos(phi)
    t = 1 - c
    ux, uy, uz = u[0], u[1], u[2]
    return np.array([
        [t * ux * ux + c,      t * ux * uy - s * uz, t * ux * uz + s * uy],
        [t * ux * uy + s * uz, t * uy * uy + c,      t * uy * uz - s * ux],
        [t * ux * uz - s * uy, t * uy * uz + s * ux, t * uz * uz + c],
    ])
12,134
https://github.com/aglie/meerkat/blob/f056a3da7ed3d7cd43edb56a38903cfa146e4b24/meerkat/det2lab_xds.py#L4-L26
[ "def", "edit_config", "(", "filename", ",", "settings", ",", "dry_run", "=", "False", ")", ":", "log", ".", "debug", "(", "\"Reading configuration from %s\"", ",", "filename", ")", "opts", "=", "configparser", ".", "RawConfigParser", "(", ")", "opts", ".", "read", "(", "[", "filename", "]", ")", "for", "section", ",", "options", "in", "settings", ".", "items", "(", ")", ":", "if", "options", "is", "None", ":", "log", ".", "info", "(", "\"Deleting section [%s] from %s\"", ",", "section", ",", "filename", ")", "opts", ".", "remove_section", "(", "section", ")", "else", ":", "if", "not", "opts", ".", "has_section", "(", "section", ")", ":", "log", ".", "debug", "(", "\"Adding new section [%s] to %s\"", ",", "section", ",", "filename", ")", "opts", ".", "add_section", "(", "section", ")", "for", "option", ",", "value", "in", "options", ".", "items", "(", ")", ":", "if", "value", "is", "None", ":", "log", ".", "debug", "(", "\"Deleting %s.%s from %s\"", ",", "section", ",", "option", ",", "filename", ")", "opts", ".", "remove_option", "(", "section", ",", "option", ")", "if", "not", "opts", ".", "options", "(", "section", ")", ":", "log", ".", "info", "(", "\"Deleting empty [%s] section from %s\"", ",", "section", ",", "filename", ")", "opts", ".", "remove_section", "(", "section", ")", "else", ":", "log", ".", "debug", "(", "\"Setting %s.%s to %r in %s\"", ",", "section", ",", "option", ",", "value", ",", "filename", ")", "opts", ".", "set", "(", "section", ",", "option", ",", "value", ")", "log", ".", "info", "(", "\"Writing %s\"", ",", "filename", ")", "if", "not", "dry_run", ":", "with", "open", "(", "filename", ",", "'w'", ")", "as", "f", ":", "opts", ".", "write", "(", "f", ")" ]
Converts pixel coordinates from the frame into a q-vector.
def det2lab_xds(pixels_coord, frame_number, starting_frame, starting_angle,
                oscillation_angle, rotation_axis, wavelength, wavevector,
                NX, NY, pixelsize_x, pixelsize_y, distance_to_detector,
                x_center, y_center, detector_x, detector_y, detector_normal,
                **kwargs):
    """Convert pixel coordinates from a frame into q-vectors (XDS
    geometry convention).

    Parameters are the XDS geometry values: detector basis vectors
    (``detector_x``, ``detector_y``, ``detector_normal``), beam
    ``wavevector`` and ``wavelength``, goniometer ``rotation_axis``,
    oscillation parameters and detector pixel geometry.
    NX/NY are accepted but unused here; **kwargs absorbs extra
    geometry entries — presumably so a parsed XDS header dict can be
    passed wholesale (TODO confirm).

    Returns the tuple ``(h, scattering_vector_mm,
    unit_scattering_vector)`` where ``h`` holds the q-vectors as
    columns.
    """
    # Normalize vector orientation: row vectors (1, 3) become columns.
    array_shape = (1, 3)
    if detector_x.shape == array_shape:
        detector_x = detector_x.T
        detector_y = detector_y.T
        detector_normal = detector_normal.T
    if wavevector.shape == array_shape:
        wavevector = wavevector.T
    if rotation_axis.shape == array_shape:
        rotation_axis = rotation_axis.T

    # Pixel positions relative to the beam center, in millimeters.
    xmm = (pixels_coord[:, [0]] - x_center) * pixelsize_x
    ymm = (pixels_coord[:, [1]] - y_center) * pixelsize_y

    # find scattering vector of each pixel (detector-plane components
    # plus the constant detector-distance component along the normal)
    scattering_vector_mm = np.outer(xmm, detector_x) + \
        np.outer(ymm, detector_y) + \
        distance_to_detector * np.outer(np.ones(shape=xmm.shape),
                                        detector_normal)
    scattering_vector_mm = scattering_vector_mm.T

    # Goniometer angle of each frame, in degrees.
    phi = (frame_number - starting_frame) * oscillation_angle + starting_angle

    # calculating norm for each column
    norms = np.sum(scattering_vector_mm ** 2., axis=0) ** (1. / 2)
    # dividing each scattering vector by its own norm
    unit_scattering_vector = scattering_vector_mm / norms
    # subtracting incident beam vector (Laue equation: q = s/lambda - s0)
    h = unit_scattering_vector / wavelength - \
        np.tile(wavevector, (unit_scattering_vector.shape[1], 1)).T

    # rotating back by -phi about the goniometer axis (degrees -> radians)
    if phi.size == 1:
        h = np.dot(rotvec2mat(rotation_axis.T, -2 * np.pi * phi / 360), h)
    else:
        # One frame angle per column: rotate each q-vector individually.
        for i in range(phi.size):
            h[:, [i]] = np.dot(
                rotvec2mat(rotation_axis.T, -2 * np.pi * phi[i] / 360),
                h[:, [i]])
    return h, scattering_vector_mm, unit_scattering_vector
12,135
https://github.com/aglie/meerkat/blob/f056a3da7ed3d7cd43edb56a38903cfa146e4b24/meerkat/det2lab_xds.py#L29-L75
[ "def", "list_configs", "(", ")", ":", "try", ":", "configs", "=", "snapper", ".", "ListConfigs", "(", ")", "return", "dict", "(", "(", "config", "[", "0", "]", ",", "config", "[", "2", "]", ")", "for", "config", "in", "configs", ")", "except", "dbus", ".", "DBusException", "as", "exc", ":", "raise", "CommandExecutionError", "(", "'Error encountered while listing configurations: {0}'", ".", "format", "(", "_dbus_exception_to_reason", "(", "exc", ",", "locals", "(", ")", ")", ")", ")" ]
Function to generate a filter dictionary in which each key is the keyword (in string form) used in the Django filter function, and each value is the searched value.
def get_query_dict(self, **kwargs):
    """Build the filter dictionary from a datatables request.

    Each key is a django queryset method name (in string form, as
    used by ``filtering``) and each value maps filter keywords to the
    searched values.
    """
    n_cols = ensure(int, kwargs.get('total_cols', [0])[0], 0)
    mapping = self.mapping
    queries = defaultdict(dict)

    # The enumeration may not start at 0, so begin at its first key.
    first_key = mapping.keys()[0]
    for col in range(first_key, n_cols):
        prefix = 'columns[{index}]'.format(index=col)
        if kwargs.get(prefix + '[searchable]', [0])[0] != 'true':
            continue
        value = kwargs.get(prefix + '[search][value]', [''])[0].strip()
        if not value:
            continue
        extra = mapping.from_key(col).extra
        if type(extra) is tuple and len(extra) == 2:
            # (queryset method, filter keyword) pair
            method_name, filter_key = extra
            queries[method_name][filter_key] = value
        elif type(extra) is str:
            # plain filter keyword: goes through the default 'filter'
            queries['filter'][extra] = value
        else:
            raise ValueError("Invalid filter key.")
    return queries
12,136
https://github.com/KnightConan/sspdatatables/blob/1179a11358734e5e472e5eee703e8d34fa49e9bf/src/sspdatatables/datatables.py#L199-L230
[ "def", "RMSError", "(", "self", ")", ":", "tss", "=", "self", ".", "TSSError", "(", ")", "return", "math", ".", "sqrt", "(", "tss", "/", "self", ".", "size", ")" ]
function to get the order key to apply it in the filtered queryset
def get_order_key(self, **kwargs):
    """Return the ORM ordering key for the filtered queryset.

    The ordered column index from the request is mapped to its label
    via the Meta mapping enumeration; descending order gets a '-'
    prefix, following the django ORM convention.
    """
    mapping = self.mapping
    # Default to the first column of the enumeration.
    default_col = mapping.keys()[0]
    raw_col = kwargs.get('order[0][column]', [default_col])[0]
    col = ensure(int, raw_col, default_col)
    direction = kwargs.get('order[0][dir]', ['asc'])[0]
    key = mapping.from_key(col).label
    return '-' + key if direction == 'desc' else key
12,137
https://github.com/KnightConan/sspdatatables/blob/1179a11358734e5e472e5eee703e8d34fa49e9bf/src/sspdatatables/datatables.py#L232-L252
[ "def", "_sendStatCmd", "(", "self", ",", "cmd", ")", ":", "try", ":", "self", ".", "_conn", ".", "write", "(", "\"%s\\r\\n\"", "%", "cmd", ")", "regex", "=", "re", ".", "compile", "(", "'^(END|ERROR)\\r\\n'", ",", "re", ".", "MULTILINE", ")", "(", "idx", ",", "mobj", ",", "text", ")", "=", "self", ".", "_conn", ".", "expect", "(", "[", "regex", ",", "]", ",", "self", ".", "_timeout", ")", "#@UnusedVariable", "except", ":", "raise", "Exception", "(", "\"Communication with %s failed\"", "%", "self", ".", "_instanceName", ")", "if", "mobj", "is", "not", "None", ":", "if", "mobj", ".", "group", "(", "1", ")", "==", "'END'", ":", "return", "text", ".", "splitlines", "(", ")", "[", ":", "-", "1", "]", "elif", "mobj", ".", "group", "(", "1", ")", "==", "'ERROR'", ":", "raise", "Exception", "(", "\"Protocol error in communication with %s.\"", "%", "self", ".", "_instanceName", ")", "else", ":", "raise", "Exception", "(", "\"Connection with %s timed out.\"", "%", "self", ".", "_instanceName", ")" ]
Function to apply the pre-search condition to the queryset in order to narrow down the queryset's size.
def filtering(queryset, query_dict):
    """Apply the given query methods to *queryset* and return the result.

    Each key in *query_dict* names a queryset attribute (e.g.
    'filter'); list values are passed as positional arguments, dict
    values as keyword arguments, and anything else as a single
    positional argument.
    """
    for method_name, arguments in query_dict.items():
        assert hasattr(queryset, method_name), "Parameter 'query_dict' contains" \
            " non-existent attribute."
        method = getattr(queryset, method_name)
        if isinstance(arguments, list):
            queryset = method(*arguments)
        elif isinstance(arguments, dict):
            queryset = method(**arguments)
        else:
            queryset = method(arguments)
    return queryset
12,138
https://github.com/KnightConan/sspdatatables/blob/1179a11358734e5e472e5eee703e8d34fa49e9bf/src/sspdatatables/datatables.py#L255-L275
[ "def", "recover_and_supervise", "(", "recovery_file", ")", ":", "try", ":", "logging", ".", "info", "(", "\"Attempting to recover Supervisor data from \"", "+", "recovery_file", ")", "with", "open", "(", "recovery_file", ")", "as", "rf", ":", "recovery_data", "=", "json", ".", "load", "(", "rf", ")", "monitor_data", "=", "recovery_data", "[", "'monitor_data'", "]", "dependencies", "=", "recovery_data", "[", "'dependencies'", "]", "args", "=", "recovery_data", "[", "'args'", "]", "except", ":", "logging", ".", "error", "(", "\"Could not recover monitor data, exiting...\"", ")", "return", "1", "logging", ".", "info", "(", "\"Data successfully loaded, resuming Supervisor\"", ")", "supervise_until_complete", "(", "monitor_data", ",", "dependencies", ",", "args", ",", "recovery_file", ")" ]
function to slice the queryset according to the display length
def slicing(queryset, **kwargs):
    """Slice *queryset* to the requested page window.

    A length of -1 (or any negative value) means "display all
    records", in which case the queryset is returned unchanged.
    """
    page_len = ensure(int, kwargs.get('length', [0])[0], 0)
    offset = ensure(int, kwargs.get('start', [0])[0], 0)
    if page_len < 0:
        return queryset
    return queryset[offset:offset + page_len]
12,139
https://github.com/KnightConan/sspdatatables/blob/1179a11358734e5e472e5eee703e8d34fa49e9bf/src/sspdatatables/datatables.py#L278-L292
[ "def", "get_user", "(", "self", ",", "user_id", ")", ":", "msg", "=", "self", ".", "__request", "(", "'user/{}'", ".", "format", "(", "str", "(", "user_id", ")", ",", ")", ")", "status_code", "=", "self", ".", "__get_status_code", "(", "msg", ")", "if", "(", "status_code", "==", "200", ")", ":", "pairs", "=", "{", "}", "lines", "=", "msg", ".", "split", "(", "'\\n'", ")", "if", "(", "len", "(", "lines", ")", ">", "2", ")", "and", "self", ".", "RE_PATTERNS", "[", "'does_not_exist_pattern'", "]", ".", "match", "(", "lines", "[", "2", "]", ")", ":", "return", "None", "for", "line", "in", "lines", "[", "2", ":", "]", ":", "if", "': '", "in", "line", ":", "header", ",", "content", "=", "line", ".", "split", "(", "': '", ",", "1", ")", "pairs", "[", "header", ".", "strip", "(", ")", "]", "=", "content", ".", "strip", "(", ")", "return", "pairs", "else", ":", "raise", "UnexpectedMessageFormat", "(", "'Received status code is {:d} instead of 200.'", ".", "format", "(", "status_code", ")", ")" ]
intends to process the queries sent by data tables package in frontend . The model_cls indicates the model class get_query_dict is a function implemented by you such that it can return a query dictionary in which the key is the query keyword in str form and the value is the queried value
def query_by_args(self, pre_search_condition=None, **kwargs):
    """Process a datatables request and return the matching records.

    The optional *pre_search_condition* (an OrderedDict of queryset
    method names to arguments) narrows the base queryset first; the
    request's own filters, ordering and paging are then applied.

    Returns a dict with the page 'items', the filtered 'count', the
    unfiltered 'total' and the echoed 'draw' counter.
    """
    if pre_search_condition and not isinstance(pre_search_condition, OrderedDict):
        raise TypeError("Parameter 'pre_search_condition' must be an OrderedDict.")

    # Echo counter required by the datatables frontend.
    draw = ensure(int, kwargs.get('draw', [0])[0], 0)
    query_dict = self.get_query_dict(**kwargs)
    order_key = self.get_order_key(**kwargs)

    # Base queryset comes from the serializer's model.
    model_class = self.serializer.Meta.model
    queryset = model_class.objects
    if pre_search_condition:
        queryset = self.filtering(queryset, pre_search_condition)
    else:
        queryset = queryset.all()
    # Record count before applying the request's own filters.
    total = queryset.count()

    if query_dict:
        queryset = self.filtering(queryset, query_dict)
    count = queryset.count()

    # Order, then page.
    queryset = self.slicing(queryset.order_by(order_key), **kwargs)
    return {'items': queryset, 'count': count, 'total': total, 'draw': draw}
12,140
https://github.com/KnightConan/sspdatatables/blob/1179a11358734e5e472e5eee703e8d34fa49e9bf/src/sspdatatables/datatables.py#L294-L345
[ "def", "generation", "(", "self", ")", ":", "with", "self", ".", "_lock", ":", "if", "self", ".", "state", "is", "not", "MemberState", ".", "STABLE", ":", "return", "None", "return", "self", ".", "_generation" ]
Function to be called externally to get the footer search condition, apply the search in the database, and render the serialized result.
def process(self, pre_search_condition=None, **kwargs):
    """Run the full datatables pipeline and return the response dict.

    Queries the database (optionally applying the footer
    pre-search condition), serializes the resulting page and packages
    everything in the format expected by the datatables frontend.
    """
    records = self.query_by_args(
        pre_search_condition=pre_search_condition, **kwargs)
    page = self.serializer(records['items'], many=True)
    return {
        'data': page.data,
        'draw': records['draw'],
        'recordsTotal': records['total'],
        'recordsFiltered': records['count'],
    }
12,141
https://github.com/KnightConan/sspdatatables/blob/1179a11358734e5e472e5eee703e8d34fa49e9bf/src/sspdatatables/datatables.py#L347-L367
[ "def", "diff_sizes", "(", "a", ",", "b", ",", "progressbar", "=", "None", ")", ":", "difference", "=", "[", "]", "for", "i", "in", "a", ".", "identifiers", ":", "a_size", "=", "a", ".", "item_properties", "(", "i", ")", "[", "\"size_in_bytes\"", "]", "b_size", "=", "b", ".", "item_properties", "(", "i", ")", "[", "\"size_in_bytes\"", "]", "if", "a_size", "!=", "b_size", ":", "difference", ".", "append", "(", "(", "i", ",", "a_size", ",", "b_size", ")", ")", "if", "progressbar", ":", "progressbar", ".", "update", "(", "1", ")", "return", "difference" ]
Convert plain dictionary to MutationDict
def coerce(cls, key, value):
    """Convert a plain dictionary into a MutationDict.

    Every value is coerced through ``MutationObj`` so that nested
    containers also become mutation-tracking types.
    """
    converted = MutationDict(
        (k, MutationObj.coerce(key, v)) for k, v in value.items())
    converted._key = key
    return converted
12,142
https://github.com/reorx/torext/blob/84c4300ebc7fab0dbd11cf8b020bc7d4d1570171/torext/sql.py#L361-L365
[ "def", "stop", "(", "self", ")", ":", "# pylint: disable=no-self-use", "cherrypy", ".", "log", "(", "\"Stopping CherryPy engine (current state: %s)...\"", "%", "cherrypy", ".", "engine", ".", "state", ")", "try", ":", "cherrypy", ".", "engine", ".", "exit", "(", ")", "except", "RuntimeWarning", ":", "pass", "except", "SystemExit", ":", "cherrypy", ".", "log", "(", "'SystemExit raised: shutting down bus'", ")", "cherrypy", ".", "log", "(", "\"Stopped\"", ")" ]
Convert plain list to MutationList
def coerce(cls, key, value):
    """Convert a plain list into a MutationList.

    Each element is coerced through ``MutationObj`` so that nested
    containers become mutation-tracking types as well.
    """
    converted = MutationList(MutationObj.coerce(key, item) for item in value)
    converted._key = key
    return converted
12,143
https://github.com/reorx/torext/blob/84c4300ebc7fab0dbd11cf8b020bc7d4d1570171/torext/sql.py#L378-L382
[ "def", "_get_repo_info", "(", "alias", ",", "repos_cfg", "=", "None", ",", "root", "=", "None", ")", ":", "try", ":", "meta", "=", "dict", "(", "(", "repos_cfg", "or", "_get_configured_repos", "(", "root", "=", "root", ")", ")", ".", "items", "(", "alias", ")", ")", "meta", "[", "'alias'", "]", "=", "alias", "for", "key", ",", "val", "in", "six", ".", "iteritems", "(", "meta", ")", ":", "if", "val", "in", "[", "'0'", ",", "'1'", "]", ":", "meta", "[", "key", "]", "=", "int", "(", "meta", "[", "key", "]", ")", "==", "1", "elif", "val", "==", "'NONE'", ":", "meta", "[", "key", "]", "=", "None", "return", "meta", "except", "(", "ValueError", ",", "configparser", ".", "NoSectionError", ")", ":", "return", "{", "}" ]
Get the vector structure as a DNA regex pattern .
def structure ( cls ) : # type: () -> Text downstream = cls . cutter . elucidate ( ) upstream = str ( Seq ( downstream ) . reverse_complement ( ) ) return "" . join ( [ upstream . replace ( "^" , ")(" ) . replace ( "_" , "(" ) , "N*" , downstream . replace ( "^" , ")(" ) . replace ( "_" , ")" ) , ] )
12,144
https://github.com/althonos/moclo/blob/28a03748df8a2fa43f0c0c8098ca64d11559434e/moclo/moclo/core/vectors.py#L39-L60
[ "def", "getWorkersName", "(", "data", ")", ":", "names", "=", "[", "fichier", "for", "fichier", "in", "data", ".", "keys", "(", ")", "]", "names", ".", "sort", "(", ")", "try", ":", "names", ".", "remove", "(", "\"broker\"", ")", "except", "ValueError", ":", "pass", "return", "names" ]
Get the placeholder sequence in the vector .
def placeholder_sequence ( self ) : # type: () -> SeqRecord if self . cutter . is_3overhang ( ) : return self . _match . group ( 2 ) + self . overhang_end ( ) else : return self . overhang_start ( ) + self . _match . group ( 2 )
12,145
https://github.com/althonos/moclo/blob/28a03748df8a2fa43f0c0c8098ca64d11559434e/moclo/moclo/core/vectors.py#L74-L86
[ "def", "connection_made", "(", "self", ",", "transport", ":", "asyncio", ".", "BaseTransport", ")", "->", "None", ":", "logger", ".", "debug", "(", "\"%s - event = connection_made(%s)\"", ",", "self", ".", "side", ",", "transport", ")", "# mypy thinks transport is a BaseTransport, not a Transport.", "transport", ".", "set_write_buffer_limits", "(", "self", ".", "write_limit", ")", "# type: ignore", "super", "(", ")", ".", "connection_made", "(", "transport", ")" ]
Get the target sequence in the vector .
def target_sequence ( self ) : # type: () -> SeqRecord if self . cutter . is_3overhang ( ) : start , end = self . _match . span ( 2 ) [ 0 ] , self . _match . span ( 3 ) [ 1 ] else : start , end = self . _match . span ( 1 ) [ 0 ] , self . _match . span ( 2 ) [ 1 ] return add_as_source ( self . record , ( self . record << start ) [ end - start : ] )
12,146
https://github.com/althonos/moclo/blob/28a03748df8a2fa43f0c0c8098ca64d11559434e/moclo/moclo/core/vectors.py#L88-L99
[ "def", "perturbed_contents", "(", "self", ")", ":", "animal", "=", "json", ".", "loads", "(", "self", ".", "contents", ")", "for", "prop", ",", "prop_range", "in", "self", ".", "properties", ".", "items", "(", ")", ":", "range", "=", "prop_range", "[", "1", "]", "-", "prop_range", "[", "0", "]", "jittered", "=", "animal", "[", "prop", "]", "+", "random", ".", "gauss", "(", "0", ",", "0.1", "*", "range", ")", "animal", "[", "prop", "]", "=", "max", "(", "min", "(", "jittered", ",", "prop_range", "[", "1", "]", ")", ",", "prop_range", "[", "0", "]", ")", "return", "json", ".", "dumps", "(", "animal", ")" ]
Assemble the provided modules into the vector .
def assemble ( self , module , * modules , * * kwargs ) : # type: (AbstractModule, *AbstractModule, **Any) -> SeqRecord mgr = AssemblyManager ( vector = self , modules = [ module ] + list ( modules ) , name = kwargs . get ( "name" , "assembly" ) , id_ = kwargs . get ( "id" , "assembly" ) , ) return mgr . assemble ( )
12,147
https://github.com/althonos/moclo/blob/28a03748df8a2fa43f0c0c8098ca64d11559434e/moclo/moclo/core/vectors.py#L108-L145
[ "def", "get_pore_surface_parameters", "(", "surface_area", ")", ":", "PoreSurfaceParameters", "=", "DataFactory", "(", "'phtools.surface'", ")", "d", "=", "{", "'accessible_surface_area'", ":", "surface_area", ".", "get_dict", "(", ")", "[", "'ASA_A^2'", "]", ",", "'target_volume'", ":", "40e3", ",", "'sampling_method'", ":", "'random'", ",", "}", "return", "PoreSurfaceParameters", "(", "dict", "=", "d", ")" ]
Configure the component
async def onConnect ( self ) : # Add extra attribute # This allows for following crossbar/autobahn spec # without changing legacy configuration if not hasattr ( self . config , 'extra' ) : original_config = { 'config' : self . config } self . config = objdict ( self . config ) setattr ( self . config , 'extra' , original_config ) self . config . extra [ 'handlers' ] = self . handlers # setup transport host self . transport_host = self . config . extra [ 'config' ] [ 'transport_host' ] # subscription setup self . subscribe_options = SubscribeOptions ( * * self . config . extra [ 'config' ] [ 'sub_options' ] ) self . replay_events = self . config . extra [ 'config' ] [ 'replay_events' ] # publishing setup self . publish_topic = self . config . extra [ 'config' ] [ 'publish_topic' ] [ 'topic' ] self . publish_options = PublishOptions ( * * self . config . extra [ 'config' ] [ 'pub_options' ] ) # setup callback self . handlers = self . config . extra [ 'handlers' ] # optional subscribed topics from config.json self . subscribed_topics = self . config . extra [ 'config' ] [ 'subscribed_topics' ] # put name on session self . name = self . config . extra [ 'config' ] [ 'name' ] # setup db pool - optionally if self . config . extra [ 'config' ] [ 'pub_options' ] [ 'retain' ] is True : self . pool = await asyncpg . create_pool ( user = EVENT_DB_USER , password = EVENT_DB_PASS , host = EVENT_DB_HOST , database = EVENT_DB_NAME ) # Handle non crossbar drivers try : self . join ( self . config . realm ) except AttributeError : pass
12,148
https://github.com/eventifyio/eventify/blob/0e519964a56bd07a879b266f21f177749c63aaed/eventify/drivers/base.py#L20-L66
[ "def", "start", "(", "st_reg_number", ")", ":", "#st_reg_number = str(st_reg_number)", "weights", "=", "[", "4", ",", "3", ",", "2", ",", "9", ",", "8", ",", "7", ",", "6", ",", "5", ",", "4", ",", "3", ",", "2", "]", "digits", "=", "st_reg_number", "[", ":", "len", "(", "st_reg_number", ")", "-", "2", "]", "check_digits", "=", "st_reg_number", "[", "-", "2", ":", "]", "divisor", "=", "11", "if", "len", "(", "st_reg_number", ")", ">", "13", ":", "return", "False", "sum_total", "=", "0", "for", "i", "in", "range", "(", "len", "(", "digits", ")", ")", ":", "sum_total", "=", "sum_total", "+", "int", "(", "digits", "[", "i", "]", ")", "*", "weights", "[", "i", "]", "rest_division", "=", "sum_total", "%", "divisor", "first_digit", "=", "divisor", "-", "rest_division", "if", "first_digit", "==", "10", "or", "first_digit", "==", "11", ":", "first_digit", "=", "0", "if", "str", "(", "first_digit", ")", "!=", "check_digits", "[", "0", "]", ":", "return", "False", "digits", "=", "digits", "+", "str", "(", "first_digit", ")", "weights", "=", "[", "5", "]", "+", "weights", "sum_total", "=", "0", "for", "i", "in", "range", "(", "len", "(", "digits", ")", ")", ":", "sum_total", "=", "sum_total", "+", "int", "(", "digits", "[", "i", "]", ")", "*", "weights", "[", "i", "]", "rest_division", "=", "sum_total", "%", "divisor", "second_digit", "=", "divisor", "-", "rest_division", "if", "second_digit", "==", "10", "or", "second_digit", "==", "11", ":", "second_digit", "=", "0", "return", "str", "(", "first_digit", ")", "+", "str", "(", "second_digit", ")", "==", "check_digits" ]
Goes back to the master branch deletes the current branch locally and remotely .
def getback ( config , force = False ) : repo = config . repo active_branch = repo . active_branch if active_branch . name == "master" : error_out ( "You're already on the master branch." ) if repo . is_dirty ( ) : error_out ( 'Repo is "dirty". ({})' . format ( ", " . join ( [ repr ( x . b_path ) for x in repo . index . diff ( None ) ] ) ) ) branch_name = active_branch . name state = read ( config . configfile ) origin_name = state . get ( "ORIGIN_NAME" , "origin" ) upstream_remote = None fork_remote = None for remote in repo . remotes : if remote . name == origin_name : # remote.pull() upstream_remote = remote break if not upstream_remote : error_out ( "No remote called {!r} found" . format ( origin_name ) ) # Check out master repo . heads . master . checkout ( ) upstream_remote . pull ( repo . heads . master ) # Is this one of the merged branches?! # XXX I don't know how to do this "natively" with GitPython. merged_branches = [ x . strip ( ) for x in repo . git . branch ( "--merged" ) . splitlines ( ) if x . strip ( ) and not x . strip ( ) . startswith ( "*" ) ] was_merged = branch_name in merged_branches certain = was_merged or force if not certain : # Need to ask the user. # XXX This is where we could get smart and compare this branch # with the master. certain = ( input ( "Are you certain {} is actually merged? [Y/n] " . format ( branch_name ) ) . lower ( ) . strip ( ) != "n" ) if not certain : return 1 if was_merged : repo . git . branch ( "-d" , branch_name ) else : repo . git . branch ( "-D" , branch_name ) fork_remote = None for remote in repo . remotes : if remote . name == state . get ( "FORK_NAME" ) : fork_remote = remote break if fork_remote : fork_remote . push ( ":" + branch_name ) info_out ( "Remote branch on fork deleted too." )
12,149
https://github.com/peterbe/gg/blob/2aace5bdb4a9b1cb65bea717784edf54c63b7bad/gg/builtins/getback/gg_getback.py#L11-L79
[ "def", "GetFormatsWithSignatures", "(", "cls", ",", "parser_filter_expression", "=", "None", ")", ":", "specification_store", "=", "specification", ".", "FormatSpecificationStore", "(", ")", "remainder_list", "=", "[", "]", "for", "parser_name", ",", "parser_class", "in", "cls", ".", "GetParsers", "(", "parser_filter_expression", "=", "parser_filter_expression", ")", ":", "format_specification", "=", "parser_class", ".", "GetFormatSpecification", "(", ")", "if", "format_specification", "and", "format_specification", ".", "signatures", ":", "specification_store", ".", "AddSpecification", "(", "format_specification", ")", "# The plist parser is a special case, where it both defines a signature", "# and also needs to be applied 'brute-force' to non-matching files,", "# as the signature matches binary plists, but not XML or JSON plists.", "if", "parser_name", "==", "'plist'", ":", "remainder_list", ".", "append", "(", "parser_name", ")", "else", ":", "remainder_list", ".", "append", "(", "parser_name", ")", "return", "specification_store", ",", "remainder_list" ]
Return a Bus object for a certain vehicle ID vid using API instance api .
def get ( _class , api , vid ) : busses = api . vehicles ( vid = vid ) [ 'vehicle' ] return _class . fromapi ( api , api . vehicles ( vid = vid ) [ 'vehicle' ] )
12,150
https://github.com/nhfruchter/pgh-bustime/blob/b915e8fea28541612f0e79783c2cf12fd3daaac0/pghbustime/datatypes.py#L13-L19
[ "def", "rename_sectors", "(", "self", ",", "sectors", ")", ":", "if", "type", "(", "sectors", ")", "is", "list", ":", "sectors", "=", "{", "old", ":", "new", "for", "old", ",", "new", "in", "zip", "(", "self", ".", "get_sectors", "(", ")", ",", "sectors", ")", "}", "for", "df", "in", "self", ".", "get_DataFrame", "(", "data", "=", "True", ")", ":", "df", ".", "rename", "(", "index", "=", "sectors", ",", "columns", "=", "sectors", ",", "inplace", "=", "True", ")", "try", ":", "for", "ext", "in", "self", ".", "get_extensions", "(", "data", "=", "True", ")", ":", "for", "df", "in", "ext", ".", "get_DataFrame", "(", "data", "=", "True", ")", ":", "df", ".", "rename", "(", "index", "=", "sectors", ",", "columns", "=", "sectors", ",", "inplace", "=", "True", ")", "except", ":", "pass", "self", ".", "meta", ".", "_add_modify", "(", "\"Changed sector names\"", ")", "return", "self" ]
Return a Bus object from an API response dict .
def fromapi ( _class , api , apiresponse ) : bus = apiresponse return _class ( api = api , vid = bus [ 'vid' ] , timeupdated = datetime . strptime ( bus [ 'tmstmp' ] , api . STRPTIME ) , lat = float ( bus [ 'lat' ] ) , lng = float ( bus [ 'lon' ] ) , heading = bus [ 'hdg' ] , pid = bus [ 'pid' ] , intotrip = bus [ 'pdist' ] , route = bus [ 'rt' ] , destination = bus [ 'des' ] , speed = bus [ 'spd' ] , delay = bus . get ( 'dly' ) or False )
12,151
https://github.com/nhfruchter/pgh-bustime/blob/b915e8fea28541612f0e79783c2cf12fd3daaac0/pghbustime/datatypes.py#L22-L40
[ "def", "beacon", "(", "config", ")", ":", "parts", "=", "psutil", ".", "disk_partitions", "(", "all", "=", "True", ")", "ret", "=", "[", "]", "for", "mounts", "in", "config", ":", "mount", "=", "next", "(", "iter", "(", "mounts", ")", ")", "# Because we're using regular expressions", "# if our mount doesn't end with a $, insert one.", "mount_re", "=", "mount", "if", "not", "mount", ".", "endswith", "(", "'$'", ")", ":", "mount_re", "=", "'{0}$'", ".", "format", "(", "mount", ")", "if", "salt", ".", "utils", ".", "platform", ".", "is_windows", "(", ")", ":", "# mount_re comes in formatted with a $ at the end", "# can be `C:\\\\$` or `C:\\\\\\\\$`", "# re string must be like `C:\\\\\\\\` regardless of \\\\ or \\\\\\\\", "# also, psutil returns uppercase", "mount_re", "=", "re", ".", "sub", "(", "r':\\\\\\$'", ",", "r':\\\\\\\\'", ",", "mount_re", ")", "mount_re", "=", "re", ".", "sub", "(", "r':\\\\\\\\\\$'", ",", "r':\\\\\\\\'", ",", "mount_re", ")", "mount_re", "=", "mount_re", ".", "upper", "(", ")", "for", "part", "in", "parts", ":", "if", "re", ".", "match", "(", "mount_re", ",", "part", ".", "mountpoint", ")", ":", "_mount", "=", "part", ".", "mountpoint", "try", ":", "_current_usage", "=", "psutil", ".", "disk_usage", "(", "_mount", ")", "except", "OSError", ":", "log", ".", "warning", "(", "'%s is not a valid mount point.'", ",", "_mount", ")", "continue", "current_usage", "=", "_current_usage", ".", "percent", "monitor_usage", "=", "mounts", "[", "mount", "]", "if", "'%'", "in", "monitor_usage", ":", "monitor_usage", "=", "re", ".", "sub", "(", "'%'", ",", "''", ",", "monitor_usage", ")", "monitor_usage", "=", "float", "(", "monitor_usage", ")", "if", "current_usage", ">=", "monitor_usage", ":", "ret", ".", "append", "(", "{", "'diskusage'", ":", "current_usage", ",", "'mount'", ":", "_mount", "}", ")", "return", "ret" ]
Update this bus by creating a new one and transplanting dictionaries .
def update ( self ) : vehicle = self . api . vehicles ( vid = self . vid ) [ 'vehicle' ] newbus = self . fromapi ( self . api , vehicle ) self . __dict__ = newbus . __dict__ del newbus
12,152
https://github.com/nhfruchter/pgh-bustime/blob/b915e8fea28541612f0e79783c2cf12fd3daaac0/pghbustime/datatypes.py#L61-L66
[ "def", "get_remaining_width", "(", "sample_string", ",", "max_terminal_width", "=", "None", ")", ":", "if", "max_terminal_width", "is", "not", "None", ":", "available_width", "=", "min", "(", "terminal_width", "(", ")", ",", "max_terminal_width", ")", "else", ":", "available_width", "=", "terminal_width", "(", ")", "return", "available_width", "-", "len", "(", "sample_string", ")" ]
Generator that yields prediction objects from an API response .
def predictions ( self ) : for prediction in self . api . predictions ( vid = self . vid ) [ 'prd' ] : pobj = Prediction . fromapi ( self . api , prediction ) pobj . _busobj = self yield pobj
12,153
https://github.com/nhfruchter/pgh-bustime/blob/b915e8fea28541612f0e79783c2cf12fd3daaac0/pghbustime/datatypes.py#L73-L78
[ "def", "change_column_length", "(", "table", ":", "Table", ",", "column", ":", "Column", ",", "length", ":", "int", ",", "engine", ":", "Engine", ")", "->", "None", ":", "if", "column", ".", "type", ".", "length", "<", "length", ":", "print", "(", "\"Changing length of {} from {} to {}\"", ".", "format", "(", "column", ",", "column", ".", "type", ".", "length", ",", "length", ")", ")", "column", ".", "type", ".", "length", "=", "length", "column_name", "=", "column", ".", "name", "column_type", "=", "column", ".", "type", ".", "compile", "(", "engine", ".", "dialect", ")", "engine", ".", "execute", "(", "'ALTER TABLE {table} ALTER COLUMN {column_name} TYPE {column_type}'", ".", "format", "(", "*", "*", "locals", "(", ")", ")", ")" ]
Return the next stop for this bus .
def next_stop ( self ) : p = self . api . predictions ( vid = self . vid ) [ 'prd' ] pobj = Prediction . fromapi ( self . api , p [ 0 ] ) pobj . _busobj = self return pobj
12,154
https://github.com/nhfruchter/pgh-bustime/blob/b915e8fea28541612f0e79783c2cf12fd3daaac0/pghbustime/datatypes.py#L81-L86
[ "def", "save", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "from", "tracked_model", ".", "models", "import", "History", ",", "RequestInfo", "if", "self", ".", "pk", ":", "action", "=", "ActionType", ".", "UPDATE", "changes", "=", "None", "else", ":", "action", "=", "ActionType", ".", "CREATE", "changes", "=", "serializer", ".", "dump_model", "(", "self", ")", "request", "=", "kwargs", ".", "pop", "(", "'request'", ",", "None", ")", "track_token", "=", "kwargs", ".", "pop", "(", "'track_token'", ",", "None", ")", "super", "(", ")", ".", "save", "(", "*", "args", ",", "*", "*", "kwargs", ")", "if", "not", "changes", ":", "changes", "=", "self", ".", "_tracked_model_diff", "(", ")", "if", "changes", ":", "hist", "=", "History", "(", ")", "hist", ".", "model_name", "=", "self", ".", "_meta", ".", "model", ".", "__name__", "hist", ".", "app_label", "=", "self", ".", "_meta", ".", "app_label", "hist", ".", "table_name", "=", "self", ".", "_meta", ".", "db_table", "hist", ".", "table_id", "=", "self", ".", "pk", "hist", ".", "change_log", "=", "serializer", ".", "to_json", "(", "changes", ")", "hist", ".", "action_type", "=", "action", "if", "request", ":", "if", "request", ".", "user", ".", "is_authenticated", "(", ")", ":", "hist", ".", "revision_author", "=", "request", ".", "user", "req_info", "=", "RequestInfo", ".", "create_or_get_from_request", "(", "request", ")", "hist", ".", "revision_request", "=", "req_info", "elif", "track_token", ":", "hist", ".", "revision_author_id", "=", "track_token", ".", "user_pk", "hist", ".", "revision_request_id", "=", "track_token", ".", "request_pk", "hist", ".", "save", "(", ")", "self", ".", "_tracked_model_initial_state", "=", "serializer", ".", "dump_model", "(", "self", ")" ]
Return a Route object for route rt using API instance api .
def get ( _class , api , rt ) : if not _class . all_routes : _class . all_routes = _class . update_list ( api , api . routes ( ) [ 'route' ] ) return _class . all_routes [ str ( rt ) ]
12,155
https://github.com/nhfruchter/pgh-bustime/blob/b915e8fea28541612f0e79783c2cf12fd3daaac0/pghbustime/datatypes.py#L114-L122
[ "def", "json", "(", "self", ",", "*", "*", "kwargs", ")", ":", "body", "=", "self", ".", "_decompress", "(", "self", ".", "encoding", ")", "return", "_json", ".", "loads", "(", "body", ",", "*", "*", "kwargs", ")" ]
Ensures a path is parsed .
def _normalise_path ( path : Union [ str , pathlib . Path ] ) -> pathlib . Path : if isinstance ( path , str ) : return pathlib . Path ( path ) return path
12,156
https://github.com/gebn/wood/blob/efc71879890dbd2f2d7a0b1a65ed22a0843139dd/wood/__init__.py#L30-L39
[ "def", "select_models", "(", "clas", ",", "pool_or_cursor", ",", "*", "*", "kwargs", ")", ":", "if", "'columns'", "in", "kwargs", ":", "raise", "ValueError", "(", "\"don't pass 'columns' to select_models\"", ")", "return", "(", "set_options", "(", "pool_or_cursor", ",", "clas", "(", "*", "row", ")", ")", "for", "row", "in", "clas", ".", "select", "(", "pool_or_cursor", ",", "*", "*", "kwargs", ")", ")" ]
Retrieve a root directory object from a path .
def root ( path : Union [ str , pathlib . Path ] ) -> _Root : return _Root . from_path ( _normalise_path ( path ) )
12,157
https://github.com/gebn/wood/blob/efc71879890dbd2f2d7a0b1a65ed22a0843139dd/wood/__init__.py#L42-L49
[ "def", "_get_regional_term", "(", "self", ",", "C", ",", "imt", ",", "vs30", ",", "rrup", ")", ":", "f3", "=", "interpolate", ".", "interp1d", "(", "[", "150", ",", "250", ",", "350", ",", "450", ",", "600", ",", "850", ",", "1150", ",", "2000", "]", ",", "[", "C", "[", "'a36'", "]", ",", "C", "[", "'a37'", "]", ",", "C", "[", "'a38'", "]", ",", "C", "[", "'a39'", "]", ",", "C", "[", "'a40'", "]", ",", "C", "[", "'a41'", "]", ",", "C", "[", "'a42'", "]", ",", "C", "[", "'a42'", "]", "]", ",", "kind", "=", "'linear'", ")", "return", "f3", "(", "vs30", ")", "+", "C", "[", "'a29'", "]", "*", "rrup" ]
Retrieve an appropriate entity object from a path .
def entity ( path : Union [ str , pathlib . Path ] ) -> _Entity : return _Entity . from_path ( _normalise_path ( path ) )
12,158
https://github.com/gebn/wood/blob/efc71879890dbd2f2d7a0b1a65ed22a0843139dd/wood/__init__.py#L52-L60
[ "def", "_clamp_string", "(", "self", ",", "row_item", ",", "column_index", ",", "delimiter", "=", "''", ")", ":", "width", "=", "(", "self", ".", "_table", ".", "column_widths", "[", "column_index", "]", "-", "self", ".", "_table", ".", "left_padding_widths", "[", "column_index", "]", "-", "self", ".", "_table", ".", "right_padding_widths", "[", "column_index", "]", ")", "if", "termwidth", "(", "row_item", ")", "<=", "width", ":", "return", "row_item", "else", ":", "if", "width", "-", "len", "(", "delimiter", ")", ">=", "0", ":", "clamped_string", "=", "(", "textwrap", "(", "row_item", ",", "width", "-", "len", "(", "delimiter", ")", ")", "[", "0", "]", "+", "delimiter", ")", "else", ":", "clamped_string", "=", "delimiter", "[", ":", "width", "]", "return", "clamped_string" ]
Compare two paths .
def compare ( left : Union [ str , pathlib . Path , _Entity ] , right : Union [ str , pathlib . Path , _Entity ] ) -> Comparison : def normalise ( param : Union [ str , pathlib . Path , _Entity ] ) -> _Entity : """ Turns any one of a number of types of input into an entity. :param param: The input - either a path string, a path object, or a full blown entity. :return: The input param as an entity. """ if isinstance ( param , str ) : param = pathlib . Path ( param ) if isinstance ( param , pathlib . Path ) : param = _Entity . from_path ( param ) return param return Comparison . compare ( normalise ( left ) , normalise ( right ) )
12,159
https://github.com/gebn/wood/blob/efc71879890dbd2f2d7a0b1a65ed22a0843139dd/wood/__init__.py#L63-L88
[ "def", "checkIfAvailable", "(", "self", ",", "dateTime", "=", "timezone", ".", "now", "(", ")", ")", ":", "return", "(", "self", ".", "startTime", ">=", "dateTime", "+", "timedelta", "(", "days", "=", "getConstant", "(", "'privateLessons__closeBookingDays'", ")", ")", "and", "self", ".", "startTime", "<=", "dateTime", "+", "timedelta", "(", "days", "=", "getConstant", "(", "'privateLessons__openBookingDays'", ")", ")", "and", "not", "self", ".", "eventRegistration", "and", "(", "self", ".", "status", "==", "self", ".", "SlotStatus", ".", "available", "or", "(", "self", ".", "status", "==", "self", ".", "SlotStatus", ".", "tentative", "and", "getattr", "(", "getattr", "(", "self", ".", "temporaryEventRegistration", ",", "'registration'", ",", "None", ")", ",", "'expirationDate'", ",", "timezone", ".", "now", "(", ")", ")", "<=", "timezone", ".", "now", "(", ")", ")", ")", ")" ]
Loads the instrumental geometry information from the XPARM . XDS or GXPARM . XDS files at the proposed location
def read_XPARM ( path_to_XPARM = '.' ) : if not os . path . exists ( path_to_XPARM ) : raise Exception ( "path " + path_to_XPARM + "does not exist" ) if os . path . isdir ( path_to_XPARM ) : candidate = os . path . join ( path_to_XPARM , 'GXPARM.XDS' ) if os . path . isfile ( candidate ) : path_to_XPARM = candidate else : candidate = os . path . join ( path_to_XPARM , 'XPARM.XDS' ) if os . path . isfile ( candidate ) : path_to_XPARM = candidate else : raise Exception ( "files GXPARM.XDS and XPARM.XDS are not found in the folder " + path_to_XPARM ) with open ( path_to_XPARM ) as f : f . readline ( ) # skip header text = f . read ( ) # parse the rest to numbers f = re . compile ( '-?\d+\.?\d*' ) . finditer ( text ) try : result = dict ( starting_frame = r_get_numbers ( f , 1 ) , starting_angle = r_get_numbers ( f , 1 ) , oscillation_angle = r_get_numbers ( f , 1 ) , rotation_axis = r_get_numbers ( f , 3 ) , wavelength = r_get_numbers ( f , 1 ) , wavevector = r_get_numbers ( f , 3 ) , space_group_nr = r_get_numbers ( f , 1 ) , cell = r_get_numbers ( f , 6 ) , unit_cell_vectors = np . reshape ( r_get_numbers ( f , 9 ) , ( 3 , 3 ) ) , number_of_detector_segments = r_get_numbers ( f , 1 ) , NX = r_get_numbers ( f , 1 ) , NY = r_get_numbers ( f , 1 ) , pixelsize_x = r_get_numbers ( f , 1 ) , pixelsize_y = r_get_numbers ( f , 1 ) , x_center = r_get_numbers ( f , 1 ) , y_center = r_get_numbers ( f , 1 ) , distance_to_detector = r_get_numbers ( f , 1 ) , detector_x = r_get_numbers ( f , 3 ) , detector_y = r_get_numbers ( f , 3 ) , detector_normal = r_get_numbers ( f , 3 ) , detector_segment_crossection = r_get_numbers ( f , 5 ) , detector_segment_geometry = r_get_numbers ( f , 9 ) ) except StopIteration : raise Exception ( 'Wrong format of the XPARM.XDS file' ) # check there is nothing left try : f . next ( ) except StopIteration : pass else : raise Exception ( 'Wrong format of the XPARM.XDS file' ) return result
12,160
https://github.com/aglie/meerkat/blob/f056a3da7ed3d7cd43edb56a38903cfa146e4b24/meerkat/meerkat.py#L18-L82
[ "def", "RemoveConnectedPeer", "(", "self", ",", "peer", ")", ":", "if", "peer", "in", "self", ".", "Peers", ":", "self", ".", "Peers", ".", "remove", "(", "peer", ")" ]
Allows to open the hdf5 file with specified cache size
def create_h5py_with_large_cache ( filename , cache_size_mb ) : # h5py does not allow to control the cache size from the high level # we employ the workaround # sources: #http://stackoverflow.com/questions/14653259/how-to-set-cache-settings-while-using-h5py-high-level-interface #https://groups.google.com/forum/#!msg/h5py/RVx1ZB6LpE4/KH57vq5yw2AJ propfaid = h5py . h5p . create ( h5py . h5p . FILE_ACCESS ) settings = list ( propfaid . get_cache ( ) ) settings [ 2 ] = 1024 * 1024 * cache_size_mb propfaid . set_cache ( * settings ) fid = h5py . h5f . create ( filename , flags = h5py . h5f . ACC_EXCL , fapl = propfaid ) fin = h5py . File ( fid ) return fin
12,161
https://github.com/aglie/meerkat/blob/f056a3da7ed3d7cd43edb56a38903cfa146e4b24/meerkat/meerkat.py#L203-L218
[ "def", "trigger_script", "(", "self", ")", ":", "if", "self", ".", "remote_bridge", ".", "status", "not", "in", "(", "BRIDGE_STATUS", ".", "RECEIVED", ",", ")", ":", "return", "[", "1", "]", "#FIXME: State change", "# This is asynchronous in real life so just cache the error", "try", ":", "self", ".", "remote_bridge", ".", "parsed_script", "=", "UpdateScript", ".", "FromBinary", "(", "self", ".", "_device", ".", "script", ")", "#FIXME: Actually run the script", "self", ".", "remote_bridge", ".", "status", "=", "BRIDGE_STATUS", ".", "IDLE", "except", "Exception", "as", "exc", ":", "self", ".", "_logger", ".", "exception", "(", "\"Error parsing script streamed to device\"", ")", "self", ".", "remote_bridge", ".", "script_error", "=", "exc", "self", ".", "remote_bridge", ".", "error", "=", "1", "# FIXME: Error code", "return", "[", "0", "]" ]
Find features in sequences by locus tag
def find_features ( seqs , locus_tag = "all" , utr_len = 200 ) : found_features = [ ] for seq_i in seqs : for feature in seq_i . features : if feature . type == "CDS" and ( locus_tag == "all" or ( 'locus_tag' in feature . qualifiers and feature . qualifiers [ 'locus_tag' ] [ 0 ] == locus_tag ) ) : start = max ( 0 , feature . location . nofuzzy_start - utr_len ) stop = max ( 0 , feature . location . nofuzzy_end + utr_len ) feature_seq = seq_i . seq [ start : stop ] f_match = FeatureMatch ( feature , feature_seq , feature . strand , utr_len ) found_features . append ( f_match ) return found_features
12,162
https://github.com/kblin/bioinf-helperlibs/blob/3a732d62b4b3cc42675631db886ba534672cb134/helperlibs/bio/featurematch.py#L111-L127
[ "def", "make_random_models_table", "(", "n_sources", ",", "param_ranges", ",", "random_state", "=", "None", ")", ":", "prng", "=", "check_random_state", "(", "random_state", ")", "sources", "=", "Table", "(", ")", "for", "param_name", ",", "(", "lower", ",", "upper", ")", "in", "param_ranges", ".", "items", "(", ")", ":", "# Generate a column for every item in param_ranges, even if it", "# is not in the model (e.g. flux). However, such columns will", "# be ignored when rendering the image.", "sources", "[", "param_name", "]", "=", "prng", ".", "uniform", "(", "lower", ",", "upper", ",", "n_sources", ")", "return", "sources" ]
Get nest - level of this port
def getLevel ( self ) : lvl = 0 p = self while True : p = p . parent if not isinstance ( p , LPort ) : break lvl += 1 return lvl
12,163
https://github.com/Nic30/hwtGraph/blob/6b7d4fdd759f263a0fdd2736f02f123e44e4354f/hwtGraph/elk/containers/lPort.py#L44-L55
[ "async", "def", "jsk_curl", "(", "self", ",", "ctx", ":", "commands", ".", "Context", ",", "url", ":", "str", ")", ":", "# remove embed maskers if present", "url", "=", "url", ".", "lstrip", "(", "\"<\"", ")", ".", "rstrip", "(", "\">\"", ")", "async", "with", "ReplResponseReactor", "(", "ctx", ".", "message", ")", ":", "async", "with", "aiohttp", ".", "ClientSession", "(", ")", "as", "session", ":", "async", "with", "session", ".", "get", "(", "url", ")", "as", "response", ":", "data", "=", "await", "response", ".", "read", "(", ")", "hints", "=", "(", "response", ".", "content_type", ",", "url", ")", "code", "=", "response", ".", "status", "if", "not", "data", ":", "return", "await", "ctx", ".", "send", "(", "f\"HTTP response was empty (status code {code}).\"", ")", "try", ":", "paginator", "=", "WrappedFilePaginator", "(", "io", ".", "BytesIO", "(", "data", ")", ",", "language_hints", "=", "hints", ",", "max_size", "=", "1985", ")", "except", "UnicodeDecodeError", ":", "return", "await", "ctx", ".", "send", "(", "f\"Couldn't determine the encoding of the response. (status code {code})\"", ")", "except", "ValueError", "as", "exc", ":", "return", "await", "ctx", ".", "send", "(", "f\"Couldn't read response (status code {code}), {exc}\"", ")", "interface", "=", "PaginatorInterface", "(", "ctx", ".", "bot", ",", "paginator", ",", "owner", "=", "ctx", ".", "author", ")", "await", "interface", ".", "send_to", "(", "ctx", ")" ]
Normalize a LCSH subject heading prior to indexing .
def normalize_LCSH ( subject ) : # Strip then divide on -- which is a delimiter for LCSH; # rejoin after stripping parts. subject_parts = subject . strip ( ) . split ( '--' ) joined_subject = ' -- ' . join ( [ part . strip ( ) for part in subject_parts ] ) # Check if there is punctuation at the end of the string, # and if not, add a trailing period. if re . search ( r'[^a-zA-Z0-9]$' , joined_subject ) is None : joined_subject = joined_subject + '.' return joined_subject
12,164
https://github.com/unt-libraries/pyuntl/blob/f92413302897dab948aac18ee9e482ace0187bd4/pyuntl/util.py#L4-L16
[ "def", "absent", "(", "name", ",", "vpc_id", "=", "None", ",", "vpc_name", "=", "None", ",", "region", "=", "None", ",", "key", "=", "None", ",", "keyid", "=", "None", ",", "profile", "=", "None", ")", ":", "ret", "=", "{", "'name'", ":", "name", ",", "'result'", ":", "True", ",", "'comment'", ":", "''", ",", "'changes'", ":", "{", "}", "}", "sg", "=", "__salt__", "[", "'boto_secgroup.get_config'", "]", "(", "name", "=", "name", ",", "group_id", "=", "None", ",", "region", "=", "region", ",", "key", "=", "key", ",", "keyid", "=", "keyid", ",", "profile", "=", "profile", ",", "vpc_id", "=", "vpc_id", ",", "vpc_name", "=", "vpc_name", ")", "if", "sg", ":", "if", "__opts__", "[", "'test'", "]", ":", "ret", "[", "'comment'", "]", "=", "'Security group {0} is set to be removed.'", ".", "format", "(", "name", ")", "ret", "[", "'result'", "]", "=", "None", "return", "ret", "deleted", "=", "__salt__", "[", "'boto_secgroup.delete'", "]", "(", "name", "=", "name", ",", "group_id", "=", "None", ",", "region", "=", "region", ",", "key", "=", "key", ",", "keyid", "=", "keyid", ",", "profile", "=", "profile", ",", "vpc_id", "=", "vpc_id", ",", "vpc_name", "=", "vpc_name", ")", "if", "deleted", ":", "ret", "[", "'changes'", "]", "[", "'old'", "]", "=", "{", "'secgroup'", ":", "sg", "}", "ret", "[", "'changes'", "]", "[", "'new'", "]", "=", "{", "'secgroup'", ":", "None", "}", "ret", "[", "'comment'", "]", "=", "'Security group {0} deleted.'", ".", "format", "(", "name", ")", "else", ":", "ret", "[", "'result'", "]", "=", "False", "ret", "[", "'comment'", "]", "=", "'Failed to delete {0} security group.'", ".", "format", "(", "name", ")", "else", ":", "ret", "[", "'comment'", "]", "=", "'{0} security group does not exist.'", ".", "format", "(", "name", ")", "return", "ret" ]
Normalize a UNTL subject heading for consistency .
def normalize_UNTL(subject):
    """Normalize a UNTL subject heading for consistency.

    Surrounding whitespace is removed and every internal run of
    whitespace is collapsed to a single space.
    """
    return re.sub(r'[\s]+', ' ', subject.strip())
12,165
https://github.com/unt-libraries/pyuntl/blob/f92413302897dab948aac18ee9e482ace0187bd4/pyuntl/util.py#L19-L23
[ "def", "delete_logs", "(", "room", ")", ":", "from", "indico_chat", ".", "plugin", "import", "ChatPlugin", "base_url", "=", "ChatPlugin", ".", "settings", ".", "get", "(", "'log_url'", ")", "if", "not", "base_url", "or", "room", ".", "custom_server", ":", "return", "try", ":", "response", "=", "requests", ".", "get", "(", "posixpath", ".", "join", "(", "base_url", ",", "'delete'", ")", ",", "params", "=", "{", "'cr'", ":", "room", ".", "jid", "}", ")", ".", "json", "(", ")", "except", "(", "RequestException", ",", "ValueError", ")", ":", "current_plugin", ".", "logger", ".", "exception", "(", "'Could not delete logs for %s'", ",", "room", ".", "jid", ")", "return", "if", "not", "response", ".", "get", "(", "'success'", ")", ":", "current_plugin", ".", "logger", ".", "warning", "(", "'Could not delete logs for %s: %s'", ",", "room", ".", "jid", ",", "response", ".", "get", "(", "'error'", ")", ")" ]
Normalize a UNTL subject heading to be used in SOLR .
def UNTL_to_encodedUNTL(subject):
    """Normalize a UNTL subject heading to be used in SOLR.

    The heading is normalized first, then spaces become underscores,
    and the resulting '_-_' separator becomes a slash.
    """
    encoded = normalize_UNTL(subject)
    # Order matters: spaces must become underscores before the '_-_'
    # pattern can appear and be collapsed into '/'.
    encoded = encoded.replace(' ', '_').replace('_-_', '/')
    return encoded
12,166
https://github.com/unt-libraries/pyuntl/blob/f92413302897dab948aac18ee9e482ace0187bd4/pyuntl/util.py#L26-L31
[ "def", "stop_experiment", "(", "args", ")", ":", "experiment_id_list", "=", "parse_ids", "(", "args", ")", "if", "experiment_id_list", ":", "experiment_config", "=", "Experiments", "(", ")", "experiment_dict", "=", "experiment_config", ".", "get_all_experiments", "(", ")", "for", "experiment_id", "in", "experiment_id_list", ":", "print_normal", "(", "'Stoping experiment %s'", "%", "experiment_id", ")", "nni_config", "=", "Config", "(", "experiment_dict", "[", "experiment_id", "]", "[", "'fileName'", "]", ")", "rest_port", "=", "nni_config", ".", "get_config", "(", "'restServerPort'", ")", "rest_pid", "=", "nni_config", ".", "get_config", "(", "'restServerPid'", ")", "if", "rest_pid", ":", "kill_command", "(", "rest_pid", ")", "tensorboard_pid_list", "=", "nni_config", ".", "get_config", "(", "'tensorboardPidList'", ")", "if", "tensorboard_pid_list", ":", "for", "tensorboard_pid", "in", "tensorboard_pid_list", ":", "try", ":", "kill_command", "(", "tensorboard_pid", ")", "except", "Exception", "as", "exception", ":", "print_error", "(", "exception", ")", "nni_config", ".", "set_config", "(", "'tensorboardPidList'", ",", "[", "]", ")", "print_normal", "(", "'Stop experiment success!'", ")", "experiment_config", ".", "update_experiment", "(", "experiment_id", ",", "'status'", ",", "'STOPPED'", ")", "time_now", "=", "time", ".", "strftime", "(", "'%Y-%m-%d %H:%M:%S'", ",", "time", ".", "localtime", "(", "time", ".", "time", "(", ")", ")", ")", "experiment_config", ".", "update_experiment", "(", "experiment_id", ",", "'endTime'", ",", "str", "(", "time_now", ")", ")" ]
Normalize UNTL elements by their qualifier .
def untldict_normalizer(untl_dict, normalizations):
    """Normalize UNTL elements by their qualifier.

    Args:
        untl_dict: dict mapping an element type name to a list of
            element dicts, each with optional 'qualifier' and
            'content' keys.
        normalizations: dict mapping an element type to the list of
            qualifiers whose content should be normalized.

    Returns:
        The same dict, with matching content values replaced in place
        by their normalized form (looked up in the module-level
        ELEMENT_NORMALIZERS table).
    """
    # Loop through the element types in the UNTL metadata.
    for element_type, element_list in untl_dict.items():
        # A normalization is required for that element type.
        if element_type in normalizations:
            # Get the required normalizations for specific qualifiers list.
            norm_qualifier_list = normalizations.get(element_type)
            # Loop through the element lists within that element type.
            for element in element_list:
                # Determine if the qualifier requires normalization.
                qualifier = element.get('qualifier', None)
                if qualifier in norm_qualifier_list:
                    content = element.get('content', None)
                    # Determine if there is normalizing for the element.
                    if element_type in ELEMENT_NORMALIZERS:
                        elem_norms = ELEMENT_NORMALIZERS.get(element_type, None)
                        # If the qualified element requires a
                        # normalization and has content, replace the
                        # content with the normalized.
                        if qualifier in elem_norms:
                            if content and content != '':
                                element['content'] = elem_norms[qualifier](content)
    return untl_dict
12,167
https://github.com/unt-libraries/pyuntl/blob/f92413302897dab948aac18ee9e482ace0187bd4/pyuntl/util.py#L41-L73
[ "def", "load_projects", "(", "self", ")", ":", "server_config", "=", "Config", ".", "instance", "(", ")", ".", "get_section_config", "(", "\"Server\"", ")", "projects_path", "=", "os", ".", "path", ".", "expanduser", "(", "server_config", ".", "get", "(", "\"projects_path\"", ",", "\"~/GNS3/projects\"", ")", ")", "os", ".", "makedirs", "(", "projects_path", ",", "exist_ok", "=", "True", ")", "try", ":", "for", "project_path", "in", "os", ".", "listdir", "(", "projects_path", ")", ":", "project_dir", "=", "os", ".", "path", ".", "join", "(", "projects_path", ",", "project_path", ")", "if", "os", ".", "path", ".", "isdir", "(", "project_dir", ")", ":", "for", "file", "in", "os", ".", "listdir", "(", "project_dir", ")", ":", "if", "file", ".", "endswith", "(", "\".gns3\"", ")", ":", "try", ":", "yield", "from", "self", ".", "load_project", "(", "os", ".", "path", ".", "join", "(", "project_dir", ",", "file", ")", ",", "load", "=", "False", ")", "except", "(", "aiohttp", ".", "web_exceptions", ".", "HTTPConflict", ",", "NotImplementedError", ")", ":", "pass", "# Skip not compatible projects", "except", "OSError", "as", "e", ":", "log", ".", "error", "(", "str", "(", "e", ")", ")" ]
Create a new topic branch .
def start(config, bugnumber=""):
    """Create a new git topic branch.

    Optionally looks up a bug/issue by number to derive the branch
    name, prompts the user for a summary, creates and checks out the
    branch, and persists the branch metadata via ``save``.
    """
    repo = config.repo
    if bugnumber:
        # Resolve summary/URL from the issue tracker for this bug.
        summary, bugnumber, url = get_summary(config, bugnumber)
    else:
        url = None
        summary = None
    if summary:
        # Offer the fetched summary as the default; empty input keeps it.
        summary = input('Summary ["{}"]: '.format(summary)).strip() or summary
    else:
        summary = input("Summary: ").strip()
    branch_name = ""
    if bugnumber:
        # NOTE(review): both branches of this conditional produce the
        # same prefix — the is_github() distinction currently has no
        # effect. Confirm whether a different format was intended.
        if is_github({"bugnumber": bugnumber, "url": url}):
            branch_name = "{}-".format(bugnumber)
        else:
            branch_name = "{}-".format(bugnumber)

    def clean_branch_name(string):
        # Turn an arbitrary summary into a safe, lowercase,
        # dash-separated branch name fragment.
        string = re.sub(r"\s+", " ", string)
        string = string.replace(" ", "-")
        string = string.replace("->", "-").replace("=>", "-")
        for each in "@%^&:'\"/(),[]{}!.?`$<>#*;=":
            string = string.replace(each, "")
        string = re.sub("-+", "-", string)
        string = string.strip("-")
        return string.lower().strip()

    branch_name += clean_branch_name(summary)
    if not branch_name:
        error_out("Must provide a branch name")
    # Check that the branch doesn't already exist
    found = list(find(repo, branch_name, exact=True))
    if found:
        error_out("There is already a branch called {!r}".format(found[0].name))
    new_branch = repo.create_head(branch_name)
    new_branch.checkout()
    if config.verbose:
        # NOTE(review): message typo "Checkout out" — runtime string,
        # left unchanged here; fix separately if desired.
        click.echo("Checkout out new branch: {}".format(branch_name))
    save(config.configfile, summary, branch_name, bugnumber=bugnumber, url=url)
12,168
https://github.com/peterbe/gg/blob/2aace5bdb4a9b1cb65bea717784edf54c63b7bad/gg/builtins/start/gg_start.py#L18-L65
[ "def", "loadFileList", "(", "self", ")", ":", "try", ":", "data", "=", "open", "(", "self", ".", "filelist_file", ",", "'rb'", ")", "except", "IOError", ":", "'''print \"No SRTM cached file list. Creating new one!\"'''", "if", "self", ".", "offline", "==", "0", ":", "self", ".", "createFileList", "(", ")", "return", "try", ":", "self", ".", "filelist", "=", "pickle", ".", "load", "(", "data", ")", "data", ".", "close", "(", ")", "if", "len", "(", "self", ".", "filelist", ")", "<", "self", ".", "min_filelist_len", ":", "self", ".", "filelist", "=", "{", "}", "if", "self", ".", "offline", "==", "0", ":", "self", ".", "createFileList", "(", ")", "except", ":", "'''print \"Unknown error loading cached SRTM file list. Creating new one!\"'''", "if", "self", ".", "offline", "==", "0", ":", "self", ".", "createFileList", "(", ")" ]
Concatenate conditioning vector on feature map axis .
def conv_cond_concat(x, y):
    """Concatenate conditioning vector on feature map axis.

    Broadcasts ``y`` across the spatial dimensions of ``x`` and
    concatenates the result on the channel axis (axis 3).
    """
    x_shape = x.get_shape()
    y_shape = y.get_shape()
    # Tile the conditioning vector over batch/height/width by
    # multiplying with a ones tensor of the matching shape.
    broadcast = y * tf.ones([x_shape[0], x_shape[1], x_shape[2], y_shape[3]])
    # NOTE: tf.concat(axis, values) is the TF 0.x argument order —
    # presumably this file targets an old TensorFlow; confirm.
    return tf.concat(3, [x, broadcast])
12,169
https://github.com/255BITS/hyperchamber/blob/4d5774bde9ea6ce1113f77a069ffc605148482b8/examples/shared/ops.py#L106-L110
[ "def", "schemas_access_for_csv_upload", "(", "self", ")", ":", "if", "not", "request", ".", "args", ".", "get", "(", "'db_id'", ")", ":", "return", "json_error_response", "(", "'No database is allowed for your csv upload'", ")", "db_id", "=", "int", "(", "request", ".", "args", ".", "get", "(", "'db_id'", ")", ")", "database", "=", "(", "db", ".", "session", ".", "query", "(", "models", ".", "Database", ")", ".", "filter_by", "(", "id", "=", "db_id", ")", ".", "one", "(", ")", ")", "try", ":", "schemas_allowed", "=", "database", ".", "get_schema_access_for_csv_upload", "(", ")", "if", "(", "security_manager", ".", "database_access", "(", "database", ")", "or", "security_manager", ".", "all_datasource_access", "(", ")", ")", ":", "return", "self", ".", "json_response", "(", "schemas_allowed", ")", "# the list schemas_allowed should not be empty here", "# and the list schemas_allowed_processed returned from security_manager", "# should not be empty either,", "# otherwise the database should have been filtered out", "# in CsvToDatabaseForm", "schemas_allowed_processed", "=", "security_manager", ".", "schemas_accessible_by_user", "(", "database", ",", "schemas_allowed", ",", "False", ")", "return", "self", ".", "json_response", "(", "schemas_allowed_processed", ")", "except", "Exception", ":", "return", "json_error_response", "(", "(", "'Failed to fetch schemas allowed for csv upload in this database! '", "'Please contact Superset Admin!\\n\\n'", "'The error message returned was:\\n{}'", ")", ".", "format", "(", "traceback", ".", "format_exc", "(", ")", ")", ")" ]
Concatenates lrelu and square
def lrelu_sq(x):
    """Concatenate lrelu(x) and min(|x|, x^2) along the last axis."""
    last_axis = len(x.get_shape()) - 1
    squashed = tf.minimum(tf.abs(x), tf.square(x))
    # NOTE: tf.concat(axis, values) is the TF 0.x argument order.
    return tf.concat(last_axis, [lrelu(x), squashed])
12,170
https://github.com/255BITS/hyperchamber/blob/4d5774bde9ea6ce1113f77a069ffc605148482b8/examples/shared/ops.py#L223-L228
[ "def", "tscube", "(", "self", ",", "prefix", "=", "''", ",", "*", "*", "kwargs", ")", ":", "self", ".", "logger", ".", "info", "(", "'Generating TS cube'", ")", "schema", "=", "ConfigSchema", "(", "self", ".", "defaults", "[", "'tscube'", "]", ")", "schema", ".", "add_option", "(", "'make_plots'", ",", "True", ")", "schema", ".", "add_option", "(", "'write_fits'", ",", "True", ")", "schema", ".", "add_option", "(", "'write_npy'", ",", "True", ")", "config", "=", "schema", ".", "create_config", "(", "self", ".", "config", "[", "'tscube'", "]", ",", "*", "*", "kwargs", ")", "maps", "=", "self", ".", "_make_ts_cube", "(", "prefix", ",", "*", "*", "config", ")", "if", "config", "[", "'make_plots'", "]", ":", "plotter", "=", "plotting", ".", "AnalysisPlotter", "(", "self", ".", "config", "[", "'plotting'", "]", ",", "fileio", "=", "self", ".", "config", "[", "'fileio'", "]", ",", "logging", "=", "self", ".", "config", "[", "'logging'", "]", ")", "plotter", ".", "make_tsmap_plots", "(", "maps", ",", "self", ".", "roi", ",", "suffix", "=", "'tscube'", ")", "self", ".", "logger", ".", "info", "(", "\"Finished TS cube\"", ")", "return", "maps" ]
Calculate the average gradient for each shared variable across all towers .
def avg_grads(tower_grads):
    """Calculate the average gradient for each shared variable across all towers.

    Args:
        tower_grads: list with one entry per tower; each entry is a
            list of (gradient, variable) tuples as produced by
            ``compute_gradients``.

    Returns:
        A single list of (averaged_gradient, variable) tuples.
    """
    average_grads = []
    for grad_and_vars in zip(*tower_grads):
        # Note that each grad_and_vars looks like the following:
        # ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))
        grads = []
        for g, _ in grad_and_vars:
            # Add 0 dimension to the gradients to represent the tower.
            expanded_g = tf.expand_dims(g, 0)
            # Append on a 'tower' dimension which we will average over below.
            grads.append(expanded_g)
        # Average over the 'tower' dimension.
        # NOTE: tf.concat(axis, values) is the TF 0.x argument order.
        grad = tf.concat(0, grads)
        grad = tf.reduce_mean(grad, 0)
        # Keep in mind that the Variables are redundant because they are shared
        # across towers. So .. we will just return the first tower's pointer to
        # the Variable.
        v = grad_and_vars[0][1]
        grad_and_var = (grad, v)
        average_grads.append(grad_and_var)
    return average_grads
12,171
https://github.com/255BITS/hyperchamber/blob/4d5774bde9ea6ce1113f77a069ffc605148482b8/examples/shared/ops.py#L264-L299
[ "def", "get_entity_info", "(", "pdb_id", ")", ":", "out", "=", "get_info", "(", "pdb_id", ",", "url_root", "=", "'http://www.rcsb.org/pdb/rest/getEntityInfo?structureId='", ")", "out", "=", "to_dict", "(", "out", ")", "return", "remove_at_sign", "(", "out", "[", "'entityInfo'", "]", "[", "'PDB'", "]", ")" ]
convert escaped unicode web entities to unicode
def unescape_utf8(msg):
    """Convert percent-escapes and numeric HTML character references
    in ``msg`` to their unicode characters.

    Handles both decimal ("&#65;") and hexadecimal ("&#x41;")
    references. Named entities such as "&amp;" are left untouched:
    the previous pattern ``&#?\\w+;`` also matched them and then
    crashed in ``int(text[2:-1])``.
    """
    try:
        _chr = unichr          # Python 2
        _unquote = urllib.unquote
    except NameError:
        # Python 3: unichr is gone and unquote moved to urllib.parse.
        from urllib.parse import unquote as _unquote
        _chr = chr

    def sub(m):
        text = m.group(0)
        if text[:3] == "&#x":
            return _chr(int(text[3:-1], 16))
        return _chr(int(text[2:-1]))

    # Only numeric character references ("&#..." required), so named
    # entities never reach int() and raise ValueError.
    return re.sub(r"&#x[0-9a-fA-F]+;|&#[0-9]+;", sub, _unquote(msg))
12,172
https://github.com/kwlzn/blast/blob/ae18a19182a6884c453bf9b2a3c6386bd3b2655a/blast/main.py#L41-L47
[ "def", "ParseFileObject", "(", "self", ",", "parser_mediator", ",", "file_object", ")", ":", "file_offset", "=", "file_object", ".", "get_offset", "(", ")", "file_size", "=", "file_object", ".", "get_size", "(", ")", "while", "file_offset", "<", "file_size", ":", "try", ":", "self", ".", "_ParseRecord", "(", "parser_mediator", ",", "file_object", ")", "except", "errors", ".", "ParseError", "as", "exception", ":", "if", "file_offset", "==", "0", ":", "raise", "errors", ".", "UnableToParseFile", "(", "'Unable to parse first event record with error: {0!s}'", ".", "format", "(", "exception", ")", ")", "# TODO: skip to next event record.", "file_offset", "=", "file_object", ".", "get_offset", "(", ")" ]
Ensure the given check value is of the given data type: if it is, return the check value directly; otherwise attempt a conversion and fall back to the default value.
def ensure(data_type, check_value, default_value=None):
    """Coerce ``check_value`` to ``data_type``, falling back to
    ``default_value``.

    If ``check_value`` is already an instance of ``data_type`` it is
    returned unchanged; otherwise a conversion is attempted and, on
    failure, ``default_value`` is returned.

    Raises:
        ValueError: if ``default_value`` is given but is not itself an
            instance of ``data_type``.
    """
    if default_value is not None and not isinstance(default_value, data_type):
        raise ValueError("default_value must be the value in the given data "
                         "type.")
    if isinstance(check_value, data_type):
        return check_value
    try:
        return data_type(check_value)
    # Previously a bare `except:` which also swallowed SystemExit and
    # KeyboardInterrupt; conversion failures raise these two types.
    except (TypeError, ValueError):
        return default_value
12,173
https://github.com/KnightConan/sspdatatables/blob/1179a11358734e5e472e5eee703e8d34fa49e9bf/src/sspdatatables/utils/data_type_ensure.py#L7-L27
[ "def", "_spark_map", "(", "fun", ",", "indexed_param_grid", ",", "sc", ",", "seed", ",", "X_bc", ")", ":", "def", "_wrap_random_state", "(", "split_index", ",", "partition", ")", ":", "prng", "=", "np", ".", "random", ".", "RandomState", "(", "seed", "+", "split_index", ")", "yield", "map", "(", "partial", "(", "fun", ",", "prng", "=", "prng", ",", "X", "=", "X_bc", ")", ",", "partition", ")", "par_param_grid", "=", "sc", ".", "parallelize", "(", "indexed_param_grid", ")", "indexed_results", "=", "par_param_grid", ".", "mapPartitionsWithIndex", "(", "_wrap_random_state", ")", ".", "collect", "(", ")", "return", "[", "item", "for", "sublist", "in", "indexed_results", "for", "item", "in", "sublist", "]" ]
Mark the specified task as resolved in the FailedTask table .
def mark_resolved(task_id):
    """Mark the specified task as resolved in the FailedTask table.

    Every FailedTask row matching ``task_id`` that has not yet been
    resolved (``datetime_resolved`` is NULL) gets its
    ``datetime_resolved`` set to the current time.
    """
    # Imported lazily — presumably to avoid a circular import with the
    # models module at load time; confirm against the package layout.
    from . import models
    models.FailedTask.objects.filter(
        task_id=task_id, datetime_resolved=None
    ).update(datetime_resolved=now())
12,174
https://github.com/edx/edx-celeryutils/blob/d8745f5f0929ad154fad779a19fbefe7f51e9498/celery_utils/tasks.py#L13-L21
[ "def", "move_vobject", "(", "self", ",", "uid", ",", "from_file", ",", "to_file", ")", ":", "if", "from_file", "not", "in", "self", ".", "_reminders", "or", "to_file", "not", "in", "self", ".", "_reminders", ":", "return", "uid", "=", "uid", ".", "split", "(", "'@'", ")", "[", "0", "]", "with", "self", ".", "_lock", ":", "rem", "=", "open", "(", "from_file", ")", ".", "readlines", "(", ")", "for", "(", "index", ",", "line", ")", "in", "enumerate", "(", "rem", ")", ":", "if", "uid", "==", "md5", "(", "line", "[", ":", "-", "1", "]", ".", "encode", "(", "'utf-8'", ")", ")", ".", "hexdigest", "(", ")", ":", "del", "rem", "[", "index", "]", "open", "(", "from_file", ",", "'w'", ")", ".", "writelines", "(", "rem", ")", "open", "(", "to_file", ",", "'a'", ")", ".", "write", "(", "line", ")", "break" ]
Ratio test to check if two floating point numbers are equal .
def is_equal(a, b, tol):
    """Ratio test to check if two floating point numbers are equal.

    True when ``a == b`` exactly, or when their absolute difference
    is within ``tol`` times the larger of the two magnitudes.
    """
    return a == b or abs(a - b) <= tol * max(abs(a), abs(b))
12,175
https://github.com/flo-compbio/xlmhg/blob/8e5929ee1dc91b95e343b7a2b1b1d6664c4540a1/xlmhg/mhg.py#L28-L48
[ "def", "FromData", "(", "cls", ",", "stream", ",", "json_data", ",", "http", ",", "auto_transfer", "=", "None", ",", "gzip_encoded", "=", "False", ",", "*", "*", "kwds", ")", ":", "info", "=", "json", ".", "loads", "(", "json_data", ")", "missing_keys", "=", "cls", ".", "_REQUIRED_SERIALIZATION_KEYS", "-", "set", "(", "info", ".", "keys", "(", ")", ")", "if", "missing_keys", ":", "raise", "exceptions", ".", "InvalidDataError", "(", "'Invalid serialization data, missing keys: %s'", "%", "(", "', '", ".", "join", "(", "missing_keys", ")", ")", ")", "if", "'total_size'", "in", "kwds", ":", "raise", "exceptions", ".", "InvalidUserInputError", "(", "'Cannot override total_size on serialized Upload'", ")", "upload", "=", "cls", ".", "FromStream", "(", "stream", ",", "info", "[", "'mime_type'", "]", ",", "total_size", "=", "info", ".", "get", "(", "'total_size'", ")", ",", "gzip_encoded", "=", "gzip_encoded", ",", "*", "*", "kwds", ")", "if", "isinstance", "(", "stream", ",", "io", ".", "IOBase", ")", "and", "not", "stream", ".", "seekable", "(", ")", ":", "raise", "exceptions", ".", "InvalidUserInputError", "(", "'Cannot restart resumable upload on non-seekable stream'", ")", "if", "auto_transfer", "is", "not", "None", ":", "upload", ".", "auto_transfer", "=", "auto_transfer", "else", ":", "upload", ".", "auto_transfer", "=", "info", "[", "'auto_transfer'", "]", "upload", ".", "strategy", "=", "RESUMABLE_UPLOAD", "upload", ".", "_Initialize", "(", "# pylint: disable=protected-access", "http", ",", "info", "[", "'url'", "]", ")", "upload", ".", "RefreshResumableUploadState", "(", ")", "upload", ".", "EnsureInitialized", "(", ")", "if", "upload", ".", "auto_transfer", ":", "upload", ".", "StreamInChunks", "(", ")", "return", "upload" ]
Returns a sublist view for all ports of given side .
def getPortSideView(self, side) -> List["LPort"]:
    """Return the sublist view holding all ports of the given side.

    Raises:
        ValueError: for an unrecognized ``side`` value.
    """
    # Flat guard clauses instead of an if/elif chain.
    if side == PortSide.WEST:
        return self.west
    if side == PortSide.EAST:
        return self.east
    if side == PortSide.NORTH:
        return self.north
    if side == PortSide.SOUTH:
        return self.south
    raise ValueError(side)
12,176
https://github.com/Nic30/hwtGraph/blob/6b7d4fdd759f263a0fdd2736f02f123e44e4354f/hwtGraph/elk/containers/lNode.py#L53-L76
[ "def", "consumer_commit_for_times", "(", "consumer", ",", "partition_to_offset", ",", "atomic", "=", "False", ")", ":", "no_offsets", "=", "set", "(", ")", "for", "tp", ",", "offset", "in", "six", ".", "iteritems", "(", "partition_to_offset", ")", ":", "if", "offset", "is", "None", ":", "logging", ".", "error", "(", "\"No offsets found for topic-partition {tp}. Either timestamps not supported\"", "\" for the topic {tp}, or no offsets found after timestamp specified, or there is no\"", "\" data in the topic-partition.\"", ".", "format", "(", "tp", "=", "tp", ")", ",", ")", "no_offsets", ".", "add", "(", "tp", ")", "if", "atomic", "and", "len", "(", "no_offsets", ")", ">", "0", ":", "logging", ".", "error", "(", "\"Commit aborted; offsets were not found for timestamps in\"", "\" topics {}\"", ".", "format", "(", "\",\"", ".", "join", "(", "[", "str", "(", "tp", ")", "for", "tp", "in", "no_offsets", "]", ")", ")", ",", ")", "return", "offsets_metadata", "=", "{", "tp", ":", "OffsetAndMetadata", "(", "partition_to_offset", "[", "tp", "]", ".", "offset", ",", "metadata", "=", "None", ")", "for", "tp", "in", "six", ".", "iterkeys", "(", "partition_to_offset", ")", "if", "tp", "not", "in", "no_offsets", "}", "if", "len", "(", "offsets_metadata", ")", "!=", "0", ":", "consumer", ".", "commit", "(", "offsets_metadata", ")" ]
Iterate over edges connected from outside of this unit.
def iterEdges(self, filterSelfLoops=False):
    """Iterate over the edges connected to this unit's ports."""
    for port in self.iterPorts():
        for edge in port.iterEdges(filterSelfLoops=filterSelfLoops):
            yield edge
12,177
https://github.com/Nic30/hwtGraph/blob/6b7d4fdd759f263a0fdd2736f02f123e44e4354f/hwtGraph/elk/containers/lNode.py#L94-L99
[ "def", "generate_http_manifest", "(", "self", ")", ":", "base_path", "=", "os", ".", "path", ".", "dirname", "(", "self", ".", "translate_path", "(", "self", ".", "path", ")", ")", "self", ".", "dataset", "=", "dtoolcore", ".", "DataSet", ".", "from_uri", "(", "base_path", ")", "admin_metadata_fpath", "=", "os", ".", "path", ".", "join", "(", "base_path", ",", "\".dtool\"", ",", "\"dtool\"", ")", "with", "open", "(", "admin_metadata_fpath", ")", "as", "fh", ":", "admin_metadata", "=", "json", ".", "load", "(", "fh", ")", "http_manifest", "=", "{", "\"admin_metadata\"", ":", "admin_metadata", ",", "\"manifest_url\"", ":", "self", ".", "generate_url", "(", "\".dtool/manifest.json\"", ")", ",", "\"readme_url\"", ":", "self", ".", "generate_url", "(", "\"README.yml\"", ")", ",", "\"overlays\"", ":", "self", ".", "generate_overlay_urls", "(", ")", ",", "\"item_urls\"", ":", "self", ".", "generate_item_urls", "(", ")", "}", "return", "bytes", "(", "json", ".", "dumps", "(", "http_manifest", ")", ",", "\"utf-8\"", ")" ]
Links the content found at source_path and represents a Block that represents the content .
def link(source_path):
    """Link the content found at ``source_path`` into a Block tree.

    Returns:
        tuple: (all_block, variables) — the root Block representing
        the whole file, and the variables collected into the BlockMap
        while converting.

    Raises:
        SourceNotFound: if ``source_path`` is not an existing file.
    """
    if not os.path.isfile(source_path):
        raise SourceNotFound(source_path)
    with open(source_path, 'r') as f:
        content = f.read()
    block_map = BlockMap()
    # The map will be populated with the following function call.
    all_block = convert_lines_to_block(
        content.splitlines(), block_map, LinkStack(source_path), source_path)
    return all_block, block_map.get_variables()
12,178
https://github.com/albert12132/templar/blob/39851c89730ab69e5c73d0a46adca2a44ecc4165/templar/linker.py#L10-L19
[ "def", "set_USRdict", "(", "self", ",", "USRdict", "=", "{", "}", ")", ":", "self", ".", "_check_inputs", "(", "USRdict", "=", "USRdict", ")", "self", ".", "_USRdict", "=", "USRdict" ]
Process a string of content for include tags .
def process_links(include_match, block_map, link_stack, source_path):
    """Resolve one include tag match into its referenced block.

    Args:
        include_match: regex match whose groups are (1) the leading
            whitespace, (2) the include path, and (3) an optional
            ':block-name' suffix.
        block_map: the BlockMap being populated.
        link_stack: stack of files being linked (cycle detection —
            presumably; confirm in LinkStack).
        source_path: path of the file containing the include tag.
    """
    leading_whitespace = include_match.group(1)
    include_path = include_match.group(2)
    # Optional block name. If match is None, block name was omitted
    # (default to 'all').
    block_name = include_match.group(3)
    if block_name is not None:
        block_name = block_name.lstrip(':')
    else:
        block_name = ALL_BLOCK_NAME
    return retrieve_block_from_map(
        source_path, include_path.strip(), block_name.strip(),
        leading_whitespace, block_map, link_stack)
12,179
https://github.com/albert12132/templar/blob/39851c89730ab69e5c73d0a46adca2a44ecc4165/templar/linker.py#L232-L263
[ "def", "cancel_order", "(", "self", ",", "order_id", ",", "stock", ")", ":", "url_fragment", "=", "'venues/{venue}/stocks/{stock}/orders/{order_id}'", ".", "format", "(", "venue", "=", "self", ".", "venue", ",", "stock", "=", "stock", ",", "order_id", "=", "order_id", ",", ")", "url", "=", "urljoin", "(", "self", ".", "base_url", ",", "url_fragment", ")", "return", "self", ".", "session", ".", "delete", "(", "url", ")", ".", "json", "(", ")" ]
Wrap the function in a warnings . catch_warnings context .
def catch_warnings(action, category=Warning, lineno=0, append=False):
    """Decorator factory: run the wrapped function inside a
    ``warnings.catch_warnings`` context with the given filter applied.

    The parameters mirror ``warnings.simplefilter``.
    """
    def decorator(func):
        def wrapper(*args, **kwargs):
            with warnings.catch_warnings():
                warnings.simplefilter(action, category, lineno, append)
                return func(*args, **kwargs)
        # Preserve the wrapped function's metadata.
        return functools.wraps(func)(wrapper)
    return decorator
12,180
https://github.com/althonos/moclo/blob/28a03748df8a2fa43f0c0c8098ca64d11559434e/moclo/moclo/_utils.py#L27-L54
[ "def", "_update_expander_status", "(", "self", ",", "message", ")", ":", "if", "message", ".", "type", "==", "ExpanderMessage", ".", "RELAY", ":", "self", ".", "_relay_status", "[", "(", "message", ".", "address", ",", "message", ".", "channel", ")", "]", "=", "message", ".", "value", "self", ".", "on_relay_changed", "(", "message", "=", "message", ")", "return", "self", ".", "_relay_status", "[", "(", "message", ".", "address", ",", "message", ".", "channel", ")", "]" ]
Try to guess which module imported app.py.
def _guess_caller():
    """Try to guess which module imported app.py.

    Inspects the call stack one frame up, resolves that frame's
    module, and records the module's absolute file path in the
    module-level ``_caller_path`` global.

    Returns:
        The absolute path of the caller's file, or None (implicitly)
        when the calling module has no ``__file__`` attribute — e.g.
        an interactive session.
    """
    import inspect
    global _caller_path
    # Frame [0] is this function; [1] is whoever called it.
    caller = inspect.stack()[1]
    caller_module = inspect.getmodule(caller[0])
    if hasattr(caller_module, '__file__'):
        _caller_path = os.path.abspath(caller_module.__file__)
        return _caller_path
12,181
https://github.com/reorx/torext/blob/84c4300ebc7fab0dbd11cf8b020bc7d4d1570171/torext/app.py#L564-L575
[ "def", "set_keyvault_secret", "(", "access_token", ",", "vault_uri", ",", "secret_name", ",", "secret_value", ")", ":", "endpoint", "=", "''", ".", "join", "(", "[", "vault_uri", ",", "'/secrets/'", ",", "secret_name", ",", "'?api-version='", ",", "'7.0'", "]", ")", "current_time", "=", "datetime", ".", "datetime", ".", "now", "(", ")", ".", "isoformat", "(", ")", "attributes", "=", "{", "'created'", ":", "current_time", ",", "'enabled'", ":", "True", ",", "'exp'", ":", "None", ",", "'nbf'", ":", "None", ",", "'recoveryLevel'", ":", "'Purgeable'", ",", "'updated'", ":", "current_time", "}", "secret_body", "=", "{", "'attributes'", ":", "attributes", ",", "'contentType'", ":", "None", ",", "'kid'", ":", "None", ",", "'managed'", ":", "None", ",", "'tags'", ":", "{", "'file-encoding'", ":", "'utf-8'", "}", ",", "'value'", ":", "secret_value", "}", "body", "=", "json", ".", "dumps", "(", "secret_body", ")", "print", "(", "body", ")", "return", "do_put", "(", "endpoint", ",", "body", ",", "access_token", ")" ]
Fix static_path and template_path to be absolute paths relative to self.root_path, so that the working directory (PWD) can be ignored.
def _fix_paths(self, options):
    """Make ``template_path`` and ``static_path`` absolute.

    Relative values are resolved against ``self.root_path`` so the
    process working directory can be ignored. Keys whose value is
    None are popped and not re-added, effectively removing them.
    """
    for k in ('template_path', 'static_path'):
        if k in options:
            v = options.pop(k)
            if v is None:
                # Drop the key entirely (it was popped above).
                continue
            if not os.path.isabs(v):
                v = os.path.abspath(os.path.join(self.root_path, v))
                app_log.debug('Fix %s to be absolute: %s' % (k, v))
            options[k] = v
12,182
https://github.com/reorx/torext/blob/84c4300ebc7fab0dbd11cf8b020bc7d4d1570171/torext/app.py#L190-L204
[ "def", "_add", "(", "self", ",", "hostport", ")", ":", "peer", "=", "self", ".", "peer_class", "(", "tchannel", "=", "self", ".", "tchannel", ",", "hostport", "=", "hostport", ",", "on_conn_change", "=", "self", ".", "_update_heap", ",", ")", "peer", ".", "rank", "=", "self", ".", "rank_calculator", ".", "get_rank", "(", "peer", ")", "self", ".", "_peers", "[", "peer", ".", "hostport", "]", "=", "peer", "self", ".", "peer_heap", ".", "add_and_shuffle", "(", "peer", ")" ]
This is a decorator
def route(self, url, host=None):
    """Decorator that registers a handler class for ``url``.

    The (url, handler) pair is prepended to the handler list for
    ``host``; the class is returned unchanged so it can still be used
    normally.
    """
    def decorator(handler_cls):
        self._get_handlers_on_host(host).insert(0, (url, handler_cls))
        return handler_cls
    return decorator
12,183
https://github.com/reorx/torext/blob/84c4300ebc7fab0dbd11cf8b020bc7d4d1570171/torext/app.py#L214-L221
[ "def", "create_dirs", "(", "self", ")", ":", "if", "not", "os", ".", "path", ".", "isdir", "(", "self", ".", "_path", ")", ":", "os", ".", "makedirs", "(", "self", ".", "_path", ")", "for", "dir_name", "in", "[", "self", ".", "OBJ_DIR", ",", "self", ".", "TMP_OBJ_DIR", ",", "self", ".", "PKG_DIR", ",", "self", ".", "CACHE_DIR", "]", ":", "path", "=", "os", ".", "path", ".", "join", "(", "self", ".", "_path", ",", "dir_name", ")", "if", "not", "os", ".", "path", ".", "isdir", "(", "path", ")", ":", "os", ".", "mkdir", "(", "path", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "self", ".", "_version_path", "(", ")", ")", ":", "self", ".", "_write_format_version", "(", ")" ]
settings . py is the basis
def command_line_config(self):
    """Override settings from ``--key=value`` command-line arguments.

    ``settings.py`` provides the base values: for keys that already
    exist in ``settings``, the string argument is coerced to the
    existing value's type (with special True/False handling for
    bools); unknown keys are stored as raw strings.

    Raises:
        errors.ArgsParseError: for arguments not of the form
            ``--key=value``, or a non-True/False value for a bool key.
    """
    args = sys.argv[1:]
    args_dict = {}
    existed_keys = []
    new_keys = []
    for t in args:
        if not t.startswith('--'):
            raise errors.ArgsParseError('Bad arg: %s' % t)
        try:
            # tuple() unpacking fails unless exactly one '=' is present.
            key, value = tuple(t[2:].split('='))
        except:
            raise errors.ArgsParseError('Bad arg: %s' % t)
        args_dict[key] = value
        if key in settings:
            existed_keys.append(key)
        else:
            new_keys.append(key)
    if existed_keys:
        app_log.debug('Changed settings:')
        for i in existed_keys:
            before = settings[i]
            type_ = type(before)
            if type_ is bool:
                # bool('False') would be truthy — parse explicitly.
                if args_dict[i] == 'True':
                    _value = True
                elif args_dict[i] == 'False':
                    _value = False
                else:
                    raise errors.ArgsParseError('%s should only be True or False' % i)
            else:
                # Coerce to the type of the existing settings value.
                _value = type_(args_dict[i])
            settings[i] = _value
            app_log.debug(' %s [%s]%s (%s)', i, type(settings[i]), settings[i], before)
    if new_keys:
        app_log.debug('New settings:')
        for i in new_keys:
            settings[i] = args_dict[i]
            app_log.debug(' %s %s', i, args_dict[i])
    # NOTE if ``command_line_config`` is called, logging must be re-configed
    self.update_settings({})
12,184
https://github.com/reorx/torext/blob/84c4300ebc7fab0dbd11cf8b020bc7d4d1570171/torext/app.py#L266-L325
[ "def", "DeleteNotifications", "(", "self", ",", "session_ids", ",", "start", "=", "None", ",", "end", "=", "None", ")", ":", "if", "not", "session_ids", ":", "return", "for", "session_id", "in", "session_ids", ":", "if", "not", "isinstance", "(", "session_id", ",", "rdfvalue", ".", "SessionID", ")", ":", "raise", "RuntimeError", "(", "\"Can only delete notifications for rdfvalue.SessionIDs.\"", ")", "if", "start", "is", "None", ":", "start", "=", "0", "else", ":", "start", "=", "int", "(", "start", ")", "if", "end", "is", "None", ":", "end", "=", "self", ".", "frozen_timestamp", "or", "rdfvalue", ".", "RDFDatetime", ".", "Now", "(", ")", "for", "queue", ",", "ids", "in", "iteritems", "(", "collection", ".", "Group", "(", "session_ids", ",", "lambda", "session_id", ":", "session_id", ".", "Queue", "(", ")", ")", ")", ":", "queue_shards", "=", "self", ".", "GetAllNotificationShards", "(", "queue", ")", "self", ".", "data_store", ".", "DeleteNotifications", "(", "queue_shards", ",", "ids", ",", "start", ",", "end", ")" ]
This function will be called both before run and testing started .
def setup(self):
    """Prepare the app environment; called before both run and testing.

    Configures test logging, resets the timezone from settings,
    derives/validates the PROJECT name from the root path, makes the
    project package importable, and marks the app as set up.
    """
    testing = settings.get('TESTING')
    if testing:
        # Fix nose handler in testing situation.
        config = settings['LOGGERS'].get('', {})
        set_nose_formatter(config)
    # reset timezone
    os.environ['TZ'] = settings['TIME_ZONE']
    time.tzset()
    # determine project name
    if settings._module:
        project = os.path.split(self.root_path)[1]
        if settings['PROJECT']:
            assert settings['PROJECT'] == project, 'PROJECT specialized in settings (%s) ' 'should be the same as project directory name (%s)' % (settings['PROJECT'], project)
        else:
            settings['PROJECT'] = project
    # PROJECT should be importable as a python module
    if settings['PROJECT']:
        # add upper directory path to sys.path if not in
        if settings._module:
            _abs = os.path.abspath
            parent_path = os.path.dirname(self.root_path)
            if not _abs(parent_path) in [_abs(i) for i in sys.path]:
                sys.path.insert(0, parent_path)
                app_log.info('Add %s to sys.path' % _abs(parent_path))
        try:
            __import__(settings['PROJECT'])
            app_log.debug('import package `%s` success' % settings['PROJECT'])
        except ImportError:
            # Fixed: the implicit string concatenation previously
            # dropped the space ("...outside the projector there is...").
            raise ImportError(
                'PROJECT could not be imported, may be app.py is outside the project '
                'or there is no __init__ in the package.')
    self.is_setuped = True
12,185
https://github.com/reorx/torext/blob/84c4300ebc7fab0dbd11cf8b020bc7d4d1570171/torext/app.py#L327-L367
[ "def", "DownloadDir", "(", "aff4_path", ",", "output_dir", ",", "bufsize", "=", "8192", ",", "preserve_path", "=", "True", ")", ":", "if", "not", "os", ".", "path", ".", "isdir", "(", "output_dir", ")", ":", "os", ".", "makedirs", "(", "output_dir", ")", "fd", "=", "aff4", ".", "FACTORY", ".", "Open", "(", "aff4_path", ")", "for", "child", "in", "fd", ".", "OpenChildren", "(", ")", ":", "if", "preserve_path", ":", "# Get a full path without the aff4:", "full_dir", "=", "utils", ".", "JoinPath", "(", "output_dir", ",", "child", ".", "urn", ".", "Path", "(", ")", ")", "full_dir", "=", "os", ".", "path", ".", "dirname", "(", "full_dir", ")", "if", "not", "os", ".", "path", ".", "isdir", "(", "full_dir", ")", ":", "os", ".", "makedirs", "(", "full_dir", ")", "outfile", "=", "os", ".", "path", ".", "join", "(", "full_dir", ",", "child", ".", "urn", ".", "Basename", "(", ")", ")", "else", ":", "outfile", "=", "os", ".", "path", ".", "join", "(", "output_dir", ",", "child", ".", "urn", ".", "Basename", "(", ")", ")", "logging", ".", "info", "(", "u\"Downloading %s to %s\"", ",", "child", ".", "urn", ",", "outfile", ")", "with", "open", "(", "outfile", ",", "\"wb\"", ")", "as", "out_fd", ":", "try", ":", "buf", "=", "child", ".", "Read", "(", "bufsize", ")", "while", "buf", ":", "out_fd", ".", "write", "(", "buf", ")", "buf", "=", "child", ".", "Read", "(", "bufsize", ")", "except", "IOError", "as", "e", ":", "logging", ".", "error", "(", "\"Failed to read %s. Err: %s\"", ",", "child", ".", "urn", ",", "e", ")" ]
Initialize application object for torext app if a existed application is passed then just use this one without make a new one
def _init_application ( self , application = None ) : if application : self . application = application else : self . application = self . make_application ( )
12,186
https://github.com/reorx/torext/blob/84c4300ebc7fab0dbd11cf8b020bc7d4d1570171/torext/app.py#L530-L536
[ "def", "check_keypoints", "(", "keypoints", ",", "rows", ",", "cols", ")", ":", "for", "kp", "in", "keypoints", ":", "check_keypoint", "(", "kp", ",", "rows", ",", "cols", ")" ]
Override Application . log_function so that what to log can be controlled .
def _log_function ( self , handler ) : if handler . get_status ( ) < 400 : log_method = request_log . info elif handler . get_status ( ) < 500 : log_method = request_log . warning else : log_method = request_log . error for i in settings [ 'LOGGING_IGNORE_URLS' ] : if handler . request . uri . startswith ( i ) : log_method = request_log . debug break request_time = 1000.0 * handler . request . request_time ( ) log_method ( "%d %s %.2fms" , handler . get_status ( ) , handler . _request_summary ( ) , request_time )
12,187
https://github.com/reorx/torext/blob/84c4300ebc7fab0dbd11cf8b020bc7d4d1570171/torext/app.py#L542-L558
[ "def", "_getAssociation", "(", "self", ",", "endpoint", ")", ":", "assoc", "=", "self", ".", "store", ".", "getAssociation", "(", "endpoint", ".", "server_url", ")", "if", "assoc", "is", "None", "or", "assoc", ".", "expiresIn", "<=", "0", ":", "assoc", "=", "self", ".", "_negotiateAssociation", "(", "endpoint", ")", "if", "assoc", "is", "not", "None", ":", "self", ".", "store", ".", "storeAssociation", "(", "endpoint", ".", "server_url", ",", "assoc", ")", "return", "assoc" ]
Xavier initialization of network weights
def xavier_init ( fan_in , fan_out , constant = 1 ) : # https://stackoverflow.com/questions/33640581/how-to-do-xavier-initialization-on-tensorflow low = - constant * np . sqrt ( 6.0 / ( fan_in + fan_out ) ) high = constant * np . sqrt ( 6.0 / ( fan_in + fan_out ) ) return tf . random_uniform ( ( fan_in , fan_out ) , minval = low , maxval = high , dtype = tf . float32 )
12,188
https://github.com/255BITS/hyperchamber/blob/4d5774bde9ea6ce1113f77a069ffc605148482b8/examples/shared/variational_autoencoder.py#L8-L15
[ "def", "get_editor_query", "(", "sql", ")", ":", "sql", "=", "sql", ".", "strip", "(", ")", "# The reason we can't simply do .strip('\\e') is that it strips characters,", "# not a substring. So it'll strip \"e\" in the end of the sql also!", "# Ex: \"select * from style\\e\" -> \"select * from styl\".", "pattern", "=", "re", ".", "compile", "(", "'(^\\\\\\e|\\\\\\e$)'", ")", "while", "pattern", ".", "search", "(", "sql", ")", ":", "sql", "=", "pattern", ".", "sub", "(", "''", ",", "sql", ")", "return", "sql" ]
Train model based on mini - batch of input data . Return cost of mini - batch .
def partial_fit ( self , X ) : opt , cost = self . sess . run ( ( self . optimizer , self . cost ) , feed_dict = { self . x : X } ) return cost
12,189
https://github.com/255BITS/hyperchamber/blob/4d5774bde9ea6ce1113f77a069ffc605148482b8/examples/shared/variational_autoencoder.py#L155-L162
[ "def", "close", "(", "self", ")", ":", "with", "self", ".", "_close_lock", ":", "epfd", "=", "self", ".", "_epfd", "if", "epfd", ">=", "0", ":", "self", ".", "_epfd", "=", "-", "1", "close", "(", "epfd", ")" ]
Transform data by mapping it into the latent space .
def transform ( self , X ) : # Note: This maps to mean of distribution, we could alternatively # sample from Gaussian distribution return self . sess . run ( self . z_mean , feed_dict = { self . x : X } )
12,190
https://github.com/255BITS/hyperchamber/blob/4d5774bde9ea6ce1113f77a069ffc605148482b8/examples/shared/variational_autoencoder.py#L164-L168
[ "def", "remove_armor", "(", "armored_data", ")", ":", "stream", "=", "io", ".", "BytesIO", "(", "armored_data", ")", "lines", "=", "stream", ".", "readlines", "(", ")", "[", "3", ":", "-", "1", "]", "data", "=", "base64", ".", "b64decode", "(", "b''", ".", "join", "(", "lines", ")", ")", "payload", ",", "checksum", "=", "data", "[", ":", "-", "3", "]", ",", "data", "[", "-", "3", ":", "]", "assert", "util", ".", "crc24", "(", "payload", ")", "==", "checksum", "return", "payload" ]
Generate data by sampling from latent space . If z_mu is not None data for this point in latent space is generated . Otherwise z_mu is drawn from prior in latent space .
def generate ( self , z_mu = None ) : if z_mu is None : z_mu = np . random . normal ( size = self . network_architecture [ "n_z" ] ) # Note: This maps to mean of distribution, we could alternatively # sample from Gaussian distribution return self . sess . run ( self . x_reconstr_mean , feed_dict = { self . z : z_mu } )
12,191
https://github.com/255BITS/hyperchamber/blob/4d5774bde9ea6ce1113f77a069ffc605148482b8/examples/shared/variational_autoencoder.py#L170-L182
[ "def", "uxor", "(", "self", ")", ":", "return", "reduce", "(", "operator", ".", "xor", ",", "self", ".", "_items", ",", "self", ".", "ftype", ".", "box", "(", "0", ")", ")" ]
Use VAE to reconstruct given data .
def reconstruct ( self , X ) : return self . sess . run ( self . x_reconstr_mean , feed_dict = { self . x : X } )
12,192
https://github.com/255BITS/hyperchamber/blob/4d5774bde9ea6ce1113f77a069ffc605148482b8/examples/shared/variational_autoencoder.py#L184-L187
[ "def", "unit_is_related", "(", "self", ",", "location", ",", "worksheet", ")", ":", "same_worksheet", "=", "worksheet", "==", "self", ".", "worksheet", "if", "isinstance", "(", "location", ",", "(", "tuple", ",", "list", ")", ")", ":", "return", "(", "location", "[", "0", "]", ">=", "self", ".", "start", "[", "0", "]", "and", "location", "[", "0", "]", "<", "self", ".", "end", "[", "0", "]", "and", "location", "[", "1", "]", ">=", "self", ".", "start", "[", "1", "]", "and", "location", "[", "1", "]", "<", "self", ".", "end", "[", "1", "]", "and", "same_worksheet", ")", "else", ":", "return", "same_worksheet" ]
Determine triangular elements adjacend to the boundary elements
def get_ajd_bound ( mesh ) : print ( 'Get elements adjacent to boundaries' ) boundary_elements = [ ] str_adj_boundaries = '' # for boundary in mesh['elements']['1']: boundaries = mesh [ 'boundaries' ] [ '12' ] + mesh [ 'boundaries' ] [ '11' ] for boundary in boundaries : # now find the triangle ('2') with two nodes equal to this boundary indices = [ nr if ( boundary [ 0 ] in x and boundary [ 1 ] in x ) else np . nan for ( nr , x ) in enumerate ( mesh [ 'elements' ] [ '2' ] ) ] indices = np . array ( indices ) [ ~ np . isnan ( indices ) ] if ( len ( indices ) != 1 ) : print ( 'More than one neighbour found!' ) elif ( len ( indices ) == 0 ) : print ( 'No neighbour found!' ) boundary_elements . append ( indices [ 0 ] ) str_adj_boundaries += '{0}\n' . format ( int ( indices [ 0 ] ) + 1 ) return str_adj_boundaries , boundary_elements
12,193
https://github.com/geophysics-ubonn/crtomo_tools/blob/27c3e21a557f8df1c12455b96c4c2e00e08a5b4a/src/cr_trig_parse_gmsh.py#L385-L405
[ "def", "set_text", "(", "self", ",", "text", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "text", "is", "None", ":", "text", "=", "{", "}", "text", ".", "update", "(", "popdict", "(", "kwargs", ",", "_registered_kw", ")", ")", "if", "'Creation Time'", "in", "text", "and", "not", "isinstance", "(", "text", "[", "'Creation Time'", "]", ",", "(", "basestring", ",", "bytes", ")", ")", ":", "text", "[", "'Creation Time'", "]", "=", "datetime", ".", "datetime", "(", "*", "(", "check_time", "(", "text", "[", "'Creation Time'", "]", ")", "[", ":", "6", "]", ")", ")", ".", "isoformat", "(", ")", "self", ".", "text", "=", "text" ]
Read in the electrode positions and return the indices of the electrodes
def write_elec_file ( filename , mesh ) : elecs = [ ] # print('Write electrodes') electrodes = np . loadtxt ( filename ) for i in electrodes : # find for nr , j in enumerate ( mesh [ 'nodes' ] ) : if np . isclose ( j [ 1 ] , i [ 0 ] ) and np . isclose ( j [ 2 ] , i [ 1 ] ) : elecs . append ( nr + 1 ) fid = open ( 'elec.dat' , 'w' ) fid . write ( '{0}\n' . format ( len ( elecs ) ) ) for i in elecs : fid . write ( '{0}\n' . format ( i ) ) fid . close ( )
12,194
https://github.com/geophysics-ubonn/crtomo_tools/blob/27c3e21a557f8df1c12455b96c4c2e00e08a5b4a/src/cr_trig_parse_gmsh.py#L408-L427
[ "def", "_augment_exception", "(", "exc", ",", "version", ",", "arch", "=", "''", ")", ":", "# Error if MSVC++ directory not found or environment not set", "message", "=", "exc", ".", "args", "[", "0", "]", "if", "\"vcvarsall\"", "in", "message", ".", "lower", "(", ")", "or", "\"visual c\"", "in", "message", ".", "lower", "(", ")", ":", "# Special error message if MSVC++ not installed", "tmpl", "=", "'Microsoft Visual C++ {version:0.1f} is required.'", "message", "=", "tmpl", ".", "format", "(", "*", "*", "locals", "(", ")", ")", "msdownload", "=", "'www.microsoft.com/download/details.aspx?id=%d'", "if", "version", "==", "9.0", ":", "if", "arch", ".", "lower", "(", ")", ".", "find", "(", "'ia64'", ")", ">", "-", "1", ":", "# For VC++ 9.0, if IA64 support is needed, redirect user", "# to Windows SDK 7.0", "message", "+=", "' Get it with \"Microsoft Windows SDK 7.0\": '", "message", "+=", "msdownload", "%", "3138", "else", ":", "# For VC++ 9.0 redirect user to Vc++ for Python 2.7 :", "# This redirection link is maintained by Microsoft.", "# Contact vspython@microsoft.com if it needs updating.", "message", "+=", "' Get it from http://aka.ms/vcpython27'", "elif", "version", "==", "10.0", ":", "# For VC++ 10.0 Redirect user to Windows SDK 7.1", "message", "+=", "' Get it with \"Microsoft Windows SDK 7.1\": '", "message", "+=", "msdownload", "%", "8279", "elif", "version", ">=", "14.0", ":", "# For VC++ 14.0 Redirect user to Visual C++ Build Tools", "message", "+=", "(", "' Get it with \"Microsoft Visual C++ Build Tools\": '", "r'https://visualstudio.microsoft.com/downloads/'", ")", "exc", ".", "args", "=", "(", "message", ",", ")" ]
Returns the MDP state size .
def state_size ( self ) -> Sequence [ Shape ] : return self . _sizes ( self . _compiler . rddl . state_size )
12,195
https://github.com/thiagopbueno/tf-rddlsim/blob/d7102a0ad37d179dbb23141640254ea383d3b43f/tfrddlsim/simulation/policy_simulator.py#L77-L79
[ "def", "removeAllChildrenAtIndex", "(", "self", ",", "parentIndex", ")", ":", "if", "not", "parentIndex", ".", "isValid", "(", ")", ":", "logger", ".", "debug", "(", "\"No valid item selected for deletion (ignored).\"", ")", "return", "parentItem", "=", "self", ".", "getItem", "(", "parentIndex", ",", "None", ")", "logger", ".", "debug", "(", "\"Removing children of {!r}\"", ".", "format", "(", "parentItem", ")", ")", "assert", "parentItem", ",", "\"parentItem not found\"", "#firstChildRow = self.index(0, 0, parentIndex).row()", "#lastChildRow = self.index(parentItem.nChildren()-1, 0, parentIndex).row()", "#logger.debug(\"Removing rows: {} to {}\".format(firstChildRow, lastChildRow))", "#self.beginRemoveRows(parentIndex, firstChildRow, lastChildRow)", "self", ".", "beginRemoveRows", "(", "parentIndex", ",", "0", ",", "parentItem", ".", "nChildren", "(", ")", "-", "1", ")", "try", ":", "parentItem", ".", "removeAllChildren", "(", ")", "finally", ":", "self", ".", "endRemoveRows", "(", ")", "logger", ".", "debug", "(", "\"removeAllChildrenAtIndex completed\"", ")" ]
Returns the MDP action size .
def action_size ( self ) -> Sequence [ Shape ] : return self . _sizes ( self . _compiler . rddl . action_size )
12,196
https://github.com/thiagopbueno/tf-rddlsim/blob/d7102a0ad37d179dbb23141640254ea383d3b43f/tfrddlsim/simulation/policy_simulator.py#L82-L84
[ "def", "decrypt", "(", "ciphertext_blob", ",", "encryption_context", "=", "None", ",", "grant_tokens", "=", "None", ",", "region", "=", "None", ",", "key", "=", "None", ",", "keyid", "=", "None", ",", "profile", "=", "None", ")", ":", "conn", "=", "_get_conn", "(", "region", "=", "region", ",", "key", "=", "key", ",", "keyid", "=", "keyid", ",", "profile", "=", "profile", ")", "r", "=", "{", "}", "try", ":", "plaintext", "=", "conn", ".", "decrypt", "(", "ciphertext_blob", ",", "encryption_context", "=", "encryption_context", ",", "grant_tokens", "=", "grant_tokens", ")", "r", "[", "'plaintext'", "]", "=", "plaintext", "[", "'Plaintext'", "]", "except", "boto", ".", "exception", ".", "BotoServerError", "as", "e", ":", "r", "[", "'error'", "]", "=", "__utils__", "[", "'boto.get_error'", "]", "(", "e", ")", "return", "r" ]
Returns the MDP intermediate state size .
def interm_size ( self ) -> Sequence [ Shape ] : return self . _sizes ( self . _compiler . rddl . interm_size )
12,197
https://github.com/thiagopbueno/tf-rddlsim/blob/d7102a0ad37d179dbb23141640254ea383d3b43f/tfrddlsim/simulation/policy_simulator.py#L87-L89
[ "def", "delete_authoring_nodes", "(", "self", ",", "editor", ")", ":", "editor_node", "=", "foundations", ".", "common", ".", "get_first_item", "(", "self", ".", "get_editor_nodes", "(", "editor", ")", ")", "file_node", "=", "editor_node", ".", "parent", "self", ".", "unregister_editor", "(", "editor_node", ")", "self", ".", "unregister_file", "(", "file_node", ",", "raise_exception", "=", "False", ")", "return", "True" ]
Returns the simulation cell output size .
def output_size ( self ) -> Tuple [ Sequence [ Shape ] , Sequence [ Shape ] , Sequence [ Shape ] , int ] : return ( self . state_size , self . action_size , self . interm_size , 1 )
12,198
https://github.com/thiagopbueno/tf-rddlsim/blob/d7102a0ad37d179dbb23141640254ea383d3b43f/tfrddlsim/simulation/policy_simulator.py#L92-L94
[ "def", "getGroupsURL", "(", "certfile", ",", "group", ")", ":", "GMS", "=", "\"https://\"", "+", "_SERVER", "+", "_GMS", "certfile", ".", "seek", "(", "0", ")", "buf", "=", "certfile", ".", "read", "(", ")", "x509", "=", "crypto", ".", "load_certificate", "(", "crypto", ".", "FILETYPE_PEM", ",", "buf", ")", "sep", "=", "\"\"", "dn", "=", "\"\"", "parts", "=", "[", "]", "for", "i", "in", "x509", ".", "get_issuer", "(", ")", ".", "get_components", "(", ")", ":", "#print i", "if", "i", "[", "0", "]", "in", "parts", ":", "continue", "parts", ".", "append", "(", "i", "[", "0", "]", ")", "dn", "=", "i", "[", "0", "]", "+", "\"=\"", "+", "i", "[", "1", "]", "+", "sep", "+", "dn", "sep", "=", "\",\"", "return", "GMS", "+", "\"/\"", "+", "group", "+", "\"/\"", "+", "urllib", ".", "quote", "(", "dn", ")" ]
Returns the initial state tensor .
def initial_state ( self ) -> StateTensor : s0 = [ ] for fluent in self . _compiler . compile_initial_state ( self . _batch_size ) : s0 . append ( self . _output_size ( fluent ) ) s0 = tuple ( s0 ) return s0
12,199
https://github.com/thiagopbueno/tf-rddlsim/blob/d7102a0ad37d179dbb23141640254ea383d3b43f/tfrddlsim/simulation/policy_simulator.py#L96-L102
[ "def", "remove_expired_multipartobjects", "(", ")", ":", "delta", "=", "current_app", ".", "config", "[", "'FILES_REST_MULTIPART_EXPIRES'", "]", "expired_dt", "=", "datetime", ".", "utcnow", "(", ")", "-", "delta", "file_ids", "=", "[", "]", "for", "mp", "in", "MultipartObject", ".", "query_expired", "(", "expired_dt", ")", ":", "file_ids", ".", "append", "(", "str", "(", "mp", ".", "file_id", ")", ")", "mp", ".", "delete", "(", ")", "for", "fid", "in", "file_ids", ":", "remove_file_data", ".", "delay", "(", "fid", ")" ]