idx
int64
0
251k
question
stringlengths
53
3.53k
target
stringlengths
5
1.23k
len_question
int64
20
893
len_target
int64
3
238
12,000
def request_session ( token , url = None ) : if url is None : api = SlackApi ( ) else : api = SlackApi ( url ) response = api . rtm . start ( token = token ) return SessionMetadata ( response , api , token )
Requests a WebSocket session for the Real - Time Messaging API . Returns a SessionMetadata object containing the information retrieved from the API call .
58
30
12,001
def _find_resource_by_key ( self , resource_list , key , value ) : original = value value = unicode ( value . upper ( ) ) for k , resource in resource_list . iteritems ( ) : if key in resource and resource [ key ] . upper ( ) == value : return k , resource raise KeyError , original
Finds a resource by key first case insensitive match .
74
11
12,002
def find_im_by_user_name ( self , name , auto_create = True ) : uid = self . find_user_by_name ( name ) [ 0 ] try : return self . find_im_by_user_id ( uid ) except KeyError : # IM does not exist, create it? if auto_create : response = self . api . im . open ( token = self . token , user = uid ) return response [ u'channel' ] [ u'id' ] else : raise
Finds the ID of the IM with a particular user by name with the option to automatically create a new channel if it doesn t exist .
114
28
12,003
def update ( self , event ) : # Create our own copy of the event data, as we'll be pushing that to # another data structure and we don't want it mangled by user code later # on. event = event . copy ( ) # Now just defer the work to later. reactor . callInThread ( self . _update_deferred , event )
All messages from the Protocol get passed through this method . This allows the client to have an up - to - date state for the client . However this method doesn t actually update right away . Instead the acutal update happens in another thread potentially later in order to allow user code to handle the event faster .
76
62
12,004
def iter_series ( self , workbook , row , col ) : for series in self . __series : series = dict ( series ) series [ "values" ] = series [ "values" ] . get_formula ( workbook , row , col ) if "categories" in series : series [ "categories" ] = series [ "categories" ] . get_formula ( workbook , row , col ) yield series
Yield series dictionaries with values resolved to the final excel formulas .
93
14
12,005
def get_userinfo ( self ) : wanted_fields = [ "name" , "mobile" , "orgEmail" , "position" , "avatar" ] userinfo = { k : self . json_response . get ( k , None ) for k in wanted_fields } return userinfo
Method to get current user s name mobile email and position .
64
12
12,006
def get_admin_ids ( self ) : admins = self . json_response . get ( "admin_list" , None ) admin_ids = [ admin_id for admin_id in admins [ "userid" ] ] return admin_ids
Method to get the administrator id list .
53
8
12,007
def run ( program , * args , * * kwargs ) : args = flattened ( args , split = SHELL ) full_path = which ( program ) logger = kwargs . pop ( "logger" , LOG . debug ) fatal = kwargs . pop ( "fatal" , True ) dryrun = kwargs . pop ( "dryrun" , is_dryrun ( ) ) include_error = kwargs . pop ( "include_error" , False ) message = "Would run" if dryrun else "Running" message = "%s: %s %s" % ( message , short ( full_path or program ) , represented_args ( args ) ) if logger : logger ( message ) if dryrun : return message if not full_path : return abort ( "%s is not installed" , short ( program ) , fatal = fatal ) stdout = kwargs . pop ( "stdout" , subprocess . PIPE ) stderr = kwargs . pop ( "stderr" , subprocess . PIPE ) args = [ full_path ] + args try : path_env = kwargs . pop ( "path_env" , None ) if path_env : kwargs [ "env" ] = added_env_paths ( path_env , env = kwargs . get ( "env" ) ) p = subprocess . Popen ( args , stdout = stdout , stderr = stderr , * * kwargs ) # nosec output , err = p . communicate ( ) output = decode ( output , strip = True ) err = decode ( err , strip = True ) if p . returncode and fatal is not None : note = ": %s\n%s" % ( err , output ) if output or err else "" message = "%s exited with code %s%s" % ( short ( program ) , p . returncode , note . strip ( ) ) return abort ( message , fatal = fatal ) if include_error and err : output = "%s\n%s" % ( output , err ) return output and output . strip ( ) except Exception as e : return abort ( "%s failed: %s" , short ( program ) , e , exc_info = e , fatal = fatal )
Run program with args
495
4
12,008
def get_dept_name ( self ) : self . logger . info ( "%s\t%s" % ( self . request_method , self . request_url ) ) return self . json_response . get ( "name" , None )
Method to get the department name
54
6
12,009
def get_dept_manager_ids ( self ) : self . logger . info ( "%s\t%s" % ( self . request_method , self . request_url ) ) return self . json_response . get ( "deptManagerUseridList" , None )
Method to get the id list of department manager .
61
10
12,010
def get_depts ( self , dept_name = None ) : depts = self . json_response . get ( "department" , None ) params = self . kwargs . get ( "params" , None ) fetch_child = params . get ( "fetch_child" , True ) if params else True if dept_name is not None : depts = [ dept for dept in depts if dept [ "name" ] == dept_name ] depts = [ { "id" : dept [ "id" ] , "name" : dept [ "name" ] } for dept in depts ] self . logger . info ( "%s\t%s" % ( self . request_method , self . request_url ) ) return depts if fetch_child else depts [ 0 ]
Method to get department by name .
191
7
12,011
def setup ( app ) : jira . setup ( app ) lsstdocushare . setup ( app ) mockcoderefs . setup ( app ) packagetoctree . setup ( app ) remotecodeblock . setup ( app ) try : __version__ = get_distribution ( 'documenteer' ) . version except DistributionNotFound : # package is not installed __version__ = 'unknown' return { 'version' : __version__ , 'parallel_read_safe' : True , 'parallel_write_safe' : True }
Wrapper for the setup functions of each individual extension module .
119
12
12,012
def task_ref_role ( name , rawtext , text , lineno , inliner , options = None , content = None ) : # app = inliner.document.settings.env.app node = pending_task_xref ( rawsource = text ) return [ node ] , [ ]
Process a role that references the target nodes created by the lsst - task directive .
63
17
12,013
def process_pending_task_xref_nodes ( app , doctree , fromdocname ) : logger = getLogger ( __name__ ) env = app . builder . env for node in doctree . traverse ( pending_task_xref ) : content = [ ] # The source of the node is the class name the user entered via the # lsst-task-topic role. For example: # lsst.pipe.tasks.processCcd.ProcessCcdTask role_parts = split_role_content ( node . rawsource ) task_id = format_task_id ( role_parts [ 'ref' ] ) if role_parts [ 'display' ] : # user's custom display text display_text = role_parts [ 'display' ] elif role_parts [ 'last_component' ] : # just the name of the class display_text = role_parts [ 'ref' ] . split ( '.' ) [ - 1 ] else : display_text = role_parts [ 'ref' ] link_label = nodes . literal ( ) link_label += nodes . Text ( display_text , display_text ) if hasattr ( env , 'lsst_task_topics' ) and task_id in env . lsst_task_topics : # A task topic, marked up with the lsst-task-topic directive is # available task_data = env . lsst_task_topics [ task_id ] ref_node = nodes . reference ( '' , '' ) ref_node [ 'refdocname' ] = task_data [ 'docname' ] ref_node [ 'refuri' ] = app . builder . get_relative_uri ( fromdocname , task_data [ 'docname' ] ) ref_node [ 'refuri' ] += '#' + task_data [ 'target' ] [ 'refid' ] ref_node += link_label content . append ( ref_node ) else : # Fallback if the task topic isn't known. Just print the label text content . append ( link_label ) message = 'lsst-task could not find a reference to %s' logger . warning ( message , role_parts [ 'ref' ] , location = node ) # replacing the pending_task_xref node with this reference node . replace_self ( content )
Process the pending_task_xref nodes during the doctree - resolved event to insert links to the locations of lsst - task - topic directives .
507
31
12,014
def config_ref_role ( name , rawtext , text , lineno , inliner , options = None , content = None ) : node = pending_config_xref ( rawsource = text ) return [ node ] , [ ]
Process a role that references the target nodes created by the lsst - config - topic directive .
50
19
12,015
def process_pending_config_xref_nodes ( app , doctree , fromdocname ) : logger = getLogger ( __name__ ) env = app . builder . env for node in doctree . traverse ( pending_config_xref ) : content = [ ] # The source of the node is the content the authored entered in the # lsst-config role role_parts = split_role_content ( node . rawsource ) config_id = format_config_id ( role_parts [ 'ref' ] ) if role_parts [ 'display' ] : # user's custom display text display_text = role_parts [ 'display' ] elif role_parts [ 'last_component' ] : # just the name of the class display_text = role_parts [ 'ref' ] . split ( '.' ) [ - 1 ] else : display_text = role_parts [ 'ref' ] link_label = nodes . literal ( ) link_label += nodes . Text ( display_text , display_text ) if hasattr ( env , 'lsst_task_topics' ) and config_id in env . lsst_task_topics : # A config topic, marked up with the lsst-task directive is # available config_data = env . lsst_task_topics [ config_id ] ref_node = nodes . reference ( '' , '' ) ref_node [ 'refdocname' ] = config_data [ 'docname' ] ref_node [ 'refuri' ] = app . builder . get_relative_uri ( fromdocname , config_data [ 'docname' ] ) ref_node [ 'refuri' ] += '#' + config_data [ 'target' ] [ 'refid' ] ref_node += link_label content . append ( ref_node ) else : # Fallback if the config topic isn't known. Just print the # role's formatted content. content . append ( link_label ) message = 'lsst-config could not find a reference to %s' logger . warning ( message , role_parts [ 'ref' ] , location = node ) # replacing the pending_config_xref node with this reference node . replace_self ( content )
Process the pending_config_xref nodes during the doctree - resolved event to insert links to the locations of lsst - config - topic directives .
485
31
12,016
def configfield_ref_role ( name , rawtext , text , lineno , inliner , options = None , content = None ) : node = pending_configfield_xref ( rawsource = text ) return [ node ] , [ ]
Process a role that references the Task configuration field nodes created by the lsst - config - fields lsst - task - config - subtasks and lsst - task - config - subtasks directives .
52
40
12,017
def process_pending_configfield_xref_nodes ( app , doctree , fromdocname ) : logger = getLogger ( __name__ ) env = app . builder . env for node in doctree . traverse ( pending_configfield_xref ) : content = [ ] # The source is the text the user entered into the role, which is # the importable name of the config class's and the attribute role_parts = split_role_content ( node . rawsource ) namespace_components = role_parts [ 'ref' ] . split ( '.' ) field_name = namespace_components [ - 1 ] class_namespace = namespace_components [ : - 1 ] configfield_id = format_configfield_id ( class_namespace , field_name ) if role_parts [ 'display' ] : # user's custom display text display_text = role_parts [ 'display' ] elif role_parts [ 'last_component' ] : # just the name of the class display_text = role_parts [ 'ref' ] . split ( '.' ) [ - 1 ] else : display_text = role_parts [ 'ref' ] link_label = nodes . literal ( ) link_label += nodes . Text ( display_text , display_text ) if hasattr ( env , 'lsst_configfields' ) and configfield_id in env . lsst_configfields : # A config field topic is available configfield_data = env . lsst_configfields [ configfield_id ] ref_node = nodes . reference ( '' , '' ) ref_node [ 'refdocname' ] = configfield_data [ 'docname' ] ref_node [ 'refuri' ] = app . builder . get_relative_uri ( fromdocname , configfield_data [ 'docname' ] ) ref_node [ 'refuri' ] += '#' + configfield_id ref_node += link_label content . append ( ref_node ) else : # Fallback if the config field isn't known. Just print the Config # field attribute name literal_node = nodes . literal ( ) link_label = nodes . Text ( field_name , field_name ) literal_node += link_label content . append ( literal_node ) message = 'lsst-config-field could not find a reference to %s' logger . warning ( message , role_parts [ 'ref' ] , location = node ) # replacing the pending_configfield_xref node with this reference node . replace_self ( content )
Process the pending_configfield_xref nodes during the doctree - resolved event to insert links to the locations of configuration field nodes .
555
28
12,018
def to_bytesize ( value , default_unit = None , base = DEFAULT_BASE ) : if isinstance ( value , ( int , float ) ) : return unitized ( value , default_unit , base ) if value is None : return None try : if value [ - 1 ] . lower ( ) == "b" : # Accept notations such as "1mb", as they get used out of habit value = value [ : - 1 ] unit = value [ - 1 : ] . lower ( ) if unit . isdigit ( ) : unit = default_unit else : value = value [ : - 1 ] return unitized ( to_number ( float , value ) , unit , base ) except ( IndexError , TypeError , ValueError ) : return None
Convert value to bytes accepts notations such as 4k to mean 4096 bytes
163
16
12,019
def to_number ( result_type , value , default = None , minimum = None , maximum = None ) : try : return capped ( result_type ( value ) , minimum , maximum ) except ( TypeError , ValueError ) : return default
Cast value to numeric result_type if possible
51
9
12,020
def set_providers ( self , * providers ) : if self . providers : self . clear ( ) for provider in providers : self . add ( provider )
Replace current providers with given ones
33
7
12,021
def get_bytesize ( self , key , default = None , minimum = None , maximum = None , default_unit = None , base = DEFAULT_BASE ) : value = to_bytesize ( self . get_str ( key ) , default_unit , base ) if value is None : return to_bytesize ( default , default_unit , base ) return capped ( value , to_bytesize ( minimum , default_unit , base ) , to_bytesize ( maximum , default_unit , base ) )
Size in bytes expressed by value configured under key
111
9
12,022
def install ( application , * * kwargs ) : if getattr ( application , 'statsd' , None ) is not None : LOGGER . warning ( 'Statsd collector is already installed' ) return False if 'host' not in kwargs : kwargs [ 'host' ] = os . environ . get ( 'STATSD_HOST' , '127.0.0.1' ) if 'port' not in kwargs : kwargs [ 'port' ] = os . environ . get ( 'STATSD_PORT' , '8125' ) if 'protocol' not in kwargs : kwargs [ 'protocol' ] = os . environ . get ( 'STATSD_PROTOCOL' , 'udp' ) setattr ( application , 'statsd' , StatsDCollector ( * * kwargs ) ) return True
Call this to install StatsD for the Tornado application .
194
11
12,023
def record_timing ( self , duration , * path ) : self . application . statsd . send ( path , duration * 1000.0 , 'ms' )
Record a timing .
35
4
12,024
def increase_counter ( self , * path , * * kwargs ) : self . application . statsd . send ( path , kwargs . get ( 'amount' , '1' ) , 'c' )
Increase a counter .
47
4
12,025
def execution_timer ( self , * path ) : start = time . time ( ) try : yield finally : self . record_timing ( max ( start , time . time ( ) ) - start , * path )
Record the time it takes to perform an arbitrary code block .
46
12
12,026
def on_finish ( self ) : super ( ) . on_finish ( ) self . record_timing ( self . request . request_time ( ) , self . __class__ . __name__ , self . request . method , self . get_status ( ) )
Records the time taken to process the request .
60
10
12,027
async def _tcp_on_closed ( self ) : LOGGER . warning ( 'Not connected to statsd, connecting in %s seconds' , self . _tcp_reconnect_sleep ) await asyncio . sleep ( self . _tcp_reconnect_sleep ) self . _sock = self . _tcp_socket ( )
Invoked when the socket is closed .
76
8
12,028
def send ( self , path , value , metric_type ) : msg = self . _msg_format . format ( path = self . _build_path ( path , metric_type ) , value = value , metric_type = metric_type ) LOGGER . debug ( 'Sending %s to %s:%s' , msg . encode ( 'ascii' ) , self . _host , self . _port ) try : if self . _tcp : if self . _sock . closed ( ) : return return self . _sock . write ( msg . encode ( 'ascii' ) ) self . _sock . sendto ( msg . encode ( 'ascii' ) , ( self . _host , self . _port ) ) except iostream . StreamClosedError as error : # pragma: nocover LOGGER . warning ( 'Error sending TCP statsd metric: %s' , error ) except ( OSError , socket . error ) as error : # pragma: nocover LOGGER . exception ( 'Error sending statsd metric: %s' , error )
Send a metric to Statsd .
242
7
12,029
def _build_path ( self , path , metric_type ) : path = self . _get_prefixes ( metric_type ) + list ( path ) return '{}.{}' . format ( self . _namespace , '.' . join ( str ( p ) . replace ( '.' , '-' ) for p in path ) )
Return a normalized path .
74
5
12,030
def _get_prefixes ( self , metric_type ) : prefixes = [ ] if self . _prepend_metric_type : prefixes . append ( self . METRIC_TYPES [ metric_type ] ) return prefixes
Get prefixes where applicable
53
5
12,031
def _show_feedback_label ( self , message , seconds = None ) : if seconds is None : seconds = CONFIG [ 'MESSAGE_DURATION' ] logger . debug ( 'Label feedback: "{}"' . format ( message ) ) self . feedback_label_timer . timeout . connect ( self . _hide_feedback_label ) self . lbl_feedback . setText ( str ( message ) ) self . lbl_feedback . show ( ) self . feedback_label_timer . start ( 1000 * seconds )
Display a message in lbl_feedback which times out after some number of seconds .
118
18
12,032
def update_user_type ( self ) : if self . rb_tutor . isChecked ( ) : self . user_type = 'tutor' elif self . rb_student . isChecked ( ) : self . user_type = 'student' self . accept ( )
Return either tutor or student based on which radio button is selected .
64
13
12,033
def shuffle_sattolo ( items ) : _randrange = random . randrange for i in reversed ( range ( 1 , len ( items ) ) ) : j = _randrange ( i ) # 0 <= j < i items [ j ] , items [ i ] = items [ i ] , items [ j ]
Shuffle items in place using Sattolo s algorithm .
67
12
12,034
def column_list ( string ) : try : columns = list ( map ( int , string . split ( ',' ) ) ) except ValueError as e : raise argparse . ArgumentTypeError ( * e . args ) for column in columns : if column < 1 : raise argparse . ArgumentTypeError ( 'Invalid column {!r}: column numbers start at 1.' . format ( column ) ) return columns
Validate and convert comma - separated list of column numbers .
85
12
12,035
def main ( ) : parser = argparse . ArgumentParser ( description = 'Shuffle columns in a CSV file' ) parser . add_argument ( metavar = "FILE" , dest = 'input_file' , type = argparse . FileType ( 'r' ) , nargs = '?' , default = sys . stdin , help = 'Input CSV file. If omitted, read standard input.' ) parser . add_argument ( '-s' , '--sattolo' , action = 'store_const' , const = shuffle_sattolo , dest = 'shuffle' , default = random . shuffle , help = "Use Sattolo's shuffle algorithm." ) col_group = parser . add_mutually_exclusive_group ( ) col_group . add_argument ( '-c' , '--columns' , type = column_list , help = 'Comma-separated list of columns to include.' ) col_group . add_argument ( '-C' , '--no-columns' , type = column_list , help = 'Comma-separated list of columns to exclude.' ) delim_group = parser . add_mutually_exclusive_group ( ) delim_group . add_argument ( '-d' , '--delimiter' , type = str , default = ',' , help = 'Input column delimiter.' ) delim_group . add_argument ( '-t' , '--tabbed' , dest = 'delimiter' , action = 'store_const' , const = '\t' , help = 'Delimit input with tabs.' ) parser . add_argument ( '-q' , '--quotechar' , type = str , default = '"' , help = 'Quote character.' ) parser . add_argument ( '-o' , '--output-delimiter' , type = str , default = ',' , help = 'Output column delimiter.' ) parser . add_argument ( '-v' , '--version' , action = 'version' , version = '%(prog)s {version}' . format ( version = __version__ ) ) args = parser . parse_args ( ) reader = csv . reader ( args . input_file , delimiter = args . delimiter , quotechar = args . quotechar ) headers = next ( reader ) """Create a matrix of lists of columns""" table = [ ] for c in range ( len ( headers ) ) : table . append ( [ ] ) for row in reader : for c in range ( len ( headers ) ) : table [ c ] . append ( row [ c ] ) cols = args . columns if args . no_columns : """If columns to exclude are provided, get a list of all other columns""" cols = list ( set ( range ( len ( headers ) ) ) - set ( args . no_columns ) ) elif not cols : """If no columns are provided all columns will be shuffled""" cols = range ( len ( headers ) ) for c in cols : if c > len ( headers ) : sys . stderr . write ( 'Invalid column {0}. Last column is {1}.\n' . format ( c , len ( headers ) ) ) exit ( 1 ) args . shuffle ( table [ c - 1 ] ) """Transpose the matrix""" table = zip ( * table ) writer = csv . writer ( sys . stdout , delimiter = args . output_delimiter ) writer . writerow ( headers ) for row in table : writer . writerow ( row )
Get the first row and use it as column headers
774
10
12,036
def get ( self , keyword ) : if not keyword . startswith ( ':' ) : keyword = ':' + keyword for i , s in enumerate ( self . data ) : if s . to_string ( ) . upper ( ) == keyword . upper ( ) : if i < len ( self . data ) - 1 : return self . data [ i + 1 ] else : return None return None
Return the element of the list after the given keyword .
85
11
12,037
def gets ( self , keyword ) : param = self . get ( keyword ) if param is not None : return safe_decode ( param . string_value ( ) ) return None
Return the element of the list after the given keyword as string .
38
13
12,038
def append ( self , obj ) : if isinstance ( obj , str ) : obj = KQMLToken ( obj ) self . data . append ( obj )
Append an element to the end of the list .
34
11
12,039
def push ( self , obj ) : if isinstance ( obj , str ) : obj = KQMLToken ( obj ) self . data . insert ( 0 , obj )
Prepend an element to the beginnging of the list .
36
13
12,040
def set ( self , keyword , value ) : if not keyword . startswith ( ':' ) : keyword = ':' + keyword if isinstance ( value , str ) : value = KQMLToken ( value ) if isinstance ( keyword , str ) : keyword = KQMLToken ( keyword ) found = False for i , key in enumerate ( self . data ) : if key . to_string ( ) . lower ( ) == keyword . lower ( ) : found = True if i < len ( self . data ) - 1 : self . data [ i + 1 ] = value break if not found : self . data . append ( keyword ) self . data . append ( value )
Set the element of the list after the given keyword .
145
11
12,041
def sets ( self , keyword , value ) : if isinstance ( value , str ) : value = KQMLString ( value ) self . set ( keyword , value )
Set the element of the list after the given keyword as string .
36
13
12,042
def read_recipe ( self , filename ) : Global . LOGGER . debug ( f"reading recipe {filename}" ) if not os . path . isfile ( filename ) : Global . LOGGER . error ( filename + " recipe not found, skipping" ) return config = configparser . ConfigParser ( allow_no_value = True , delimiters = "=" ) config . read ( filename ) for section in config . sections ( ) : self . sections [ section ] = config [ section ] Global . LOGGER . debug ( "Read recipe " + filename )
Read a recipe file from disk
119
6
12,043
def set_socket_address ( self ) : Global . LOGGER . debug ( 'defining socket addresses for zmq' ) random . seed ( ) default_port = random . randrange ( 5001 , 5999 ) internal_0mq_address = "tcp://127.0.0.1" internal_0mq_port_subscriber = str ( default_port ) internal_0mq_port_publisher = str ( default_port ) Global . LOGGER . info ( str . format ( f"zmq subsystem subscriber on {internal_0mq_port_subscriber} port" ) ) Global . LOGGER . info ( str . format ( f"zmq subsystem publisher on {internal_0mq_port_publisher} port" ) ) self . subscriber_socket_address = f"{internal_0mq_address}:{internal_0mq_port_subscriber}" self . publisher_socket_address = f"{internal_0mq_address}:{internal_0mq_port_publisher}"
Set a random port to be used by zmq
237
11
12,044
def get_quantities ( self , quantities , filters = None , native_filters = None , return_iterator = False ) : quantities = self . _preprocess_requested_quantities ( quantities ) filters = self . _preprocess_filters ( filters ) native_filters = self . _preprocess_native_filters ( native_filters ) it = self . _get_quantities_iter ( quantities , filters , native_filters ) if return_iterator : return it data_all = defaultdict ( list ) for data in it : for q in quantities : data_all [ q ] . append ( data [ q ] ) return { q : concatenate_1d ( data_all [ q ] ) for q in quantities }
Fetch quantities from this catalog .
162
7
12,045
def list_all_quantities ( self , include_native = False , with_info = False ) : q = set ( self . _quantity_modifiers ) if include_native : q . update ( self . _native_quantities ) return { k : self . get_quantity_info ( k ) for k in q } if with_info else list ( q )
Return a list of all available quantities in this catalog .
82
11
12,046
def list_all_native_quantities ( self , with_info = False ) : q = self . _native_quantities return { k : self . get_quantity_info ( k ) for k in q } if with_info else list ( q )
Return a list of all available native quantities in this catalog .
57
12
12,047
def first_available ( self , * quantities ) : for i , q in enumerate ( quantities ) : if self . has_quantity ( q ) : if i : warnings . warn ( '{} not available; using {} instead' . format ( quantities [ 0 ] , q ) ) return q
Return the first available quantity in the input arguments . Return None if none of them is available .
63
19
12,048
def get_input_kwargs ( self , key = None , default = None ) : warnings . warn ( "`get_input_kwargs` is deprecated; use `get_catalog_info` instead." , DeprecationWarning ) return self . get_catalog_info ( key , default )
Deprecated . Use get_catalog_info instead .
66
12
12,049
def add_quantity_modifier ( self , quantity , modifier , overwrite = False ) : if quantity in self . _quantity_modifiers and not overwrite : raise ValueError ( 'quantity `{}` already exists' . format ( quantity ) ) self . _quantity_modifiers [ quantity ] = modifier self . _check_quantities_exist ( [ quantity ] , raise_exception = False )
Add a quantify modifier . Consider useing the high - level function add_derived_quantity instead!
89
21
12,050
def get_normalized_quantity_modifier ( self , quantity ) : modifier = self . _quantity_modifiers . get ( quantity , self . _default_quantity_modifier ) if modifier is None : return ( trivial_callable , quantity ) if callable ( modifier ) : return ( modifier , quantity ) if isinstance ( modifier , ( tuple , list ) ) and len ( modifier ) > 1 and callable ( modifier [ 0 ] ) : return modifier return ( trivial_callable , modifier )
Retrive a quantify modifier normalized . This function would also return a tuple with the first item a callable and the rest native quantity names
110
27
12,051
def add_derived_quantity ( self , derived_quantity , func , * quantities ) : if derived_quantity in self . _quantity_modifiers : raise ValueError ( 'quantity name `{}` already exists' . format ( derived_quantity ) ) if set ( quantities ) . issubset ( self . _native_quantities ) : new_modifier = ( func , ) + quantities else : functions = [ ] quantities_needed = [ ] quantity_count = [ ] for q in quantities : modifier = self . get_normalized_quantity_modifier ( q ) functions . append ( modifier [ 0 ] ) quantities_needed . extend ( modifier [ 1 : ] ) quantity_count . append ( len ( modifier ) - 1 ) def _new_func ( * x ) : assert len ( x ) == sum ( quantity_count ) count_current = 0 new_args = [ ] for func_this , count in zip ( functions , quantity_count ) : new_args . append ( func_this ( * x [ count_current : count_current + count ] ) ) count_current += count return func ( * new_args ) new_modifier = ( _new_func , ) + tuple ( quantities_needed ) self . add_quantity_modifier ( derived_quantity , new_modifier )
Add a derived quantify modifier .
290
6
12,052
def add_modifier_on_derived_quantities ( self , new_quantity , func , * quantities ) : warnings . warn ( "Use `add_derived_quantity` instead." , DeprecationWarning ) self . add_derived_quantity ( new_quantity , func , * quantities )
Deprecated . Use add_derived_quantity instead .
67
12
12,053
def check_for_wdiff ( ) : cmd = [ 'which' , CMD_WDIFF ] DEVNULL = open ( os . devnull , 'wb' ) proc = sub . Popen ( cmd , stdout = DEVNULL ) proc . wait ( ) DEVNULL . close ( ) if proc . returncode != 0 : msg = "the `{}` command can't be found" . format ( CMD_WDIFF ) raise WdiffNotFoundError ( msg )
Checks if the wdiff command can be found .
106
11
12,054
def generate_wdiff ( org_file , new_file , fold_tags = False , html = True ) : check_for_wdiff ( ) cmd = [ CMD_WDIFF ] if html : cmd . extend ( OPTIONS_OUTPUT ) if not fold_tags : cmd . extend ( OPTIONS_LINEBREAK ) cmd . extend ( [ org_file , new_file ] ) proc = sub . Popen ( cmd , stdout = sub . PIPE ) diff , _ = proc . communicate ( ) return diff . decode ( 'utf-8' )
Returns the results from the wdiff command as a string .
125
12
12,055
def body ( self , master ) : self . frame = ttk . Frame ( master , padding = ( 5 , 5 , 10 , 10 ) ) self . lbl_message = ttk . Label ( self . frame , text = 'Select User Type: ' , ) self . rb_student = ttk . Radiobutton ( self . frame , text = 'Student' , variable = self . rb_choice , value = 'student' , ) self . rb_tutor = ttk . Radiobutton ( self . frame , text = 'Tutor' , variable = self . rb_choice , value = 'tutor' , ) self . btn_ok = ttk . Button ( self . frame , text = 'OK' , command = self . ok , ) self . btn_cancel = ttk . Button ( self . frame , text = 'Cancel' , command = self . cancel , ) # assemble grid self . frame . grid ( column = 0 , row = 0 , sticky = ( N , S , E , W ) ) self . lbl_message . grid ( column = 0 , row = 0 , columnspan = 2 , sticky = ( W , E ) ) self . rb_student . grid ( column = 0 , row = 1 , columnspan = 2 , sticky = W ) self . rb_tutor . grid ( column = 0 , row = 2 , columnspan = 2 , sticky = W ) self . btn_ok . grid ( column = 0 , row = 3 ) self . btn_cancel . grid ( column = 1 , row = 3 ) # key bindings self . bind ( '<Return>' , self . ok ) self . bind ( '<KP_Enter>' , self . ok ) self . bind ( '<Escape>' , self . cancel ) self . rb_tutor . invoke ( ) return self . btn_ok
Create dialog body . Return widget that should have initial focus .
416
12
12,056
def apply ( self ) : user_type = self . rb_choice . get ( ) if user_type == 'student' or user_type == 'tutor' : self . result = user_type
Inherited from tkinter . simpledialog . Dialog
45
15
12,057
def flag_forgotten_entries ( session , today = None ) : # noqa today = date . today ( ) if today is None else today forgotten = ( session . query ( Entry ) . filter ( Entry . time_out . is_ ( None ) ) . filter ( Entry . forgot_sign_out . is_ ( False ) ) . filter ( Entry . date < today ) ) for entry in forgotten : e = sign_out ( entry , forgot = True ) logger . debug ( 'Signing out forgotten entry: {}' . format ( e ) ) session . add ( e ) session . commit ( )
Flag any entries from previous days where users forgot to sign out .
130
13
12,058
def signed_in_users ( session = None , today = None , full_name = True ) : # noqa if session is None : session = Session ( ) else : session = session if today is None : today = date . today ( ) else : today = today signed_in_users = ( session . query ( User ) . filter ( Entry . date == today ) . filter ( Entry . time_out . is_ ( None ) ) . filter ( User . user_id == Entry . user_id ) . all ( ) ) session . close ( ) return signed_in_users
Return list of names of currently signed in users .
125
10
12,059
def get_user_name ( user , full_name = True ) : # noqa try : if full_name : name = ' ' . join ( [ user . first_name , user . last_name ] ) else : name = user . first_name except AttributeError : name = None return name
Return the user s name as a string .
66
9
12,060
def sign_in ( user , user_type = None , date = None , time_in = None ) : # noqa now = datetime . today ( ) if date is None : date = now . date ( ) if time_in is None : time_in = now . time ( ) if user_type is None : if user . is_student and user . is_tutor : raise AmbiguousUserType ( 'User is both a student and a tutor.' ) elif user . is_student : user_type = 'student' elif user . is_tutor : user_type = 'tutor' else : raise ValueError ( 'Unknown user type.' ) new_entry = Entry ( uuid = str ( uuid . uuid4 ( ) ) , date = date , time_in = time_in , time_out = None , user_id = user . user_id , user_type = user_type , user = user , ) logger . info ( '{} ({}) signed in.' . format ( new_entry . user_id , new_entry . user_type ) ) return new_entry
Add a new entry to the timesheet .
243
9
12,061
def sign_out ( entry , time_out = None , forgot = False ) : # noqa if time_out is None : time_out = datetime . today ( ) . time ( ) if forgot : entry . forgot_sign_out = True logger . info ( '{} forgot to sign out on {}.' . format ( entry . user_id , entry . date ) ) else : entry . time_out = time_out logger . info ( '{} ({}) signed out.' . format ( entry . user_id , entry . user_type ) ) return entry
Sign out of an existing entry in the timesheet . If the user forgot to sign out flag the entry .
123
22
12,062
def undo_sign_in ( entry , session = None ) : # noqa if session is None : session = Session ( ) else : session = session entry_to_delete = ( session . query ( Entry ) . filter ( Entry . uuid == entry . uuid ) . one_or_none ( ) ) if entry_to_delete : logger . info ( 'Undo sign in: {}' . format ( entry_to_delete . user_id ) ) logger . debug ( 'Undo sign in: {}' . format ( entry_to_delete ) ) session . delete ( entry_to_delete ) session . commit ( ) else : error_message = 'Entry not found: {}' . format ( entry ) logger . error ( error_message ) raise ValueError ( error_message )
Delete a signed in entry .
171
6
12,063
def undo_sign_out ( entry , session = None ) : # noqa if session is None : session = Session ( ) else : session = session entry_to_sign_in = ( session . query ( Entry ) . filter ( Entry . uuid == entry . uuid ) . one_or_none ( ) ) if entry_to_sign_in : logger . info ( 'Undo sign out: {}' . format ( entry_to_sign_in . user_id ) ) logger . debug ( 'Undo sign out: {}' . format ( entry_to_sign_in ) ) entry_to_sign_in . time_out = None session . add ( entry_to_sign_in ) session . commit ( ) else : error_message = 'Entry not found: {}' . format ( entry ) logger . error ( error_message ) raise ValueError ( error_message )
Sign in a signed out entry .
194
7
12,064
def sign ( user_id , user_type = None , today = None , session = None ) : # noqa if session is None : session = Session ( ) else : session = session if today is None : today = date . today ( ) else : today = today user = ( session . query ( User ) . filter ( User . user_id == user_id ) . one_or_none ( ) ) if user : signed_in_entries = ( user . entries . filter ( Entry . date == today ) . filter ( Entry . time_out . is_ ( None ) ) . all ( ) ) if not signed_in_entries : new_entry = sign_in ( user , user_type = user_type ) session . add ( new_entry ) status = Status ( valid = True , in_or_out = 'in' , user_name = get_user_name ( user ) , user_type = new_entry . user_type , entry = new_entry ) else : for entry in signed_in_entries : signed_out_entry = sign_out ( entry ) session . add ( signed_out_entry ) status = Status ( valid = True , in_or_out = 'out' , user_name = get_user_name ( user ) , user_type = signed_out_entry . user_type , entry = signed_out_entry ) session . commit ( ) else : raise UnregisteredUser ( '{} not registered. Please register at the front desk.' . format ( user_id ) ) logger . debug ( status ) return status
Check user id for validity then sign user in if they are signed out or out if they are signed in .
343
22
12,065
def format_request ( self ) : fmt = '{now} {status} {requestline} ({client_address}) {response_length} {delta}ms' requestline = getattr ( self , 'requestline' ) if requestline : # Original "GET / HTTP/1.1", remove the "HTTP/1.1" requestline = ' ' . join ( requestline . split ( ' ' ) [ : - 1 ] ) else : requestline = '???' if self . time_finish : delta = '%.2f' % ( ( self . time_finish - self . time_start ) * 1000 ) else : delta = '-' data = dict ( now = datetime . datetime . now ( ) . replace ( microsecond = 0 ) , response_length = self . response_length or '-' , client_address = self . client_address [ 0 ] if isinstance ( self . client_address , tuple ) else self . client_address , status = str ( self . _get_status_int ( ) ) , requestline = requestline , delta = delta , ) return fmt . format ( * * data )
Override for better log format
248
5
12,066
def handle_error ( self , type_ , value , tb ) : if not issubclass ( type_ , pywsgi . GreenletExit ) : self . server . loop . handle_error ( self . environ , type_ , value , tb ) if self . response_length : self . close_connection = True else : tb_stream = traceback . format_exception ( type_ , value , tb ) del tb tb_stream . append ( '\n' ) tb_stream . append ( pprint . pformat ( self . environ ) ) body = '' . join ( tb_stream ) headers = pywsgi . _INTERNAL_ERROR_HEADERS [ : ] headers [ 2 ] = ( 'Content-Length' , str ( len ( body ) ) ) self . start_response ( pywsgi . _INTERNAL_ERROR_STATUS , headers ) self . write ( body )
This method copies the code from pywsgi . WSGIHandler . handle_error change the write part to be a reflection of traceback and environ
205
31
12,067
def clear_measurements ( self ) : keys = list ( self . measurements . keys ( ) ) for key in keys : del ( self . measurements [ key ] ) self . meas_counter = - 1
Remove all measurements from self . measurements . Reset the measurement counter . All ID are invalidated .
44
19
12,068
def add_measurements ( self , measurements ) : subdata = np . atleast_2d ( measurements ) if self . configs is None : raise Exception ( 'must read in configuration before measurements can be stored' ) # we try to accommodate transposed input if subdata . shape [ 1 ] != self . configs . shape [ 0 ] : if subdata . shape [ 0 ] == self . configs . shape [ 0 ] : subdata = subdata . T else : raise Exception ( 'Number of measurements does not match number of configs' ) return_ids = [ ] for dataset in subdata : cid = self . _get_next_index ( ) self . measurements [ cid ] = dataset . copy ( ) return_ids . append ( cid ) if len ( return_ids ) == 1 : return return_ids [ 0 ] else : return return_ids
Add new measurements to this instance
190
6
12,069
def gen_all_voltages_for_injections ( self , injections_raw ) : injections = injections_raw . astype ( int ) N = self . nr_electrodes all_quadpoles = [ ] for idipole in injections : # sort current electrodes and convert to array indices Icurrent = np . sort ( idipole ) - 1 # voltage electrodes velecs = list ( range ( 1 , N + 1 ) ) # remove current electrodes del ( velecs [ Icurrent [ 1 ] ] ) del ( velecs [ Icurrent [ 0 ] ] ) # permutate remaining voltages = itertools . permutations ( velecs , 2 ) for voltage in voltages : all_quadpoles . append ( ( idipole [ 0 ] , idipole [ 1 ] , voltage [ 0 ] , voltage [ 1 ] ) ) configs_unsorted = np . array ( all_quadpoles ) # sort AB and MN configs_sorted = np . hstack ( ( np . sort ( configs_unsorted [ : , 0 : 2 ] , axis = 1 ) , np . sort ( configs_unsorted [ : , 2 : 4 ] , axis = 1 ) , ) ) configs = self . remove_duplicates ( configs_sorted ) self . add_to_configs ( configs ) self . remove_duplicates ( ) return configs
For a given set of current injections AB generate all possible unique potential measurements .
308
15
12,070
def gen_wenner ( self , a ) : configs = [ ] for i in range ( 1 , self . nr_electrodes - 3 * a + 1 ) : configs . append ( ( i , i + a , i + 2 * a , i + 3 * a ) , ) configs = np . array ( configs ) self . add_to_configs ( configs ) return configs
Generate Wenner measurement configurations .
90
7
12,071
def gen_reciprocals ( self , quadrupoles ) : reciprocals = quadrupoles [ : , : : - 1 ] . copy ( ) reciprocals [ : , 0 : 2 ] = np . sort ( reciprocals [ : , 0 : 2 ] , axis = 1 ) reciprocals [ : , 2 : 4 ] = np . sort ( reciprocals [ : , 2 : 4 ] , axis = 1 ) return reciprocals
For a given set of quadrupoles generate and return reciprocals
92
13
12,072
def compute_K_factors ( self , spacing = None , configs = None , numerical = False , elem_file = None , elec_file = None ) : if configs is None : use_configs = self . configs else : use_configs = configs if numerical : settings = { 'elem' : elem_file , 'elec' : elec_file , 'rho' : 100 , } K = edfK . compute_K_numerical ( use_configs , settings ) else : K = edfK . compute_K_analytical ( use_configs , spacing = spacing ) return K
Compute analytical geometrical factors .
143
8
12,073
def applies ( self , src , dst ) : if self . _src_pattern and ( src is None or re . search ( self . _src_pattern , src ) is None ) : return False elif self . _dst_pattern and ( dst is None or re . search ( self . _dst_pattern , dst ) is None ) : return False return True
Checks if this rule applies to the given src and dst paths based on the src pattern and dst pattern given in the constructor .
79
26
12,074
def _createunbound ( kls , * * info ) : if issubclass ( kls , Bitfield ) : nodetype = UnboundBitfieldNode elif hasattr ( kls , '_fields_' ) : nodetype = UnboundStructureNode elif issubclass ( kls , ctypes . Array ) : nodetype = UnboundArrayNode else : nodetype = UnboundSimpleNode return nodetype ( type = kls , * * info )
Create a new UnboundNode representing a given class .
106
11
12,075
def _createbound ( obj ) : # Start by allowing objects to define custom unbound reference hooks try : kls = obj . _unboundreference_ ( ) except AttributeError : kls = type ( obj ) unbound = _createunbound ( kls ) def valueget ( ) : return obj for t in ( BoundBitfieldNode , BoundStructureNode , BoundArrayNode ) : if isinstance ( unbound , t . _unboundtype ) : kls = t break else : kls = BoundSimpleNode child = kls ( unbound , valueget ) return child
Create a new BoundNode representing a given object .
125
10
12,076
def display ( obj , skiphidden = True , * * printargs ) : top = findnode ( obj ) #------------------------------------------------------------------- # Iterate through the entire structure turning all the nodes into # tuples of strings for display. maxhex = len ( hex ( ctypes . sizeof ( top . type ) ) ) - 2 def addrformat ( addr ) : if isinstance ( addr , int ) : return "0x{0:0{1}X}" . format ( addr , maxhex ) else : intpart = int ( addr ) fracbits = int ( ( addr - intpart ) * 8 ) return "0x{0:0{1}X}'{2}" . format ( intpart , maxhex , fracbits ) def formatval ( here ) : if isinstance ( here , BoundSimpleNode ) : return "{0}({1})" . format ( here . type . __name__ , here . value ) else : return str ( here . value ) if isinstance ( top , UnboundNode ) : headers = [ 'Path' , 'Addr' , 'Type' ] results = [ ( ( ' ' * n . depth ) + n . name , addrformat ( n . baseoffset ) , n . type . __name__ ) for n in walknode ( top , skiphidden ) ] else : headers = [ 'Path' , 'Addr' , 'Value' ] results = [ ( ( ' ' * n . depth ) + n . name , addrformat ( n . baseoffset ) , formatval ( n ) ) for n in walknode ( top , skiphidden ) ] #------------------------------------------------------------------- # Determine the maximum width of the text in each column, make the # column always that wide. widths = [ max ( max ( len ( d [ col ] ) for d in results ) , len ( h ) ) for col , h in enumerate ( headers ) ] #------------------------------------------------------------------- # Print out the tabular data. def lp ( args ) : print ( * args , * * printargs ) lp ( d . center ( w ) for d , w in zip ( headers , widths ) ) lp ( '-' * w for w in widths ) for r in results : lp ( d . ljust ( w ) for d , w in zip ( r , widths ) )
Print a view of obj where obj is either a ctypes - derived class or an instance of such a class . Any additional keyword arguments are passed directly to the print function . This is mostly useful to introspect structures from an interactive session .
497
48
12,077
def pathparts ( self ) : try : parts = self . parent . pathparts ( ) parts . append ( self . name ) return parts except AttributeError : return [ ]
A list of the parts of the path with the root node returning an empty list .
37
17
12,078
def baseoffset ( self ) : try : return self . parent . baseoffset + self . offset except AttributeError : return self . offset
The offset of this node from the root node .
29
10
12,079
def _almost_equal ( a , b ) : # arbitrary small number!!! threshold = 1e-9 diff = np . abs ( a - b ) return ( diff < threshold )
Check if the two numbers are almost equal
38
8
12,080
def complement_alleles ( self ) : self . alleles = self . _encode_alleles ( [ complement_alleles ( i ) for i in self . alleles ] )
Complement the alleles of this variant .
40
9
12,081
def flip_coded ( self ) : self . genotypes = 2 - self . genotypes self . reference , self . coded = self . coded , self . reference
Flips the coding of the alleles .
34
9
12,082
def flip_strand ( self ) : self . reference = complement_alleles ( self . reference ) self . coded = complement_alleles ( self . coded ) self . variant . complement_alleles ( )
Flips the strand of the alleles .
45
9
12,083
def rotvec2mat ( u , phi ) : phi = np . squeeze ( phi ) norm_u = np . linalg . norm ( u ) if norm_u < 1e-12 : raise Exception ( "the rotation vector is equal to zero" ) u = u / norm_u # http://en.wikipedia.org/wiki/Rotation_matrix s = np . sin ( phi ) c = np . cos ( phi ) t = 1 - c ux = u [ 0 ] uy = u [ 1 ] uz = u [ 2 ] res = np . array ( [ [ t * ux * ux + c , t * ux * uy - s * uz , t * ux * uz + s * uy ] , [ t * ux * uy + s * uz , t * uy * uy + c , t * uy * uz - s * ux ] , [ t * ux * uz - s * uy , t * uy * uz + s * ux , t * uz * uz + c ] ] ) return res
Convert rotation from axis and angle to matrix representation
250
10
12,084
def det2lab_xds ( pixels_coord , frame_number , starting_frame , starting_angle , oscillation_angle , rotation_axis , wavelength , wavevector , NX , NY , pixelsize_x , pixelsize_y , distance_to_detector , x_center , y_center , detector_x , detector_y , detector_normal , * * kwargs ) : array_shape = ( 1 , 3 ) if detector_x . shape == array_shape : detector_x = detector_x . T detector_y = detector_y . T detector_normal = detector_normal . T if wavevector . shape == array_shape : wavevector = wavevector . T if rotation_axis . shape == array_shape : rotation_axis = rotation_axis . T xmm = ( pixels_coord [ : , [ 0 ] ] - x_center ) * pixelsize_x ymm = ( pixels_coord [ : , [ 1 ] ] - y_center ) * pixelsize_y # find scattering vector of each pixel scattering_vector_mm = np . outer ( xmm , detector_x ) + np . outer ( ymm , detector_y ) + distance_to_detector * np . outer ( np . ones ( shape = xmm . shape ) , detector_normal ) scattering_vector_mm = scattering_vector_mm . T phi = ( frame_number - starting_frame ) * oscillation_angle + starting_angle # calculating norm for each column norms = np . sum ( scattering_vector_mm ** 2. , axis = 0 ) ** ( 1. / 2 ) #deviding scattering vector by its own norm unit_scattering_vector = scattering_vector_mm / norms #subtracting incident beam vector h = unit_scattering_vector / wavelength - np . tile ( wavevector , ( unit_scattering_vector . shape [ 1 ] , 1 ) ) . T #rotating if phi . size == 1 : h = np . dot ( rotvec2mat ( rotation_axis . T , - 2 * np . pi * phi / 360 ) , h ) else : for i in range ( phi . size ) : h [ : , [ i ] ] = np . dot ( rotvec2mat ( rotation_axis . T , - 2 * np . pi * phi [ i ] / 360 ) , h [ : , [ i ] ] ) return h , scattering_vector_mm , unit_scattering_vector
Converts pixels coordinates from the frame into q - vector
535
11
12,085
def get_query_dict ( self , * * kwargs ) : total_cols = ensure ( int , kwargs . get ( 'total_cols' , [ 0 ] ) [ 0 ] , 0 ) mapping = self . mapping filter_dict = defaultdict ( dict ) # set up the starter, since sometimes we start the enumeration from '1' starter = mapping . keys ( ) [ 0 ] for i in range ( starter , total_cols ) : key = 'columns[{index}]' . format ( index = i ) if kwargs . get ( key + '[searchable]' , [ 0 ] ) [ 0 ] != 'true' : continue search_value = kwargs . get ( key + '[search][value]' , [ '' ] ) [ 0 ] . strip ( ) if not search_value : continue enum_item = mapping . from_key ( i ) filter_obj = enum_item . extra if type ( filter_obj ) is tuple and len ( filter_obj ) == 2 : filter_func , filter_key = filter_obj filter_dict [ filter_func ] [ filter_key ] = search_value elif type ( filter_obj ) is str : filter_dict [ 'filter' ] [ filter_obj ] = search_value else : raise ValueError ( "Invalid filter key." ) return filter_dict
function to generate a filter dictionary in which the key is the keyword used in django filter function in string form and the value is the searched value .
294
30
12,086
def get_order_key ( self , * * kwargs ) : # get the mapping enumeration class from Meta class mapping = self . mapping # use the first element in the enumeration as default order column order_column = kwargs . get ( 'order[0][column]' , [ mapping . keys ( ) [ 0 ] ] ) [ 0 ] order_column = ensure ( int , order_column , mapping . keys ( ) [ 0 ] ) order = kwargs . get ( 'order[0][dir]' , [ 'asc' ] ) [ 0 ] order_key = mapping . from_key ( order_column ) . label # django orm '-' -> desc if order == 'desc' : order_key = '-' + order_key return order_key
function to get the order key to apply it in the filtered queryset
168
15
12,087
def filtering ( queryset , query_dict ) : # apply pre_search_condition for key , value in query_dict . items ( ) : assert hasattr ( queryset , key ) , "Parameter 'query_dict' contains" " non-existent attribute." if isinstance ( value , list ) : queryset = getattr ( queryset , key ) ( * value ) elif isinstance ( value , dict ) : queryset = getattr ( queryset , key ) ( * * value ) else : queryset = getattr ( queryset , key ) ( value ) return queryset
function to apply the pre search condition to the queryset to narrow down the queryset s size
134
21
12,088
def slicing ( queryset , * * kwargs ) : # if the length is -1, we need to display all the records # otherwise, just slicing the queryset length = ensure ( int , kwargs . get ( 'length' , [ 0 ] ) [ 0 ] , 0 ) start = ensure ( int , kwargs . get ( 'start' , [ 0 ] ) [ 0 ] , 0 ) if length >= 0 : queryset = queryset [ start : start + length ] return queryset
function to slice the queryset according to the display length
113
12
12,089
def query_by_args ( self , pre_search_condition = None , * * kwargs ) : if pre_search_condition and not isinstance ( pre_search_condition , OrderedDict ) : raise TypeError ( "Parameter 'pre_search_condition' must be an OrderedDict." ) # extract requisite parameters from kwargs draw = ensure ( int , kwargs . get ( 'draw' , [ 0 ] ) [ 0 ] , 0 ) # just implement the get_query_dict function query_dict = self . get_query_dict ( * * kwargs ) order_key = self . get_order_key ( * * kwargs ) # get the model from the serializer parameter model_class = self . serializer . Meta . model # get the objects queryset = model_class . objects # apply the pre search condition if it exists if pre_search_condition : queryset = self . filtering ( queryset , pre_search_condition ) else : queryset = queryset . all ( ) # number of the total records total = queryset . count ( ) # if the query dict not empty, then apply the query dict if query_dict : queryset = self . filtering ( queryset , query_dict ) # number of the records after applying the query count = queryset . count ( ) # order the queryset queryset = queryset . order_by ( order_key ) # slice the queryset queryset = self . slicing ( queryset , * * kwargs ) return { 'items' : queryset , 'count' : count , 'total' : total , 'draw' : draw }
intends to process the queries sent by data tables package in frontend . The model_cls indicates the model class get_query_dict is a function implemented by you such that it can return a query dictionary in which the key is the query keyword in str form and the value is the queried value
371
62
12,090
def process ( self , pre_search_condition = None , * * kwargs ) : records = self . query_by_args ( pre_search_condition = pre_search_condition , * * kwargs ) serializer = self . serializer ( records [ 'items' ] , many = True ) result = { 'data' : serializer . data , 'draw' : records [ 'draw' ] , 'recordsTotal' : records [ 'total' ] , 'recordsFiltered' : records [ 'count' ] , } return result
function to be called outside to get the footer search condition apply the search in DB and render the serialized result .
120
24
12,091
def coerce ( cls , key , value ) : self = MutationDict ( ( k , MutationObj . coerce ( key , v ) ) for ( k , v ) in value . items ( ) ) self . _key = key return self
Convert plain dictionary to MutationDict
55
9
12,092
def coerce ( cls , key , value ) : self = MutationList ( ( MutationObj . coerce ( key , v ) for v in value ) ) self . _key = key return self
Convert plain list to MutationList
44
8
12,093
def structure ( cls ) : # type: () -> Text downstream = cls . cutter . elucidate ( ) upstream = str ( Seq ( downstream ) . reverse_complement ( ) ) return "" . join ( [ upstream . replace ( "^" , ")(" ) . replace ( "_" , "(" ) , "N*" , downstream . replace ( "^" , ")(" ) . replace ( "_" , ")" ) , ] )
Get the vector structure as a DNA regex pattern .
98
10
12,094
def placeholder_sequence ( self ) : # type: () -> SeqRecord if self . cutter . is_3overhang ( ) : return self . _match . group ( 2 ) + self . overhang_end ( ) else : return self . overhang_start ( ) + self . _match . group ( 2 )
Get the placeholder sequence in the vector .
69
8
12,095
def target_sequence ( self ) : # type: () -> SeqRecord if self . cutter . is_3overhang ( ) : start , end = self . _match . span ( 2 ) [ 0 ] , self . _match . span ( 3 ) [ 1 ] else : start , end = self . _match . span ( 1 ) [ 0 ] , self . _match . span ( 2 ) [ 1 ] return add_as_source ( self . record , ( self . record << start ) [ end - start : ] )
Get the target sequence in the vector .
114
8
12,096
def assemble ( self , module , * modules , * * kwargs ) : # type: (AbstractModule, *AbstractModule, **Any) -> SeqRecord mgr = AssemblyManager ( vector = self , modules = [ module ] + list ( modules ) , name = kwargs . get ( "name" , "assembly" ) , id_ = kwargs . get ( "id" , "assembly" ) , ) return mgr . assemble ( )
Assemble the provided modules into the vector .
99
9
12,097
async def onConnect ( self ) : # Add extra attribute # This allows for following crossbar/autobahn spec # without changing legacy configuration if not hasattr ( self . config , 'extra' ) : original_config = { 'config' : self . config } self . config = objdict ( self . config ) setattr ( self . config , 'extra' , original_config ) self . config . extra [ 'handlers' ] = self . handlers # setup transport host self . transport_host = self . config . extra [ 'config' ] [ 'transport_host' ] # subscription setup self . subscribe_options = SubscribeOptions ( * * self . config . extra [ 'config' ] [ 'sub_options' ] ) self . replay_events = self . config . extra [ 'config' ] [ 'replay_events' ] # publishing setup self . publish_topic = self . config . extra [ 'config' ] [ 'publish_topic' ] [ 'topic' ] self . publish_options = PublishOptions ( * * self . config . extra [ 'config' ] [ 'pub_options' ] ) # setup callback self . handlers = self . config . extra [ 'handlers' ] # optional subscribed topics from config.json self . subscribed_topics = self . config . extra [ 'config' ] [ 'subscribed_topics' ] # put name on session self . name = self . config . extra [ 'config' ] [ 'name' ] # setup db pool - optionally if self . config . extra [ 'config' ] [ 'pub_options' ] [ 'retain' ] is True : self . pool = await asyncpg . create_pool ( user = EVENT_DB_USER , password = EVENT_DB_PASS , host = EVENT_DB_HOST , database = EVENT_DB_NAME ) # Handle non crossbar drivers try : self . join ( self . config . realm ) except AttributeError : pass
Configure the component
422
4
12,098
def getback ( config , force = False ) : repo = config . repo active_branch = repo . active_branch if active_branch . name == "master" : error_out ( "You're already on the master branch." ) if repo . is_dirty ( ) : error_out ( 'Repo is "dirty". ({})' . format ( ", " . join ( [ repr ( x . b_path ) for x in repo . index . diff ( None ) ] ) ) ) branch_name = active_branch . name state = read ( config . configfile ) origin_name = state . get ( "ORIGIN_NAME" , "origin" ) upstream_remote = None fork_remote = None for remote in repo . remotes : if remote . name == origin_name : # remote.pull() upstream_remote = remote break if not upstream_remote : error_out ( "No remote called {!r} found" . format ( origin_name ) ) # Check out master repo . heads . master . checkout ( ) upstream_remote . pull ( repo . heads . master ) # Is this one of the merged branches?! # XXX I don't know how to do this "natively" with GitPython. merged_branches = [ x . strip ( ) for x in repo . git . branch ( "--merged" ) . splitlines ( ) if x . strip ( ) and not x . strip ( ) . startswith ( "*" ) ] was_merged = branch_name in merged_branches certain = was_merged or force if not certain : # Need to ask the user. # XXX This is where we could get smart and compare this branch # with the master. certain = ( input ( "Are you certain {} is actually merged? [Y/n] " . format ( branch_name ) ) . lower ( ) . strip ( ) != "n" ) if not certain : return 1 if was_merged : repo . git . branch ( "-d" , branch_name ) else : repo . git . branch ( "-D" , branch_name ) fork_remote = None for remote in repo . remotes : if remote . name == state . get ( "FORK_NAME" ) : fork_remote = remote break if fork_remote : fork_remote . push ( ":" + branch_name ) info_out ( "Remote branch on fork deleted too." )
Goes back to the master branch deletes the current branch locally and remotely .
519
16
12,099
def get ( _class , api , vid ) : busses = api . vehicles ( vid = vid ) [ 'vehicle' ] return _class . fromapi ( api , api . vehicles ( vid = vid ) [ 'vehicle' ] )
Return a Bus object for a certain vehicle ID vid using API instance api .
58
16