idx
int64
0
251k
question
stringlengths
53
3.53k
target
stringlengths
5
1.23k
len_question
int64
20
893
len_target
int64
3
238
10,200
def patch_qs(url: Text, data: Dict[Text, Text]) -> Text:
    """Return *url* with its query string updated from *data*.

    Existing parameters whose keys appear in *data* are replaced; all
    other parameters are kept, and the new key/value pairs are appended
    at the end of the query string.
    """
    QUERY_INDEX = 4  # position of the query component in urlparse() results
    parts = list(urlparse(url))
    kept = [(k, v) for k, v in parse_qsl(parts[QUERY_INDEX]) if k not in data]
    parts[QUERY_INDEX] = urlencode(kept + list(data.items()))
    return urlunparse(parts)
Given a URL, change the query string to include the values specified in the dictionary.
122
16
10,201
def dict_is_subset(subset: Any, full_set: Any) -> bool:
    """Recursively check that *subset* is contained in *full_set*.

    Every key of a dict in *subset* must exist in *full_set* with a
    matching (recursively subset) value; extra keys in *full_set* are
    ignored. Lists must have equal length and match element-wise.
    Any other values are compared with ``==``.
    """
    if not isinstance(subset, full_set.__class__):
        return False
    if isinstance(subset, dict):
        return all(
            key in full_set and dict_is_subset(value, full_set[key])
            for key, value in subset.items()
        )
    if isinstance(subset, list):
        if len(subset) != len(full_set):
            return False
        return all(dict_is_subset(a, b) for a, b in zip(subset, full_set))
    return subset == full_set
Checks that all keys present in subset are present and have the same value in full_set. If a key is in full_set but not in subset, True will still be returned.
155
39
10,202
def _compile(self, expression):
    """Compile a class-name expression into an actual regex.

    Identifier placeholders become capture-free ``(?:name,)`` groups and
    all whitespace is stripped before compilation.
    """
    pattern = self.RE_SPACES.sub(
        '', self.RE_PYTHON_VAR.sub('(?:\\1,)', expression))
    return re.compile(pattern)
Transform a class exp into an actual regex
58
8
10,203
def _make_string(self, objects: List[Any]) -> Text:
    """Render *objects* as a comma-terminated string of class names."""
    pieces = []
    for item in objects:
        pieces.append(item.__class__.__name__)
        pieces.append(',')
    return ''.join(pieces)
Transforms a list of objects into a matchable string
40
11
10,204
def match(self, objects: List[Any]) -> bool:
    """Tell whether *objects* matches the compiled class expression."""
    rendered = self._make_string(objects)
    return self._compiled_expression.match(rendered) is not None
Return True if the list of objects matches the expression .
45
11
10,205
def get_conf(conf, sect, opt):
    """Resolve a config option, preferring CLI args, then environment.

    Lookup order: the ``args.mambupy_<opt>`` command-line value, the
    ``MAMBUPY_<OPT>`` environment variable, section *sect* of the config
    file, and finally the hard-coded ``default_configs`` entry.
    """
    from_args = getattr(args, "mambupy_" + opt.lower())
    if from_args:
        return from_args
    from_env = os.environ.get("MAMBUPY_" + opt.upper())
    if from_env:
        return from_env
    try:
        return conf.get(sect, opt)
    except NoSectionError:
        return default_configs[opt]
Gets a config opt from conf file under section sect .
99
12
10,206
def iso8601timestamp(T=None, nanos=True, utc=False):
    """Format *T* (seconds since the epoch, default: now) as ISO8601.

    The result looks like ``YYYYMMDDTHHMMSS[.nnnnnnnnn](Z|±HHMM)``:
    nanoseconds are appended when *nanos* is true, and the timestamp is
    rendered in UTC with a ``Z`` suffix when *utc* is true, otherwise in
    local time with a numeric offset.
    """
    if T is None:
        T = time.time()
    whole = math.floor(T)
    frac_ns = round((T - whole) * 1e9)
    if frac_ns >= 1e9:  # rounding pushed us into the next second
        whole += 1
        frac_ns = 0
    broken_down = time.gmtime(whole) if utc else time.localtime(whole)
    stamp = time.strftime("%Y%m%dT%H%M%S", broken_down)
    if nanos:
        stamp += ".{:09d}".format(frac_ns)
    stamp += "Z" if utc else time.strftime("%z", broken_down)
    return stamp
Get ISO8601 - formatted timestamp string .
165
9
10,207
def createWorkDir(baseDir, projName, expUUID, expNames=[], nanos=True, utc=False):
    """Create the working directory for an experiment if it does not exist.

    The directory lives under ``<baseDir>/<projName>/by-uuid/<expUUID>``;
    ``by-time`` and ``by-name`` symlinks are created only by the process
    that first creates the UUID directory.

    :param baseDir: root directory holding all projects.
    :param projName: project sub-directory name.
    :param expUUID: unique id of the experiment; names its directory.
    :param expNames: extra path components under ``by-name/``.
        NOTE(review): mutable default — safe here because it is only
        read, never mutated, but confirm before changing.
    :param nanos: forwarded to iso8601timestamp() for the by-time name.
    :param utc: forwarded to iso8601timestamp() for the by-time name.
    :return: the ``by-uuid`` path of the experiment work directory.
    """
    #
    # First, ensure the project's top-level hierarchy, especially by-uuid/,
    # exists, so that the only possible failure is due to the creation of
    # one additional directory.
    #
    projDir = os.path.join(baseDir, projName)
    byuuidDir = os.path.join(projDir, "by-uuid")
    bytimeDir = os.path.join(projDir, "by-time")
    bynameDir = os.path.join(projDir, "by-name", *expNames)
    byuuidPath = os.path.join(byuuidDir, expUUID)
    os.makedirs(byuuidDir, mode=0o755, exist_ok=True)
    os.makedirs(bytimeDir, mode=0o755, exist_ok=True)
    os.makedirs(bynameDir, mode=0o755, exist_ok=True)
    #
    # Attempt the creation of the experiment workDir by its UUID. Record
    # whether we were the original creators.
    #
    try:
        preexisting = False
        os.makedirs(byuuidPath, mode=0o755, exist_ok=False)
    except FileExistsError:
        preexisting = True
    #
    # If we were the first to create this working directory, additionally
    # make symlinks pointing to it from the auxiliary directories.
    #
    if not preexisting:
        expTime = iso8601timestamp(nanos=nanos, utc=utc)
        expTimeUUID = expTime + "-" + expUUID
        bytimePath = os.path.join(bytimeDir, expTimeUUID)
        bynamePath = os.path.join(bynameDir, expUUID)
        # Relative symlinks so the project tree can be moved wholesale.
        os.symlink(os.path.relpath(byuuidPath, bytimeDir), bytimePath, True)
        os.symlink(os.path.relpath(byuuidPath, bynameDir), bynamePath, True)
    #
    # Create handy .rsync-filter files. "x" mode + suppress(OSError) means
    # an already-present file (or any write failure) is silently ignored.
    #
    with contextlib.suppress(OSError):
        with open(os.path.join(baseDir, ".rsync-filter"), "x") as f:
            f.write("#\n"
                    "# rsync filter rules.\n"
                    "#\n"
                    "# When the argument -F is given to rsync, the rules within will be obeyed.\n"
                    "#\n")
    with contextlib.suppress(OSError):
        with open(os.path.join(projDir, ".rsync-filter"), "x") as f:
            f.write("#\n"
                    "# rsync filter rules.\n"
                    "#\n"
                    "# When the argument -F is given to rsync, the rules within will be obeyed.\n"
                    "#\n")
    #
    # Return the constructed workDir.
    #
    return byuuidPath
Create working directory for experiment if not existing already .
675
10
10,208
def humanize_timesince(start_time):  # pylint:disable=too-many-return-statements
    """Render the time elapsed since *start_time* as a human phrase.

    Falsy input is returned unchanged; otherwise the largest non-zero
    unit wins, e.g. ``'2 weeks ago'`` or ``'a few seconds ago'``.
    """
    if not start_time:
        return start_time
    delta = local_now() - start_time
    # assumption: negative delta values originate from clock
    # differences on different app server machines
    if delta.total_seconds() < 0:
        return 'a few seconds ago'
    units = (
        ('year', delta.days // 365),
        ('week', delta.days // 7),
        ('day', delta.days),
        ('hour', delta.seconds // 3600),
        ('minute', delta.seconds // 60),
    )
    for unit, amount in units:
        if amount > 0:
            return '{} {}{} ago'.format(amount, unit, 's' if amount > 1 else '')
    return 'a few seconds ago'
Creates a string representation of time since the given start_time .
373
14
10,209
def humanize_timedelta(seconds):
    """Render a duration in seconds as e.g. ``'1d 2h 3m'`` or ``'45s'``.

    The cascade keeps at most the leading units: days suppress seconds,
    hours suppress seconds, and zero components are omitted entirely.
    """
    hours, remainder = divmod(seconds, 3600)
    days, hours = divmod(hours, 24)
    minutes, seconds = divmod(remainder, 60)
    if days:
        parts = ['{}d'.format(days)]
        if hours:
            parts.append('{}h'.format(hours))
        if minutes:
            parts.append('{}m'.format(minutes))
        return ' '.join(parts)
    if hours:
        parts = ['{}h'.format(hours)]
        if minutes:
            parts.append('{}m'.format(minutes))
        return ' '.join(parts)
    if minutes:
        parts = ['{}m'.format(minutes)]
        if seconds:
            parts.append('{}s'.format(seconds))
        return ' '.join(parts)
    return '{}s'.format(seconds)
Creates a string representation of timedelta .
163
9
10,210
def start(self):
    """Start mocking HTTP traffic; fail loudly on a second start.

    After this call, un-mocked requests go through HttpMock's own send
    wrapper (installed by _patch_last_send) instead of requests_mock's
    default NoMockAddress behaviour.

    :raises RuntimeError: if the mock was already started.
    """
    if self._http_last_send is not None:
        raise RuntimeError('HttpMock has already been started')
    # 1) save request.Session.send in self._last_send
    # 2) replace request.Session.send with MockerCore send function
    super(HttpMock, self).start()
    # 3) save MockerCore send function in self._http_last_send
    # 4) replace request.Session.send with HttpMock send function
    self._patch_last_send()
Overrides default start behaviour by raising ConnectionError instead of custom requests_mock . exceptions . NoMockAddress .
119
25
10,211
def unhandle(self, handler):
    """Unregister *handler* and return self for chaining.

    The handlers mapping is replaced wholesale (copy-on-write) so
    concurrent readers never observe a half-mutated dict.

    :raises ValueError: if the handler was never registered.
    """
    target, _, _ = self._extract(handler)
    key = hash(target)
    with self._hlock:
        if key not in self.handlers:
            raise ValueError('Handler "%s" was not found' % str(target))
        remaining = self.handlers.copy()
        del remaining[key]
        self.handlers = remaining
    return self
Unregisters a handler
84
5
10,212
def fire(self, *args, **kw):
    """Invoke every registered handler with *args*/*kw*.

    With ``self.threads == 0`` handlers run synchronously in this
    thread; otherwise they are fanned out to daemon worker threads fed
    from a queue. In asynch mode results are not collected (each entry
    is ``(None, None, handler)``); otherwise each entry is either the
    memoized handler result tuple or ``(False, error, handler)``.

    :return: tuple of per-handler results, or None when empty.
    """
    result = []
    # Snapshot the handlers mapping under the lock; iteration below is
    # then safe against concurrent (un)registration.
    with self._hlock:
        handlers = self.handlers
    if self.threads == 0:
        # same-thread execution - synchronized
        for k in handlers:
            # handler, memoize, timeout
            h, m, t = handlers[k]
            try:
                r = self._memoize(h, m, t, *args, **kw)
                result.append(tuple(r))
            except:
                result.append((False, self._error(sys.exc_info()), h))
    elif self.threads > 0:
        # multi-thread execution - desynchronized if self.threads > 1
        queue = Queue()
        # result lock just in case [].append() is not
        # thread-safe in other Python implementations
        rlock = RLock()

        def _execute(*args, **kw):
            """ Executes all handlers stored in the queue """
            while True:
                try:
                    item = queue.get()
                    if item is None:
                        # None is the sentinel telling this worker to stop.
                        queue.task_done()
                        break
                    # handler, memoize, timeout
                    h, m, t = handlers[item]
                    # call under active lock
                    try:
                        r = self._memoize(h, m, t, *args, **kw)
                        if not self.asynch:
                            with rlock:
                                result.append(tuple(r))
                    except:
                        if not self.asynch:
                            with rlock:
                                result.append((False, self._error(sys.exc_info()), h))
                    queue.task_done()
                except Empty:
                    # never triggered, just to be safe
                    break

        if handlers:
            threads = self._threads(handlers=handlers)
            for _ in range(threads):
                t = Thread(target=_execute, args=args, kwargs=kw)
                t.daemon = True
                t.start()
            for k in handlers:
                queue.put(k)
                if self.asynch:
                    # main thread, no locking required
                    h, _, _ = handlers[k]
                    result.append((None, None, h))
            for _ in range(threads):
                queue.put(None)  # stop each worker
            if not self.asynch:
                # Block until every queued handler has been processed.
                queue.join()
    return tuple(result) or None
Stores all registered handlers in a queue for processing
494
10
10,213
def clear(self):
    """Drop every registered handler and every memoized result."""
    for lock, mapping in ((self._hlock, self.handlers),
                          (self._mlock, self.memoize)):
        with lock:
            mapping.clear()
Discards all registered handlers and cached results
35
8
10,214
def _timeout(self, timeout, handler, *args, **kw):
    """Run *handler* in a worker thread, allowing at most *timeout* seconds.

    Returns the handler's result, its captured ``exc_info`` if it raised,
    or a synthesized RuntimeError exc_info tuple if the thread is still
    alive after the deadline (the daemon thread itself cannot be killed).
    """
    worker = spawn_thread(target=handler, args=args, kw=kw)
    worker.daemon = True
    worker.start()
    worker.join(timeout)
    if worker.is_alive():
        # Timed out: the thread keeps running, but we report a failure.
        try:
            msg = '[%s] Execution was forcefully terminated'
            raise RuntimeError(msg % worker.name)
        except:
            return sys.exc_info()
    if worker.exc_info:
        return worker.exc_info
    return worker.result
Controls the time allocated for the execution of a method
118
11
10,215
def _threads(self, handlers):
    """Number of worker threads to start: capped by the handler count."""
    return min(self.threads, len(handlers))
Calculates maximum number of threads that will be started
29
11
10,216
def update(self):
    """Read raw sensor data and refresh temperature and humidity.

    Issues no-hold temperature and humidity conversions on the I2C bus,
    waits for each measurement, CRC-checks both 3-byte replies, and
    stores the calibrated values. On I/O or CRC failure the sensor is
    flagged not-OK and the affected reading is set to -255.
    """
    if not self._ok:
        self.log_error("Trying to restore OK mode w/ soft reset")
        self._ok = self._soft_reset()
    try:
        self._bus.write_byte(self._i2c_add, CMD_READ_TEMP_NOHOLD)
        sleep(MEASUREMENT_WAIT_TIME)
        buf_t = self._bus.read_i2c_block_data(self._i2c_add, CMD_READ_TEMP_HOLD, 3)
        self._bus.write_byte(self._i2c_add, CMD_READ_HUM_NOHOLD)
        sleep(MEASUREMENT_WAIT_TIME)
        buf_h = self._bus.read_i2c_block_data(self._i2c_add, CMD_READ_HUM_HOLD, 3)
    except OSError as exc:
        self._ok = False
        self.log_error("Bad reading: %s", exc)
        return
    if self._crc8check(buf_t):
        # Mask off the two status bits in the raw 16-bit reading.
        temp = (buf_t[0] << 8 | buf_t[1]) & 0xFFFC
        self._temperature = self._calc_temp(temp)
        if self._crc8check(buf_h):
            humid = (buf_h[0] << 8 | buf_h[1]) & 0xFFFC
            rh_actual = self._calc_humid(humid)
            # For temperature coefficient compensation
            rh_final = self._temp_coefficient(rh_actual, self._temperature)
            rh_final = 100.0 if rh_final > 100 else rh_final  # Clamp > 100
            rh_final = 0.0 if rh_final < 0 else rh_final  # Clamp < 0
            self._humidity = rh_final
        else:
            # -255 marks an invalid reading for downstream consumers.
            self._humidity = -255
            self._ok = False
            self.log_error("Bad CRC error with humidity")
    else:
        self._temperature = -255
        self._ok = False
        self.log_error("Bad CRC error with temperature")
Read raw data and calculate temperature and humidity .
491
9
10,217
def get_owner_access_token(self):
    """Return the access token of this workflow's owner."""
    from .database import Session
    session = Session.object_session(self)
    owner = session.query(User).filter_by(id_=self.owner_id).first()
    return owner.access_token
Return workflow owner access token .
63
6
10,218
def update_workflow_status(db_session, workflow_uuid, status, new_logs='', message=None):
    """Update a workflow's status and/or logs in the database.

    :param db_session: active SQLAlchemy session.
    :param workflow_uuid: id of the workflow to update.
    :param status: new status; skipped when falsy.
    :param new_logs: text appended (with a trailing newline) to the logs.
    :param message: unused; kept for backward compatibility.
    :raises Exception: if the workflow does not exist in the database.
    """
    # Fix: the original wrapped everything in `except Exception as e:
    # raise e`, which only re-raised the same exception and obscured the
    # traceback — removed as a no-op.
    workflow = db_session.query(Workflow).filter_by(id_=workflow_uuid).first()
    if not workflow:
        raise Exception(
            'Workflow {0} doesn\'t exist in database.'.format(workflow_uuid))
    if status:
        workflow.status = status
    if new_logs:
        workflow.logs = (workflow.logs or '') + new_logs + '\n'
    db_session.commit()
Update database workflow status .
139
5
10,219
def parse_server_addr(str_addr, default_port=26000):
    """Split an address string into ``(host, port)``.

    Falls back to *default_port* when the string carries no port.
    Both IPv4/hostname and IPv6 forms are supported via ADDR_STR_RE.

    :raises ValueError: on an unparsable address or a zero port.
    """
    matched = ADDR_STR_RE.match(str_addr)
    if matched is None:
        raise ValueError('Bad address string "{0}"'.format(str_addr))
    groups = matched.groupdict()
    raw_port = groups.get('port')
    # Caution: int() could raise ValueError or TypeError
    port = default_port if raw_port is None else int(raw_port)
    if port == 0:
        raise ValueError("Port can't be zero")
    host = groups['host'] or groups['host6']
    return host, port
Parse address and returns host and port
156
8
10,220
def request_goto(self, tc=None):
    """Request a go-to-assignment jump.

    :param tc: text cursor to resolve; defaults to the whole word under
        the editor cursor.
    """
    if not tc:
        tc = TextHelper(self.editor).word_under_cursor(select_whole_word=True)
    # Re-resolve when no definition is cached yet, or when the request
    # came from a menu QAction (i.e. was explicitly user-initiated).
    if not self._definition or isinstance(self.sender(), QAction):
        self.select_word(tc)
    if self._definition is not None:
        # Deferred jump via the event loop; presumably lets the word
        # selection settle first — TODO confirm.
        QTimer.singleShot(100, self._goto_def)
Request a go to assignment .
96
6
10,221
def get_template(name):
    """Return a deep copy of the template with the given name.

    Successfully loaded templates are cached by path; callers always
    receive a private copy they may mutate freely. Returns None when
    the template file cannot be loaded.
    """
    path = os.path.join(base_dir, name)
    try:
        cached = templates[path]
    except KeyError:
        try:
            cached = templates[path] = Template(path)
        except IOError:
            return None
    return copy.deepcopy(cached)
Return a copy of the template with the specified name . If not found or an error occurs during the load return None .
56
24
10,222
def set_value(self, eid, val, idx='*'):
    """Set the content of xml element(s) marked with the *eid* attribute.

    :param eid: element id to look up.
    :param val: new content; a sequence value forces idx to 0 (the
        sequence is expanded by __set_value, not assigned per-index).
    :param idx: '*' to update every matching element, or an integer
        position; out-of-range indexes are silently ignored.
    """
    if eid in self.__element_ids:
        elems = self.__element_ids[eid]
        if type(val) in SEQ_TYPES:
            # Sequence values always start filling from position 0.
            idx = 0
        if idx == '*':
            for elem in elems:
                self.__set_value(eid, elem, val, idx)
        elif idx < len(elems):
            self.__set_value(eid, elems[idx], val, idx)
Set the content of an xml element marked with the matching eid attribute .
127
15
10,223
def set_attribute(self, aid, attrib, val, idx='*'):
    """Set attribute *attrib* to *val* on element(s) tagged with *aid*.

    With the default ``idx='*'`` every matching element is updated;
    otherwise only the element at position *idx*, if it exists.
    """
    if aid not in self.__attrib_ids:
        return
    targets = self.__attrib_ids[aid]
    if idx == '*':
        for target in targets:
            self.__set_attribute(target, attrib, val)
    elif idx < len(targets):
        self.__set_attribute(targets[idx], attrib, val)
Set the value of an xml attribute marked with the matching aid attribute .
111
14
10,224
def hide(self, eid, index=0):
    """Detach the element tagged *eid* from its parent's children.

    Element ids are searched first, then repeat ids. Unknown ids and
    out-of-range indexes are silently ignored.
    """
    if eid in self.__element_ids:
        matches = self.__element_ids[eid]
    elif eid in self.__repeat_ids:
        matches = self.__repeat_ids[eid]
    else:
        matches = None
    if matches and index < len(matches):
        victim = matches[index]
        victim.parent.children.remove(victim)
Hide the element with the matching eid . If no match look for an element with a matching rid .
98
21
10,225
def repeat(self, rid, count, index=0):
    """Repeat the xml element marked with the matching *rid* *count* times.

    Repeat ids are searched first, falling back to element ids. Unknown
    ids and out-of-range indexes are silently ignored.
    """
    elems = None
    if rid in self.__repeat_ids:
        elems = self.__repeat_ids[rid]
    elif rid in self.__element_ids:
        # Bug fix: previously assigned the whole id->elements mapping
        # (self.__element_ids) instead of this rid's element list.
        elems = self.__element_ids[rid]
    if elems and index < len(elems):
        elem = elems[index]
        self.__repeat(elem, count)
Repeat an xml element marked with the matching rid .
90
10
10,226
def replace(self, eid, replacement, index=0):
    """Replace the xml element marked with the matching *eid*.

    An Element/TextElement replacement is swapped in untouched; a
    Template contributes the children of its root element (and its id
    tables are merged into ours); any other value is wrapped in a
    TextElement. Unknown ids and out-of-range indexes are ignored.
    """
    if eid in self.__element_ids:
        elems = self.__element_ids[eid]
    elif eid in self.__repeat_ids:
        elems = self.__repeat_ids[eid]
    else:
        return
    if index < len(elems):
        elem = elems[index]
        # Remember the slot so the replacement lands in the same place.
        current_pos = elem.parent.children.index(elem)
        elem.parent.children.remove(elem)
        replacement_type = type(replacement)
        if replacement_type in (Element, TextElement):
            self.check_element(replacement, True)
            elem.parent.children.insert(current_pos, replacement)
            replacement.parent = elem.parent
        elif replacement_type == Template:
            for child in replacement.root.children:
                elem.parent.children.insert(current_pos, child)
                child.parent = elem.parent
                current_pos += 1
            # Adopt the other template's id registries so its marked
            # elements stay addressable through this template.
            self.__merge_ids(self.__element_ids, replacement.__element_ids)
            self.__merge_ids(self.__attrib_ids, replacement.__attrib_ids)
            self.__merge_ids(self.__repeat_ids, replacement.__repeat_ids)
        else:
            elem.parent.children.insert(current_pos, TextElement(replacement))
Replace an xml element marked with the matching eid. If the replacement value is an Element or TextElement it's swapped in untouched. If it's a Template, the children of the root element in the template are used. Otherwise the replacement value is wrapped with a TextElement.
299
56
10,227
def set_hasher(self, hash, rounds=None):
    """Update the hash algorithm and optionally the number of rounds.

    :param hash: name of a hasher in VALID_HASHERS (dashes allowed).
    :param rounds: desired rounds, clamped to the hasher's min/max.
    :raises WrongHashAlgorithm: for unknown algorithm names.
    """
    algo = hash.replace('-', '_')
    if algo not in VALID_HASHERS:
        raise WrongHashAlgorithm(WRONG_HASH_MESSAGE)
    hasher = getattr(ph, algo)
    utils.test_hasher(hasher)
    lo = getattr(hasher, 'min_rounds', 1)
    hi = getattr(hasher, 'max_rounds', float("inf"))
    wanted = rounds or getattr(hasher, 'default_rounds', 1)
    clamped = min(max(wanted, lo), hi)
    options = {
        'schemes': VALID_HASHERS + DEPRECATED_HASHERS,
        'deprecated': DEPRECATED_HASHERS,
        'default': algo,
        algo + '__default_rounds': clamped,
    }
    self.hasher = CryptContext(**options)
    self.hash = algo.replace('_', '-')  # For testing
    self.rounds = clamped
Updates the hash algorithm and optionally the number of rounds to use.
258
14
10,228
def to_bool(value, do_raise=True):
    """Convert a string to a boolean value.

    Digit strings map via int() (any non-zero is True); otherwise the
    lowercased value is looked up in the module's true/false word sets.
    Unrecognized input raises ValueError, or returns False when
    *do_raise* is false.
    """
    value = value.lower()
    # Try it as an integer first.
    if value.isdigit():
        return bool(int(value))
    # Then check it against the known true/false words.
    if value in _str_true:
        return True
    if value in _str_false:
        return False
    # Not recognized.
    if do_raise:
        raise ValueError("invalid literal for to_bool(): %r" % value)
    return False
Convert a string to a boolean value .
105
9
10,229
async def become(self, layer_type: Type[L], request: 'Request') -> L:
    """Base layers refuse to transform; subclasses may override."""
    msg = 'Cannot become "{}"'.format(layer_type.__name__)
    raise ValueError(msg)
Transform this layer into another layer type
48
7
10,230
async def become(self, layer_type: Type[L], request: 'Request'):
    """Render the translatable text and wrap it in a RawText layer.

    Only RawText is a valid target; any other *layer_type* is delegated
    to the base implementation, which raises ValueError.
    """
    if layer_type != RawText:
        # Bug fix: the base become() is a coroutine, so it must be
        # awaited — previously the coroutine was created but never run,
        # silently swallowing the intended ValueError.
        await super(Text, self).become(layer_type, request)
    return RawText(await render(self.text, request))
Transforms the translatable string into an actual string and put it inside a RawText .
60
18
10,231
def _make_register(self) -> BaseRegisterStore:
    """Instantiate the register store configured in settings."""
    conf = settings.REGISTER_STORE
    store_class = import_class(conf['class'])
    return store_class(**conf['params'])
Make the register storage .
51
5
10,232
def _make_transitions(self) -> List[Transition]:
    """Import the configured transitions module and return its list."""
    module_ = importlib.import_module(settings.TRANSITIONS_MODULE)
    return module_.transitions
Load the transitions file .
46
5
10,233
def _make_allowed_states(self) -> Iterator[Text]:
    """Yield the names of every state reachable via the transitions.

    Both destination and (present) origin state names are produced, so
    that states restored from the database can be validated against an
    explicit allow-list instead of importing arbitrary classes.
    """
    for transition in self.transitions:
        yield transition.dest.name()
        if transition.origin:
            yield transition.origin.name()
Sometimes we load states from the database . In order to avoid loading an arbitrary class we list here the state classes that are allowed .
45
26
10,234
async def _find_trigger(self, request: Request, origin: Optional[Text]=None, internal: bool=False) -> Tuple[Optional[BaseTrigger], Optional[Type[BaseState]], Optional[bool],]:
    """Find the best trigger for this request, or (None, None, None).

    All transitions matching the *internal* flag are ranked concurrently
    from the origin state (default: the state saved in the register);
    the highest-scoring one wins if it clears MINIMAL_TRIGGER_SCORE.
    """
    reg = request.register
    if not origin:
        origin = reg.get(Register.STATE)
    logger.debug('From state: %s', origin)
    # Rank every eligible transition concurrently.
    results = await asyncio.gather(*(
        x.rank(request, origin)
        for x in self.transitions
        if x.internal == internal
    ))
    if len(results):
        # Each result is (score, trigger, state, do-not-register flag).
        score, trigger, state, dnr = max(results, key=lambda x: x[0])
        if score >= settings.MINIMAL_TRIGGER_SCORE:
            return trigger, state, dnr
    return None, None, None
Find the best trigger for this request or go away .
178
11
10,235
def _confused_state(self, request: Request) -> Type[BaseState]:
    """Pick the state class to run when no trigger matched.

    The current state is reused when it is in the allow-list and can be
    imported; otherwise the configured default state takes over.
    """
    candidate = request.register.get(Register.STATE)
    if candidate in self._allowed_states:
        try:
            return import_class(candidate)
        except (AttributeError, ImportError):
            pass  # fall through to the default state
    return import_class(settings.DEFAULT_STATE)
If we're confused, find which state to call.
74
10
10,236
async def _build_state(self, request: Request, message: BaseMessage, responder: Responder) -> Tuple[Optional[BaseState], Optional[BaseTrigger], Optional[bool],]:
    """Build the state for this request.

    Returns (state, trigger, do-not-register flag). When no trigger
    matches and the message should not trigger confusion, all three are
    None; otherwise the "confused" state is used as a fallback.
    """
    trigger, state_class, dnr = await self._find_trigger(request)
    if trigger is None:
        if not message.should_confuse():
            return None, None, None
        state_class = self._confused_state(request)
        logger.debug('Next state: %s (confused)', state_class.name())
    else:
        logger.debug('Next state: %s', state_class.name())
    # On the first hop the user trigger and the active trigger coincide,
    # hence `trigger` is passed twice.
    state = state_class(request, responder, trigger, trigger)
    return state, trigger, dnr
Build the state for this request .
170
7
10,237
async def _run_state ( self , responder , state , trigger , request ) -> BaseState : user_trigger = trigger # noinspection PyBroadException try : if trigger : await state . handle ( ) else : await state . confused ( ) for i in range ( 0 , settings . MAX_INTERNAL_JUMPS + 1 ) : if i == settings . MAX_INTERNAL_JUMPS : raise MaxInternalJump ( ) trigger , state_class , dnr = await self . _find_trigger ( request , state . name ( ) , True ) if not trigger : break logger . debug ( 'Jumping to state: %s' , state_class . name ( ) ) state = state_class ( request , responder , trigger , user_trigger ) await state . handle ( ) except Exception : logger . exception ( 'Error while handling state "%s"' , state . name ( ) ) responder . clear ( ) reporter . report ( request , state . name ( ) ) await state . error ( ) return state
Execute the state or if execution fails handle it .
222
11
10,238
async def _build_state_register(self, state: BaseState, request: Request, responder: Responder) -> Dict:
    """Assemble the register dict persisted for the next request."""
    transition_register = await responder.make_transition_register(request)
    return {
        Register.STATE: state.name(),
        Register.TRANSITION: transition_register,
    }
Build the next register to store .
63
7
10,239
def runGetResults(cmd, stdout=True, stderr=True, encoding=sys.getdefaultencoding()):
    """Run a command and return the results of the execution as a dict.

    :param cmd: list/tuple (executed directly) or string (run via shell).
    :param stdout: True/subprocess.STDOUT to capture stdout, anything
        else to inherit the parent's stdout.
    :param stderr: True/subprocess.PIPE to capture stderr separately,
        'stdout'/subprocess.STDOUT to merge it into stdout, anything
        else to inherit.
    :param encoding: codec used to decode captured bytes; falsy keeps
        raw bytes.
    :return: dict with 'returnCode' plus 'stdout'/'stderr' keys for each
        captured stream (no 'stderr' key when merged into stdout).
    :raises SimpleCommandFailure: when the process cannot be spawned.
    :raises ValueError: when stderr is merged into a non-captured stdout.
    """
    # Normalize the stderr flag.
    if stderr in ('stdout', subprocess.STDOUT):
        stderr = subprocess.STDOUT
    elif stderr == True or stderr == subprocess.PIPE:
        stderr = subprocess.PIPE
    else:
        stderr = None
    # Normalize the stdout flag.
    if stdout == True or stdout == subprocess.STDOUT:
        stdout = subprocess.PIPE
    else:
        stdout = None
    # Bug fix: the guard must reject merging stderr into an uncaptured
    # stdout (as the message says) — previously it rejected every
    # captured stderr (== PIPE), which contradicted the code below.
    if stdout is None and stderr == subprocess.STDOUT:
        raise ValueError('Cannot redirect stderr to stdout if stdout is not captured.')
    # A string command goes through the shell; a list/tuple does not.
    shell = not issubclass(cmd.__class__, (list, tuple))
    try:
        pipe = subprocess.Popen(cmd, stdout=stdout, stderr=stderr, shell=shell)
    except Exception as e:
        # Bug fix: join the argv list (not the shell string) for display.
        try:
            cmdStr = cmd if shell else ' '.join(cmd)
        except Exception:
            cmdStr = repr(cmd)
        raise SimpleCommandFailure('Failed to execute "%s": %s' % (cmdStr, str(e)), returnCode=255)
    streams = []
    fileNoToKey = {}
    ret = {}
    if stdout == subprocess.PIPE:
        streams.append(pipe.stdout)
        fileNoToKey[pipe.stdout.fileno()] = 'stdout'
        ret['stdout'] = []
    if stderr == subprocess.PIPE:
        streams.append(pipe.stderr)
        fileNoToKey[pipe.stderr.fileno()] = 'stderr'
        ret['stderr'] = []
    returnCode = None
    time.sleep(.02)
    # Drain both pipes until the process exits AND both streams hit EOF.
    while returnCode is None or streams:
        returnCode = pipe.poll()
        while True:
            (readyToRead, _junk1, _junk2) = select.select(streams, [], [], .005)
            if not readyToRead:
                # Don't strangle CPU
                time.sleep(.01)
                break
            for readyStream in readyToRead:
                retKey = fileNoToKey[readyStream.fileno()]
                curRead = readyStream.read()
                if curRead in (b'', ''):
                    # EOF on this stream: stop selecting on it.
                    streams.remove(readyStream)
                    continue
                ret[retKey].append(curRead)
    # Join the captured chunks and decode if an encoding was requested.
    for key in list(ret.keys()):
        ret[key] = b''.join(ret[key])
        if encoding:
            ret[key] = ret[key].decode(encoding)
    ret['returnCode'] = returnCode
    return ret
runGetResults - Simple method to run a command and return the results of the execution as a dict .
598
21
10,240
def create_context_store(name='default', ttl=settings.CONTEXT_DEFAULT_TTL, store=settings.CONTEXT_STORE) -> 'BaseContextStore':
    """Create a context store.

    By default the configured context store is used, but a custom
    *store* mapping ({'class': ..., 'params': ...}) may be supplied.
    """
    cls = import_class(store['class'])
    return cls(name=name, ttl=ttl, **store['params'])
Create a context store . By default using the default configured context store but you can use a custom class if you want to using the store setting .
83
29
10,241
def camelcase(text, acronyms=None):
    """Return *text* in camelCase style."""
    words, _case, _sep = case_parse.parse_case(text, acronyms)
    if not words:
        return ''
    return words[0].lower() + ''.join(words[1:])
Return text in camelCase style .
59
7
10,242
def dotcase(text, acronyms=None):
    """Return *text* in dot.case style."""
    words, _case, _sep = case_parse.parse_case(text, acronyms)
    lowered = (word.lower() for word in words)
    return '.'.join(lowered)
Return text in dot . case style .
54
8
10,243
def separate_words(text, acronyms=None):
    """Return *text* in "separate words" style, preserving case."""
    parsed = case_parse.parse_case(text, acronyms, preserve_case=True)
    words = parsed[0]
    return ' '.join(words)
Return text in separate words style.
51
8
10,244
def init_db():
    """Initialize the DB: create the database if missing, then all tables."""
    # Imported for its side effect: registers the models on Base.metadata.
    import reana_db.models  # noqa: F401
    if not database_exists(engine.url):
        create_database(engine.url)
    Base.metadata.create_all(bind=engine)
Initialize the DB .
46
5
10,245
def _load_significant_pathways_file(path_to_file):
    """Read the significant-pathways TSV as a pandas.DataFrame.

    :param path_to_file: path (or buffer) of a tab-separated file with
        at least the columns "feature", "side" and "pathway".
    :return: DataFrame with those three columns, sorted by feature then
        side.
    """
    # pd.read_table is deprecated; read_csv with an explicit tab
    # separator is the supported equivalent.
    feature_pathway_df = pd.read_csv(
        path_to_file,
        sep="\t",
        header=0,
        usecols=["feature", "side", "pathway"],
    )
    return feature_pathway_df.sort_values(by=["feature", "side"])
Read in the significant pathways file as a pandas . DataFrame .
98
14
10,246
def _pathway_feature_permutation(pathway_feature_tuples, permutation_max_iters):
    """Permute the pathways across features for one side of the network.

    Shuffles the pathway column of (pathway, feature) tuples while
    keeping each feature's pathway set duplicate-free, repairing any
    duplicate by swapping with a randomly chosen compatible position.
    Used in permute_pathways_across_features.

    :param pathway_feature_tuples: tuples grouped by feature.
    :param permutation_max_iters: cap on repair attempts per duplicate.
    :return: permuted list of (pathway, feature) tuples, or None if the
        repair loop hit the cap or the permutation ended up identical.
    """
    pathways, features = [list(elements_at_position)
                          for elements_at_position in zip(*pathway_feature_tuples)]
    original_pathways = pathways[:]
    random.shuffle(pathways)
    feature_block_locations = {}
    i = 0
    while i < len(pathways):
        starting_index = i
        current_feature = features[i]
        pathway_set = set()
        # input is grouped by feature, so we want to keep track of the start
        # and end of a given "block" of the same feature--this corresponds
        # to all the pathways overrepresented in that feature.
        while i < len(pathways) and features[i] == current_feature:
            # check the results of the permutation. if `pathway_set` does
            # not contain the current pathway, we are maintaining the
            # necessary invariants in our permutation thus far.
            if pathways[i] not in pathway_set:
                pathway_set.add(pathways[i])
            else:
                k = 0
                random_pathway = None
                while True:
                    # select another random pathway from the list
                    # and get the feature to which it belongs
                    j = random.choice(range(0, len(pathways)))
                    random_pathway = pathways[j]
                    random_feature = features[j]
                    if (random_pathway != pathways[i] and
                            random_pathway not in pathway_set):
                        # if this is a feature we have not already seen,
                        # we are done.
                        if random_feature not in feature_block_locations:
                            break
                        # otherwise, look at the indices that correspond
                        # to that feature's block of pathways
                        feature_block_start, feature_block_end = \
                            feature_block_locations[random_feature]
                        pathway_block = pathways[feature_block_start:feature_block_end]
                        # make sure that the current pathway is not in
                        # that block--ensures that we maintain the invariant
                        # after the swap
                        if pathways[i] not in pathway_block:
                            break
                    k += 1
                    if k > permutation_max_iters:
                        print("Permutation step: reached the maximum "
                              "number of iterations {0}.".format(permutation_max_iters))
                        return None
                # Swap the conflicting pathway with the chosen one.
                pathway_set.add(random_pathway)
                pathways[j] = pathways[i]
                pathways[i] = random_pathway
            i += 1
        ending_index = i
        feature_block_locations[current_feature] = (starting_index, ending_index)
    # A permutation identical to the input carries no information.
    if original_pathways == pathways:
        return None
    return list(zip(pathways, features))
Permute the pathways across features for one side in the network . Used in permute_pathways_across_features
573
26
10,247
def weight_by_edge_odds_ratios(self, edges_expected_weight, flag_as_significant):
    """Reweight edges by their odds ratios (applied during permutation test).

    Each listed edge's weight is divided by its expected weight; the
    odds ratio measures how unexpected the observed weight is. Edges in
    *flag_as_significant* are marked significant, the rest explicitly
    not significant.
    """
    for edge_id, expected_weight in edges_expected_weight:
        edge = self.edges[edge_id]
        edge.weight = edge.weight / expected_weight
        edge.significant = edge_id in flag_as_significant
Applied during the permutation test . Update the edges in the network to be weighted by their odds ratios . The odds ratio measures how unexpected the observed edge weight is based on the expected weight .
92
39
10,248
def aggregate(self, merge):
    """Combine this network with *merge* (union of edges, summed weights).

    The merged network's vertex ids are remapped into this network's id
    space; edges present in both get their weights summed, new edges are
    copied in and wired to their vertices.
    """
    # Per-feature bookkeeping is meaningless on an aggregate network.
    self.features = set()
    self.n_features += merge.n_features
    vertex_id_conversion = self.convert_pathway_mapping(merge.pathways)
    for edge_id, edge in merge.edges.items():
        edge_key = self.remapped_edge(vertex_id_conversion, edge_id)
        if edge_key in self.edges:
            # NOTE(review): which_features is emptied (not merged) on
            # overlap — presumably because feature lists from different
            # networks are incomparable; confirm intent.
            if self.edges[edge_key].which_features:
                self.edges[edge_key].which_features = []
            self.edges[edge_key].weight += edge.weight
        else:
            vertex0_id, vertex1_id = edge_key
            new_edge_obj = Edge(vertex0_id, vertex1_id, [])
            new_edge_obj.weight = edge.weight
            self.edges[edge_key] = new_edge_obj
            self._add_edge_to_vertex(vertex0_id, new_edge_obj)
            self._add_edge_to_vertex(vertex1_id, new_edge_obj)
Combine this network with another network . The aggregation step takes the union of the edges in the two networks where we take the sum of weights for edges common to both networks .
243
35
10,249
def edge_tuple(self, vertex0_id, vertex1_id):
    """Order an edge's vertex ids by their pathway names.

    Keeping vertex ids sorted by pathway name prevents duplicate edges
    whose ids are merely reversed. Returns None when either vertex is
    unknown or both map to the same pathway name.
    """
    pathway0 = self.__getitem__(vertex0_id)
    pathway1 = self.__getitem__(vertex1_id)
    if not pathway0 or not pathway1:
        return None
    if pathway0 < pathway1:
        return (vertex0_id, vertex1_id)
    if pathway0 > pathway1:
        return (vertex1_id, vertex0_id)
    return None
To avoid duplicate edges where the vertex ids are reversed we maintain that the vertex ids are ordered so that the corresponding pathway names are alphabetical .
111
30
10,250
def add_pathway(self, pathway):
    """Intern *pathway* (updating self.pathways/self.n_pathways); return its id."""
    known = self.pathways
    if pathway in known:
        return known[pathway]
    new_id = self.n_pathways
    known[pathway] = new_id
    self.n_pathways = new_id + 1
    return new_id
Updates self . pathways and self . n_pathways .
47
13
10,251
def get_edge_pathways(self, edge_id):
    """Return the (pathway0, pathway1) pair for an edge, or None if
    either vertex has no pathway."""
    vertex0_id, vertex1_id = edge_id
    pathway0 = self.get_pathway_from_vertex_id(vertex0_id)
    pathway1 = self.get_pathway_from_vertex_id(vertex1_id)
    if pathway0 and pathway1:
        return (pathway0, pathway1)
    return None
Get the pathways associated with an edge .
97
8
10,252
def get_vertex_obj_from_pathway(self, pathway):
    """Return the vertex object for *pathway*, or None if unknown."""
    if pathway not in self.pathways:
        return None
    return self.vertices[self.pathways[pathway]]
Get the vertex object that corresponds to a pathway name
49
10
10,253
def get_adjacent_pathways(self, pathway):
    """List the pathway names adjacent to *pathway* in the network."""
    vertex_id = self.pathways[pathway]
    neighbor_ids = self.vertices[vertex_id].get_adjacent_vertex_ids()
    return [self.get_pathway_from_vertex_id(nid) for nid in neighbor_ids]
Get the pathways adjacent to this pathway in the network
92
10
10,254
def to_dataframe ( self , drop_weights_below = 0 , whitelist = None ) : network_df_cols = [ "pw0" , "pw1" , "weight" ] if self . features : network_df_cols . append ( "features" ) network_df = pd . DataFrame ( columns = network_df_cols ) idx = 0 edge_pathways = set ( ) for ( v0 , v1 ) , edge_obj in self . edges . items ( ) : if ( edge_obj . weight > drop_weights_below and ( whitelist is None or ( v0 , v1 ) in whitelist ) ) : row = [ self . __getitem__ ( v0 ) , self . __getitem__ ( v1 ) , edge_obj . weight ] edge_pathways . add ( v0 ) edge_pathways . add ( v1 ) if self . features : features = edge_obj . features_to_string ( ) row . append ( features ) network_df . loc [ idx ] = row idx += 1 # faster to append by index. network_df = network_df . sort_values ( by = [ "weight" ] , ascending = False ) print ( "The pathway co-occurrence network " "contains {0} pathways." . format ( len ( edge_pathways ) ) ) return network_df
Conversion of the network to a pandas . DataFrame .
305
13
10,255
def _add_edge_to_vertex ( self , vertex_id , edge ) : connected_to = edge . connected_to ( vertex_id ) if vertex_id not in self . vertices : vertex_obj = Vertex ( vertex_id ) self . vertices [ vertex_id ] = vertex_obj self . vertices [ vertex_id ] . edges [ connected_to ] = edge . weight
Adds the edge to the Vertex object s edges dictionary
89
11
10,256
def _edges_from_permutation ( self , feature_pathway_dict ) : network_edges = { } for feature , pathway_list in feature_pathway_dict . items ( ) : for i in range ( len ( pathway_list ) ) : for j in range ( i + 1 , len ( pathway_list ) ) : vertex_i = pathway_list [ i ] vertex_j = pathway_list [ j ] new_edge = self . edge_tuple ( vertex_i , vertex_j ) if new_edge not in network_edges : network_edges [ new_edge ] = [ ] network_edges [ new_edge ] . append ( feature ) self . _augment_network ( network_edges )
Given a dictionary mapping each feature to the pathways overrepresented in the feature build a CoNetwork by creating edges for every pairwise combination of pathways in a feature .
164
32
10,257
def normalize_text ( self , text ) : if not self . editor . free_format : text = ' ' * 6 + text [ 6 : ] return text . upper ( )
Normalize text when fixed format is ON replace the first 6 chars by a space .
39
17
10,258
def get_neighborhood_network ( self , node_name : str , order : int = 1 ) -> Graph : logger . info ( "In get_neighborhood_graph()" ) neighbors = list ( self . get_neighbor_names ( node_name , order ) ) neighbor_network = self . graph . copy ( ) neighbor_network . delete_vertices ( self . graph . vs . select ( name_notin = neighbors ) ) return neighbor_network
Get the neighborhood graph of a node .
105
8
10,259
def get_neighbor_names ( self , node_name : str , order : int = 1 ) -> list : logger . info ( "In get_neighbor_names()" ) node = self . graph . vs . find ( name = node_name ) neighbors = self . graph . neighborhood ( node , order = order ) names = self . graph . vs [ neighbors ] [ "name" ] names . append ( node_name ) return list ( names )
Get the names of all neighbors of a node and the node itself .
100
14
10,260
def get_neighborhood_overlap ( self , node1 , node2 , connection_type = None ) : if connection_type is None or connection_type == "direct" : order = 1 elif connection_type == "second-degree" : order = 2 else : raise Exception ( "Invalid option: {}. Valid options are direct and second-degree" . format ( connection_type ) ) neighbors1 = self . graph . neighborhood ( node1 , order = order ) neighbors2 = self . graph . neighborhood ( node2 , order = order ) return set ( neighbors1 ) . intersection ( neighbors2 )
Get the intersection of two nodes s neighborhoods .
131
9
10,261
def parse ( cls , parser , text , pos ) : # pylint: disable=W0613 if not text . strip ( ) : return text , SyntaxError ( "Invalid value" ) class Rule ( object ) : grammar = attr ( 'value' , SpiresSimpleValue ) , omit ( re . compile ( ".*" ) ) try : tree = pypeg2 . parse ( text , Rule , whitespace = "" ) except SyntaxError : return text , SyntaxError ( "Expected %r" % cls ) else : r = tree . value if r . value . lower ( ) in ( 'and' , 'or' , 'not' ) : return text , SyntaxError ( "Invalid value %s" % r . value ) return text [ len ( r . value ) : ] , r
Match simple values excluding some Keywords like and and or
178
11
10,262
def get_field_infos ( code , free_format ) : offset = 0 field_infos = [ ] lines = _clean_code ( code ) previous_offset = 0 for row in process_cobol ( lines , free_format ) : fi = PicFieldInfo ( ) fi . name = row [ "name" ] fi . level = row [ "level" ] fi . pic = row [ "pic" ] fi . occurs = row [ "occurs" ] fi . redefines = row [ "redefines" ] fi . indexed_by = row [ "indexed_by" ] # find item that was redefined and use its offset if fi . redefines : for fib in field_infos : if fib . name == fi . redefines : offset = fib . offset # level 1 should have their offset set to 1 if fi . level == 1 : offset = 1 # level 78 have no offset if fi . level == 78 : offset = 0 # level 77 have offset always to 1 if fi . level == 77 : offset = 1 # set item offset fi . offset = offset # special case: level 88 have the same level as its parent if fi . level == 88 : fi . offset = previous_offset else : previous_offset = offset field_infos . append ( fi ) # compute offset of next PIC field. if row [ 'pic' ] : offset += row [ 'pic_info' ] [ 'length' ] return field_infos
Gets the list of pic fields information from line |start| to line |end| .
314
19
10,263
def get_signed_url ( self , params ) : params [ 'client' ] = self . client_id url_params = { 'protocol' : self . protocol , 'domain' : self . domain , 'service' : self . service , 'params' : urlencode ( params ) } secret = base64 . urlsafe_b64decode ( self . secret_key ) url_params [ 'url_part' ] = ( '/maps/api/%(service)s/json?%(params)s' % url_params ) signature = hmac . new ( secret , url_params [ 'url_part' ] , hashlib . sha1 ) url_params [ 'signature' ] = base64 . urlsafe_b64encode ( signature . digest ( ) ) return ( '%(protocol)s://%(domain)s%(url_part)s' '&signature=%(signature)s' % url_params )
Returns a Premier account signed url .
216
7
10,264
def parse_json ( self , page ) : if not isinstance ( page , basestring ) : page = util . decode_page ( page ) self . doc = json . loads ( page ) results = self . doc . get ( self . result_name , [ ] ) if not results : self . check_status ( self . doc . get ( 'status' ) ) return None return results
Returns json feed .
84
4
10,265
def _determine_case ( was_upper , words , string ) : case_type = 'unknown' if was_upper : case_type = 'upper' elif string . islower ( ) : case_type = 'lower' elif len ( words ) > 0 : camel_case = words [ 0 ] . islower ( ) pascal_case = words [ 0 ] . istitle ( ) or words [ 0 ] . isupper ( ) if camel_case or pascal_case : for word in words [ 1 : ] : c = word . istitle ( ) or word . isupper ( ) camel_case &= c pascal_case &= c if not c : break if camel_case : case_type = 'camel' elif pascal_case : case_type = 'pascal' else : case_type = 'mixed' return case_type
Determine case type of string .
192
8
10,266
def _advanced_acronym_detection ( s , i , words , acronyms ) : # Combine each letter into single string. acstr = '' . join ( words [ s : i ] ) # List of ranges representing found acronyms. range_list = [ ] # Set of remaining letters. not_range = set ( range ( len ( acstr ) ) ) # Search for each acronym in acstr. for acronym in acronyms : # TODO: Sanitize acronyms to include only letters. rac = regex . compile ( unicode ( acronym ) ) # Loop until all instances of the acronym are found, # instead of just the first. n = 0 while True : m = rac . search ( acstr , n ) if not m : break a , b = m . start ( ) , m . end ( ) n = b # Make sure found acronym doesn't overlap with others. ok = True for r in range_list : if a < r [ 1 ] and b > r [ 0 ] : ok = False break if ok : range_list . append ( ( a , b ) ) for j in xrange ( a , b ) : not_range . remove ( j ) # Add remaining letters as ranges. for nr in not_range : range_list . append ( ( nr , nr + 1 ) ) # No ranges will overlap, so it's safe to sort by lower bound, # which sort() will do by default. range_list . sort ( ) # Remove original letters in word list. for _ in xrange ( s , i ) : del words [ s ] # Replace them with new word grouping. for j in xrange ( len ( range_list ) ) : r = range_list [ j ] words . insert ( s + j , acstr [ r [ 0 ] : r [ 1 ] ] ) return s + len ( range_list ) - 1
Detect acronyms by checking against a list of acronyms .
407
14
10,267
def _simple_acronym_detection ( s , i , words , * args ) : # Combine each letter into a single string. acronym = '' . join ( words [ s : i ] ) # Remove original letters in word list. for _ in xrange ( s , i ) : del words [ s ] # Replace them with new word grouping. words . insert ( s , '' . join ( acronym ) ) return s
Detect acronyms based on runs of upper - case letters .
90
13
10,268
def _sanitize_acronyms ( unsafe_acronyms ) : valid_acronym = regex . compile ( u'^[\p{Ll}\p{Lu}\p{Nd}]+$' ) acronyms = [ ] for a in unsafe_acronyms : if valid_acronym . match ( a ) : acronyms . append ( a . upper ( ) ) else : raise InvalidAcronymError ( a ) return acronyms
Check acronyms against regex .
107
7
10,269
def _normalize_words ( words , acronyms ) : for i , _ in enumerate ( words ) : # if detect_acronyms: if words [ i ] . upper ( ) in acronyms : # Convert known acronyms to upper-case. words [ i ] = words [ i ] . upper ( ) else : # Fallback behavior: Preserve case on upper-case words. if not words [ i ] . isupper ( ) : words [ i ] = words [ i ] . capitalize ( ) return words
Normalize case of each word to PascalCase .
115
10
10,270
def _separate_words ( string ) : words = [ ] separator = "" # Index of current character. Initially 1 because we don't want to check # if the 0th character is a boundary. i = 1 # Index of first character in a sequence s = 0 # Previous character. p = string [ 0 : 1 ] # Treat an all-caps stringiable as lower-case, so that every letter isn't # counted as a boundary. was_upper = False if string . isupper ( ) : string = string . lower ( ) was_upper = True # Iterate over each character, checking for boundaries, or places where # the stringiable should divided. while i <= len ( string ) : c = string [ i : i + 1 ] split = False if i < len ( string ) : # Detect upper-case letter as boundary. if UPPER . match ( c ) : split = True # Detect transition from separator to not separator. elif NOTSEP . match ( c ) and SEP . match ( p ) : split = True # Detect transition not separator to separator. elif SEP . match ( c ) and NOTSEP . match ( p ) : split = True else : # The loop goes one extra iteration so that it can handle the # remaining text after the last boundary. split = True if split : if NOTSEP . match ( p ) : words . append ( string [ s : i ] ) else : # stringiable contains at least one separator. # Use the first one as the stringiable's primary separator. if not separator : separator = string [ s : s + 1 ] # Use None to indicate a separator in the word list. words . append ( None ) # If separators weren't included in the list, then breaks # between upper-case sequences ("AAA_BBB") would be # disregarded; the letter-run detector would count them as one # sequence ("AAABBB"). s = i i += 1 p = c return words , separator , was_upper
Segment string on separator into list of words .
433
11
10,271
def parse_case ( string , acronyms = None , preserve_case = False ) : words , separator , was_upper = _separate_words ( string ) if acronyms : # Use advanced acronym detection with list acronyms = _sanitize_acronyms ( acronyms ) check_acronym = _advanced_acronym_detection else : acronyms = [ ] # Fallback to simple acronym detection. check_acronym = _simple_acronym_detection # Letter-run detector # Index of current word. i = 0 # Index of first letter in run. s = None # Find runs of single upper-case letters. while i < len ( words ) : word = words [ i ] if word is not None and UPPER . match ( word ) : if s is None : s = i elif s is not None : i = check_acronym ( s , i , words , acronyms ) + 1 s = None i += 1 if s is not None : check_acronym ( s , i , words , acronyms ) # Separators are no longer needed, so they can be removed. They *should* # be removed, since it's supposed to be a *word* list. words = [ w for w in words if w is not None ] # Determine case type. case_type = _determine_case ( was_upper , words , string ) if preserve_case : if was_upper : words = [ w . upper ( ) for w in words ] else : words = _normalize_words ( words , acronyms ) return words , case_type , separator
Parse a stringiable into a list of words .
363
11
10,272
def send_email ( self , user , subject , msg ) : print ( 'To:' , user ) print ( 'Subject:' , subject ) print ( msg )
Should be overwritten in the setup
34
7
10,273
def update ( self , new_data : IntentDict ) : for locale , data in new_data . items ( ) : if locale not in self . dict : self . dict [ locale ] = { } self . dict [ locale ] . update ( data )
Receive an update from the loaders .
55
9
10,274
def get ( self , key : Text , locale : Optional [ Text ] ) -> List [ Tuple [ Text , ... ] ] : locale = self . choose_locale ( locale ) return self . dict [ locale ] [ key ]
Get a single set of intents .
49
8
10,275
async def strings ( self , request : Optional [ 'Request' ] = None ) -> List [ Tuple [ Text , ... ] ] : if request : locale = await request . get_locale ( ) else : locale = None return self . db . get ( self . key , locale )
For the given request find the list of strings of that intent . If the intent does not exist it will raise a KeyError .
62
26
10,276
def get_unitless_standard_names ( ) : global _UNITLESS_DB if _UNITLESS_DB is None : with open ( resource_filename ( 'cc_plugin_ncei' , 'data/unitless.json' ) , 'r' ) as f : _UNITLESS_DB = json . load ( f ) return _UNITLESS_DB
Returns a list of valid standard_names that are allowed to be unitless
82
15
10,277
def get_lat_variable ( nc ) : if 'latitude' in nc . variables : return 'latitude' latitudes = nc . get_variables_by_attributes ( standard_name = "latitude" ) if latitudes : return latitudes [ 0 ] . name return None
Returns the variable for latitude
66
5
10,278
def get_lon_variable ( nc ) : if 'longitude' in nc . variables : return 'longitude' longitudes = nc . get_variables_by_attributes ( standard_name = "longitude" ) if longitudes : return longitudes [ 0 ] . name return None
Returns the variable for longitude
66
6
10,279
def get_crs_variable ( ds ) : for var in ds . variables : grid_mapping = getattr ( ds . variables [ var ] , 'grid_mapping' , '' ) if grid_mapping and grid_mapping in ds . variables : return grid_mapping return None
Returns the name of the variable identified by a grid_mapping attribute
68
14
10,280
def is_2d_regular_grid ( nc , variable ) : # x(x), y(y), t(t) # X(t, y, x) dims = nc . variables [ variable ] . dimensions cmatrix = coordinate_dimension_matrix ( nc ) for req in ( 'x' , 'y' , 't' ) : if req not in cmatrix : return False x = get_lon_variable ( nc ) y = get_lat_variable ( nc ) t = get_time_variable ( nc ) if cmatrix [ 'x' ] != ( x , ) : return False if cmatrix [ 'y' ] != ( y , ) : return False if cmatrix [ 't' ] != ( t , ) : return False # Relaxed dimension ordering if len ( dims ) == 3 and x in dims and y in dims and t in dims : return True return False
Returns True if the variable is a 2D Regular grid .
208
12
10,281
def handle_read ( repo , * * kwargs ) : log . info ( 'read: %s %s' % ( repo , kwargs ) ) if type ( repo ) in [ unicode , str ] : return { 'name' : 'Repo' , 'desc' : 'Welcome to Grit' , 'comment' : '' } else : return repo . serialize ( )
handles reading repo information
85
5
10,282
def dict_from_object ( obj : object ) : # If object is a dict instance, no need to convert. return ( obj if isinstance ( obj , dict ) else { attr : getattr ( obj , attr ) for attr in dir ( obj ) if not attr . startswith ( '_' ) } )
Convert a object into dictionary with all of its readable attributes .
72
13
10,283
def xgetattr ( obj : object , name : str , default = _sentinel , getitem = False ) : if isinstance ( obj , dict ) : if getitem : # In tune with `dict.__getitem__` method. return obj [ name ] else : # In tune with `dict.get` method. val = obj . get ( name , default ) return None if val is _sentinel else val else : # If object is not a dict, in tune with `getattr` method. val = getattr ( obj , name , default ) if val is _sentinel : msg = '%r object has no attribute %r' % ( obj . __class__ , name ) raise AttributeError ( msg ) else : return val
Get attribute value from object .
160
6
10,284
def list_config_files ( ) -> List [ Text ] : return [ os . path . join ( os . path . dirname ( __file__ ) , 'default_settings.py' ) , os . getenv ( ENVIRONMENT_VARIABLE , '' ) , ]
This function returns the list of configuration files to load .
63
11
10,285
def camel_to_snake_case ( name ) : pattern = r'[A-Z][a-z]+|[A-Z]+(?![a-z])' return '_' . join ( map ( str . lower , re . findall ( pattern , name ) ) )
Takes a camelCased string and converts to snake_case .
64
14
10,286
def match_prefix ( prefix , line ) : m = re . match ( prefix , line . expandtabs ( 4 ) ) if not m : if re . match ( prefix , line . expandtabs ( 4 ) . replace ( '\n' , ' ' * 99 + '\n' ) ) : return len ( line ) - 1 return - 1 pos = m . end ( ) if pos == 0 : return 0 for i in range ( 1 , len ( line ) + 1 ) : if len ( line [ : i ] . expandtabs ( 4 ) ) >= pos : return i
Check if the line starts with given prefix and return the position of the end of prefix . If the prefix is not matched return - 1 .
126
28
10,287
def expect_re ( self , regexp ) : prefix_len = self . match_prefix ( self . prefix , self . next_line ( require_prefix = False ) ) if prefix_len >= 0 : match = self . _expect_re ( regexp , self . pos + prefix_len ) self . match = match return match else : return None
Test against the given regular expression and returns the match object .
77
12
10,288
def next_line ( self , require_prefix = True ) : if require_prefix : m = self . expect_re ( r'(?m)[^\n]*?$\n?' ) else : m = self . _expect_re ( r'(?m)[^\n]*$\n?' , self . pos ) self . match = m if m : return m . group ( )
Return the next line in the source .
90
8
10,289
def consume ( self ) : if self . match : self . pos = self . match . end ( ) if self . match . group ( ) [ - 1 ] == '\n' : self . _update_prefix ( ) self . match = None
Consume the body of source . pos will move forward .
53
12
10,290
def asDict ( self ) : ret = { } for field in BackgroundTaskInfo . FIELDS : ret [ field ] = getattr ( self , field ) return ret
asDict - Returns a copy of the current state as a dictionary . This copy will not be updated automatically .
37
23
10,291
def register ( name , _callable = None ) : def wrapper ( _callable ) : registered_checks [ name ] = _callable return _callable # If function or class is given, do the registeration if _callable : return wrapper ( _callable ) return wrapper
A decorator used for register custom check .
60
9
10,292
def serialize ( self ) : if self . response is not None : return { 'messaging_type' : 'RESPONSE' } if self . update is not None : return { 'messaging_type' : 'UPDATE' } if self . tag is not None : return { 'messaging_type' : 'MESSAGE_TAG' , 'tag' : self . tag . value , } if self . subscription is not None : return { 'messaging_type' : 'NON_PROMOTIONAL_SUBSCRIPTION' }
Generates the messaging - type - related part of the message dictionary .
121
14
10,293
async def patch_register ( self , register : Dict , request : 'Request' ) : register [ 'choices' ] = { o . slug : { 'intent' : o . intent . key if o . intent else None , 'text' : await render ( o . text , request ) , } for o in self . options if isinstance ( o , QuickRepliesList . TextOption ) } return register
Store all options in the choices sub - register . We store both the text and the potential intent in order to match both regular quick reply clicks but also the user typing stuff on his keyboard that matches more or less the content of quick replies .
89
48
10,294
def is_sharable ( self ) : return bool ( self . sharable and all ( x . is_sharable ( ) for x in self . elements ) )
Can only be sharable if marked as such and no child element is blocking sharing due to security reasons .
37
21
10,295
def from_value ( cls , value ) : warnings . warn ( "{0}.{1} will be deprecated in a future release. " "Please use {0}.{2} instead" . format ( cls . __name__ , cls . from_value . __name__ , cls . get . __name__ ) , PendingDeprecationWarning ) return cls [ value ]
Return the EChoice object associated with this value if any .
85
12
10,296
def bernard_auth ( func ) : @ wraps ( func ) async def wrapper ( request : Request ) : def get_query_token ( ) : token_key = settings . WEBVIEW_TOKEN_KEY return request . query . get ( token_key , '' ) def get_header_token ( ) : header_key = settings . WEBVIEW_HEADER_NAME return request . headers . get ( header_key , '' ) try : token = next ( filter ( None , [ get_header_token ( ) , get_query_token ( ) , ] ) ) except StopIteration : token = '' try : body = await request . json ( ) except ValueError : body = None msg , platform = await manager . message_from_token ( token , body ) if not msg : return json_response ( { 'status' : 'unauthorized' , 'message' : 'No valid token found' , } , status = 401 ) return await func ( msg , platform ) return wrapper
Authenticates the users based on the query - string - provided token
215
13
10,297
async def postback_me ( msg : BaseMessage , platform : Platform ) -> Response : async def get_basic_info ( _msg : BaseMessage , _platform : Platform ) : user = _msg . get_user ( ) return { 'friendly_name' : await user . get_friendly_name ( ) , 'locale' : await user . get_locale ( ) , 'platform' : _platform . NAME , } func = MiddlewareManager . instance ( ) . get ( 'api_postback_me' , get_basic_info ) return json_response ( await func ( msg , platform ) )
Provides the front - end with details about the user . This output can be completed using the api_postback_me middleware hook .
134
29
10,298
async def postback_send ( msg : BaseMessage , platform : Platform ) -> Response : await platform . inject_message ( msg ) return json_response ( { 'status' : 'ok' , } )
Injects the POST body into the FSM as a Postback message .
45
16
10,299
async def postback_analytics ( msg : BaseMessage , platform : Platform ) -> Response : try : pb = msg . get_layers ( ) [ 0 ] assert isinstance ( pb , Postback ) user = msg . get_user ( ) user_lang = await user . get_locale ( ) user_id = user . id if pb . payload [ 'event' ] == 'page_view' : func = 'page_view' path = pb . payload [ 'path' ] title = pb . payload . get ( 'title' , '' ) args = [ path , title , user_id , user_lang ] else : return json_response ( { 'status' : 'unknown event' , 'message' : f'"{pb.payload["event"]}" is not a recognized ' f'analytics event' , } ) async for p in providers ( ) : await getattr ( p , func ) ( * args ) except ( KeyError , IndexError , AssertionError , TypeError ) : return json_response ( { 'status' : 'missing data' } , status = 400 ) else : return json_response ( { 'status' : 'ok' , } )
Makes a call to an analytics function .
262
9