Columns (name, type, min, max):
idx            int64             0     251k
question       string (length)   53    3.53k
target         string (length)   5     1.23k
len_question   int64             20    893
len_target     int64             3     238
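Each record below is one row of this table: an idx, a whitespace-tokenized function definition (question), its one-line natural-language summary (target), and the two precomputed length columns. As a minimal sketch of how such rows might be consumed, assuming a hypothetical JSON Lines export named code_summaries.jsonl whose objects carry exactly these five keys:

import json

# Minimal sketch: iterate rows of the code-summarization table above.
# "code_summaries.jsonl" is a hypothetical export; each line is assumed to be a
# JSON object with the keys idx, question, target, len_question, len_target.
with open("code_summaries.jsonl", encoding="utf-8") as fh:
    for line in fh:
        row = json.loads(line)
        # question holds whitespace-tokenized source code; target holds the summary.
        code_tokens = row["question"].split()
        summary_tokens = row["target"].split()
        print(row["idx"], len(code_tokens), row["len_question"],
              len(summary_tokens), row["len_target"])

The split() counts are only an approximation of len_question and len_target, since the exact tokenization used to produce those columns is not stated here.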
242,000
def on_attribute ( self , node ) : # ('value', 'attr', 'ctx') ctx = node . ctx . __class__ if ctx == ast . Store : msg = "attribute for storage: shouldn't be here!" self . raise_exception ( node , exc = RuntimeError , msg = msg ) sym = self . run ( node . value ) if ctx == ast . Del : return delattr ( sym , node . attr ) # ctx is ast.Load fmt = "cannot access attribute '%s' for %s" if node . attr not in UNSAFE_ATTRS : fmt = "no attribute '%s' for %s" try : return getattr ( sym , node . attr ) except AttributeError : pass # AttributeError or accessed unsafe attribute obj = self . run ( node . value ) msg = fmt % ( node . attr , obj ) self . raise_exception ( node , exc = AttributeError , msg = msg )
Extract attribute .
217
4
242,001
def on_assign ( self , node ) : # ('targets', 'value') val = self . run ( node . value ) for tnode in node . targets : self . node_assign ( tnode , val ) return
Simple assignment .
51
3
242,002
def on_augassign ( self , node ) : # ('target', 'op', 'value') return self . on_assign ( ast . Assign ( targets = [ node . target ] , value = ast . BinOp ( left = node . target , op = node . op , right = node . value ) ) )
Augmented assign .
70
4
242,003
def on_slice ( self , node ) : # ():('lower', 'upper', 'step') return slice ( self . run ( node . lower ) , self . run ( node . upper ) , self . run ( node . step ) )
Simple slice .
52
3
242,004
def on_extslice ( self , node ) : # ():('dims',) return tuple ( [ self . run ( tnode ) for tnode in node . dims ] )
Extended slice .
40
4
242,005
def on_delete ( self , node ) : # ('targets',) for tnode in node . targets : if tnode . ctx . __class__ != ast . Del : break children = [ ] while tnode . __class__ == ast . Attribute : children . append ( tnode . attr ) tnode = tnode . value if tnode . __class__ == ast . Name and tnode . id not in self . readonly_symbols : children . append ( tnode . id ) children . reverse ( ) self . symtable . pop ( '.' . join ( children ) ) else : msg = "could not delete symbol" self . raise_exception ( node , msg = msg )
Delete statement .
154
3
242,006
def on_unaryop ( self , node ) : # ('op', 'operand') return op2func ( node . op ) ( self . run ( node . operand ) )
Unary operator .
40
4
242,007
def on_binop ( self , node ) : # ('left', 'op', 'right') return op2func ( node . op ) ( self . run ( node . left ) , self . run ( node . right ) )
Binary operator .
49
4
242,008
def on_boolop ( self , node ) : # ('op', 'values') val = self . run ( node . values [ 0 ] ) is_and = ast . And == node . op . __class__ if ( is_and and val ) or ( not is_and and not val ) : for n in node . values [ 1 : ] : val = op2func ( node . op ) ( val , self . run ( n ) ) if ( is_and and not val ) or ( not is_and and val ) : break return val
Boolean operator .
118
4
242,009
def _printer ( self , * out , * * kws ) : flush = kws . pop ( 'flush' , True ) fileh = kws . pop ( 'file' , self . writer ) sep = kws . pop ( 'sep' , ' ' ) end = kws . pop ( 'end' , '\n' ) print ( * out , file = fileh , sep = sep , end = end ) if flush : fileh . flush ( )
Generic print function .
104
4
242,010
def on_if ( self , node ) : # ('test', 'body', 'orelse') block = node . body if not self . run ( node . test ) : block = node . orelse for tnode in block : self . run ( tnode )
Regular if-then-else statement.
58
8
242,011
def on_ifexp ( self , node ) : # ('test', 'body', 'orelse') expr = node . orelse if self . run ( node . test ) : expr = node . body return self . run ( expr )
If expressions .
52
3
242,012
def on_while ( self , node ) : # ('test', 'body', 'orelse') while self . run ( node . test ) : self . _interrupt = None for tnode in node . body : self . run ( tnode ) if self . _interrupt is not None : break if isinstance ( self . _interrupt , ast . Break ) : break else : for tnode in node . orelse : self . run ( tnode ) self . _interrupt = None
While blocks .
107
3
242,013
def on_for ( self , node ) : # ('target', 'iter', 'body', 'orelse') for val in self . run ( node . iter ) : self . node_assign ( node . target , val ) self . _interrupt = None for tnode in node . body : self . run ( tnode ) if self . _interrupt is not None : break if isinstance ( self . _interrupt , ast . Break ) : break else : for tnode in node . orelse : self . run ( tnode ) self . _interrupt = None
For blocks .
125
3
242,014
def on_listcomp ( self , node ) : # ('elt', 'generators') out = [ ] for tnode in node . generators : if tnode . __class__ == ast . comprehension : for val in self . run ( tnode . iter ) : self . node_assign ( tnode . target , val ) add = True for cond in tnode . ifs : add = add and self . run ( cond ) if add : out . append ( self . run ( node . elt ) ) return out
List comprehension .
111
3
242,015
def on_excepthandler ( self , node ) : # ('type', 'name', 'body') return ( self . run ( node . type ) , node . name , node . body )
Exception handler ...
44
3
242,016
def on_call ( self , node ) : # ('func', 'args', 'keywords'. Py<3.5 has 'starargs' and 'kwargs' too) func = self . run ( node . func ) if not hasattr ( func , '__call__' ) and not isinstance ( func , type ) : msg = "'%s' is not callable!!" % ( func ) self . raise_exception ( node , exc = TypeError , msg = msg ) args = [ self . run ( targ ) for targ in node . args ] starargs = getattr ( node , 'starargs' , None ) if starargs is not None : args = args + self . run ( starargs ) keywords = { } if six . PY3 and func == print : keywords [ 'file' ] = self . writer for key in node . keywords : if not isinstance ( key , ast . keyword ) : msg = "keyword error in function call '%s'" % ( func ) self . raise_exception ( node , msg = msg ) keywords [ key . arg ] = self . run ( key . value ) kwargs = getattr ( node , 'kwargs' , None ) if kwargs is not None : keywords . update ( self . run ( kwargs ) ) try : return func ( * args , * * keywords ) except Exception as ex : self . raise_exception ( node , msg = "Error running function call '%s' with args %s and " "kwargs %s: %s" % ( func . __name__ , args , keywords , ex ) )
Function execution .
346
3
242,017
def on_functiondef ( self , node ) : # ('name', 'args', 'body', 'decorator_list') if node . decorator_list : raise Warning ( "decorated procedures not supported!" ) kwargs = [ ] if not valid_symbol_name ( node . name ) or node . name in self . readonly_symbols : errmsg = "invalid function name (reserved word?) %s" % node . name self . raise_exception ( node , exc = NameError , msg = errmsg ) offset = len ( node . args . args ) - len ( node . args . defaults ) for idef , defnode in enumerate ( node . args . defaults ) : defval = self . run ( defnode ) keyval = self . run ( node . args . args [ idef + offset ] ) kwargs . append ( ( keyval , defval ) ) if version_info [ 0 ] == 3 : args = [ tnode . arg for tnode in node . args . args [ : offset ] ] else : args = [ tnode . id for tnode in node . args . args [ : offset ] ] doc = None nb0 = node . body [ 0 ] if isinstance ( nb0 , ast . Expr ) and isinstance ( nb0 . value , ast . Str ) : doc = nb0 . value . s varkws = node . args . kwarg vararg = node . args . vararg if version_info [ 0 ] == 3 : if isinstance ( vararg , ast . arg ) : vararg = vararg . arg if isinstance ( varkws , ast . arg ) : varkws = varkws . arg self . symtable [ node . name ] = Procedure ( node . name , self , doc = doc , lineno = self . lineno , body = node . body , args = args , kwargs = kwargs , vararg = vararg , varkws = varkws ) if node . name in self . no_deepcopy : self . no_deepcopy . remove ( node . name )
Define procedures .
461
4
242,018
def safe_pow ( base , exp ) : if exp > MAX_EXPONENT : raise RuntimeError ( "Invalid exponent, max exponent is {}" . format ( MAX_EXPONENT ) ) return base ** exp
safe version of pow
47
4
242,019
def safe_mult ( a , b ) : if isinstance ( a , str ) and isinstance ( b , int ) and len ( a ) * b > MAX_STR_LEN : raise RuntimeError ( "String length exceeded, max string length is {}" . format ( MAX_STR_LEN ) ) return a * b
safe version of multiply
71
4
242,020
def safe_add ( a , b ) : if isinstance ( a , str ) and isinstance ( b , str ) and len ( a ) + len ( b ) > MAX_STR_LEN : raise RuntimeError ( "String length exceeded, max string length is {}" . format ( MAX_STR_LEN ) ) return a + b
safe version of add
74
4
242,021
def safe_lshift ( a , b ) : if b > MAX_SHIFT : raise RuntimeError ( "Invalid left shift, max left shift is {}" . format ( MAX_SHIFT ) ) return a << b
safe version of lshift
47
5
242,022
def valid_symbol_name ( name ) : if name in RESERVED_WORDS : return False gen = generate_tokens ( io . BytesIO ( name . encode ( 'utf-8' ) ) . readline ) typ , _ , start , end , _ = next ( gen ) if typ == tk_ENCODING : typ , _ , start , end , _ = next ( gen ) return typ == tk_NAME and start == ( 1 , 0 ) and end == ( 1 , len ( name ) )
Determine whether the input symbol name is a valid name .
117
13
242,023
def make_symbol_table ( use_numpy = True , * * kws ) : symtable = { } for sym in FROM_PY : if sym in builtins : symtable [ sym ] = builtins [ sym ] for sym in FROM_MATH : if hasattr ( math , sym ) : symtable [ sym ] = getattr ( math , sym ) if HAS_NUMPY and use_numpy : for sym in FROM_NUMPY : if hasattr ( numpy , sym ) : symtable [ sym ] = getattr ( numpy , sym ) for name , sym in NUMPY_RENAMES . items ( ) : if hasattr ( numpy , sym ) : symtable [ name ] = getattr ( numpy , sym ) symtable . update ( LOCALFUNCS ) symtable . update ( kws ) return symtable
Create a default symbol table, taking a dict of user-defined symbols.
188
14
242,024
def get_error ( self ) : col_offset = - 1 if self . node is not None : try : col_offset = self . node . col_offset except AttributeError : pass try : exc_name = self . exc . __name__ except AttributeError : exc_name = str ( self . exc ) if exc_name in ( None , 'None' ) : exc_name = 'UnknownError' out = [ " %s" % self . expr ] if col_offset > 0 : out . append ( " %s^^^" % ( ( col_offset ) * ' ' ) ) out . append ( str ( self . msg ) ) return ( exc_name , '\n' . join ( out ) )
Retrieve error data .
158
5
242,025
def add_config_path ( self , path ) : abspath = util . abs_pathify ( path ) if abspath not in self . _config_paths : log . info ( "Adding {0} to paths to search" . format ( abspath ) ) self . _config_paths . append ( abspath )
Add a path for Vyper to search for the config file in . Can be called multiple times to define multiple search paths .
71
26
242,026
def sub ( self , key ) : subv = Vyper ( ) data = self . get ( key ) if isinstance ( data , dict ) : subv . _config = data return subv else : return None
Returns a new Vyper instance representing a subtree of this instance.
47
14
242,027
def unmarshall_key ( self , key , cls ) : return setattr ( cls , key , self . get ( key ) )
Takes a single key and unmarshalls it into a class .
31
14
242,028
def unmarshall ( self , cls ) : for k , v in self . all_settings ( ) . items ( ) : setattr ( cls , k , v ) return cls
Unmarshalls the config into a class . Make sure that the tags on the attributes of the class are properly set .
41
25
242,029
def bind_env ( self , * input_ ) : if len ( input_ ) == 0 : return "bind_env missing key to bind to" key = input_ [ 0 ] . lower ( ) if len ( input_ ) == 1 : env_key = self . _merge_with_env_prefix ( key ) else : env_key = input_ [ 1 ] self . _env [ key ] = env_key if self . _key_delimiter in key : parts = input_ [ 0 ] . split ( self . _key_delimiter ) env_info = { "path" : parts [ 1 : - 1 ] , "final_key" : parts [ - 1 ] , "env_key" : env_key } if self . _env . get ( parts [ 0 ] ) is None : self . _env [ parts [ 0 ] ] = [ env_info ] else : self . _env [ parts [ 0 ] ] . append ( env_info ) return None
Binds a Vyper key to an ENV variable. ENV variables are case sensitive. If only a key is provided, it will use the env key matching the key, uppercased. env_prefix will be used, when set, if the env name is not provided.
215
54
242,030
def is_set ( self , key ) : path = key . split ( self . _key_delimiter ) lower_case_key = key . lower ( ) val = self . _find ( lower_case_key ) if val is None : source = self . _find ( path [ 0 ] . lower ( ) ) if source is not None and isinstance ( source , dict ) : val = self . _search_dict ( source , path [ 1 : : ] ) return val is not None
Check to see if the key has been set in any of the data locations .
107
16
242,031
def register_alias ( self , alias , key ) : alias = alias . lower ( ) key = key . lower ( ) if alias != key and alias != self . _real_key ( key ) : exists = self . _aliases . get ( alias ) if exists is None : # if we alias something that exists in one of the dicts to # another name, we'll never be able to get that value using the # original name, so move the config value to the new _real_key. val = self . _config . get ( alias ) if val : self . _config . pop ( alias ) self . _config [ key ] = val val = self . _kvstore . get ( alias ) if val : self . _kvstore . pop ( alias ) self . _kvstore [ key ] = val val = self . _defaults . get ( alias ) if val : self . _defaults . pop ( alias ) self . _defaults [ key ] = val val = self . _override . get ( alias ) if val : self . _override . pop ( alias ) self . _override [ key ] = val self . _aliases [ alias ] = key else : log . warning ( "Creating circular reference alias {0} {1} {2}" . format ( alias , key , self . _real_key ( key ) ) )
Aliases provide another accessor for the same key . This enables one to change a name without breaking the application .
294
23
242,032
def set_default ( self , key , value ) : k = self . _real_key ( key . lower ( ) ) self . _defaults [ k ] = value
Set the default value for this key. The default is only used when no value is provided by the user via arg, config, or env.
37
25
242,033
def _unmarshall_reader ( self , file_ , d ) : return util . unmarshall_config_reader ( file_ , d , self . _get_config_type ( ) )
Unmarshall a file into a dict .
44
10
242,034
def _get_key_value_config ( self ) : for rp in self . _remote_providers : val = self . _get_remote_config ( rp ) self . _kvstore = val return None raise errors . RemoteConfigError ( "No Files Found" )
Retrieves the first found remote configuration .
62
9
242,035
def all_keys ( self , uppercase_keys = False ) : d = { } for k in self . _override . keys ( ) : d [ k . upper ( ) if uppercase_keys else k . lower ( ) ] = { } for k in self . _args . keys ( ) : d [ k . upper ( ) if uppercase_keys else k . lower ( ) ] = { } for k in self . _env . keys ( ) : d [ k . upper ( ) if uppercase_keys else k . lower ( ) ] = { } for k in self . _config . keys ( ) : d [ k . upper ( ) if uppercase_keys else k . lower ( ) ] = { } for k in self . _kvstore . keys ( ) : d [ k . upper ( ) if uppercase_keys else k . lower ( ) ] = { } for k in self . _defaults . keys ( ) : d [ k . upper ( ) if uppercase_keys else k . lower ( ) ] = { } for k in self . _aliases . keys ( ) : d [ k . upper ( ) if uppercase_keys else k . lower ( ) ] = { } return d . keys ( )
Return all keys, regardless of where they are set.
276
9
242,036
def all_settings ( self , uppercase_keys = False ) : d = { } for k in self . all_keys ( uppercase_keys ) : d [ k ] = self . get ( k ) return d
Return all settings as a dict .
49
7
242,037
def debug ( self ) : # pragma: no cover print ( "Aliases:" ) pprint . pprint ( self . _aliases ) print ( "Override:" ) pprint . pprint ( self . _override ) print ( "Args:" ) pprint . pprint ( self . _args ) print ( "Env:" ) pprint . pprint ( self . _env ) print ( "Config:" ) pprint . pprint ( self . _config ) print ( "Key/Value Store:" ) pprint . pprint ( self . _kvstore ) print ( "Defaults:" ) pprint . pprint ( self . _defaults )
Prints all configuration registries for debugging purposes .
142
10
242,038
def server ( * * kwargs ) : start_server ( * * { k : v for k , v in kwargs . items ( ) if v } , blocking = True )
Starts the Clearly Server .
40
6
242,039
def start_server ( broker , backend = None , port = 12223 , max_tasks = 10000 , max_workers = 100 , blocking = False , debug = False ) : # pragma: no cover _setup_logging ( debug ) queue_listener_dispatcher = Queue ( ) listener = EventListener ( broker , queue_listener_dispatcher , backend = backend , max_tasks_in_memory = max_tasks , max_workers_in_memory = max_workers ) dispatcher = StreamingDispatcher ( queue_listener_dispatcher ) clearlysrv = ClearlyServer ( listener , dispatcher ) return _serve ( clearlysrv , port , blocking )
Starts a Clearly Server programmatically .
151
8
242,040
def _event_to_pb ( event ) : if isinstance ( event , ( TaskData , Task ) ) : key , klass = 'task' , clearly_pb2 . TaskMessage elif isinstance ( event , ( WorkerData , Worker ) ) : key , klass = 'worker' , clearly_pb2 . WorkerMessage else : raise ValueError ( 'unknown event' ) keys = klass . DESCRIPTOR . fields_by_name . keys ( ) # noinspection PyProtectedMember data = { k : v for k , v in getattr ( event , '_asdict' , # internal TaskData and WorkerData lambda : { f : getattr ( event , f ) for f in event . _fields } ) # celery Task and Worker ( ) . items ( ) if k in keys } return key , klass ( * * data )
Supports converting internal TaskData and WorkerData, as well as celery Task and Worker, to protocol buffer messages.
186
22
242,041
def filter_tasks ( self , request , context ) : _log_request ( request , context ) tasks_pattern , tasks_negate = PATTERN_PARAMS_OP ( request . tasks_filter ) state_pattern = request . state_pattern limit , reverse = request . limit , request . reverse pregex = re . compile ( tasks_pattern ) # pattern filter condition sregex = re . compile ( state_pattern ) # state filter condition def pcondition ( task ) : return accepts ( pregex , tasks_negate , task . name , task . routing_key ) def scondition ( task ) : return accepts ( sregex , tasks_negate , task . state ) found_tasks = ( task for _ , task in self . listener . memory . tasks_by_time ( limit = limit or None , reverse = reverse ) if pcondition ( task ) and scondition ( task ) ) def callback ( t ) : logger . debug ( '%s iterated %d tasks in %s (%s)' , self . filter_tasks . __name__ , t . count , t . duration_human , t . throughput_human ) for task in about_time ( callback , found_tasks ) : yield ClearlyServer . _event_to_pb ( task ) [ 1 ]
Filter tasks by matching patterns to name, routing key, and state.
279
12
242,042
def filter_workers ( self , request , context ) : _log_request ( request , context ) workers_pattern , workers_negate = PATTERN_PARAMS_OP ( request . workers_filter ) hregex = re . compile ( workers_pattern ) # hostname filter condition def hcondition ( worker ) : return accepts ( hregex , workers_negate , worker . hostname ) # pragma: no branch found_workers = ( worker for worker in sorted ( self . listener . memory . workers . values ( ) , key = WORKER_HOSTNAME_OP ) if hcondition ( worker ) ) def callback ( t ) : logger . debug ( '%s iterated %d workers in %s (%s)' , self . filter_workers . __name__ , t . count , t . duration_human , t . throughput_human ) for worker in about_time ( callback , found_workers ) : yield ClearlyServer . _event_to_pb ( worker ) [ 1 ]
Filter workers by matching a pattern to hostname .
213
10
242,043
def seen_tasks ( self , request , context ) : _log_request ( request , context ) result = clearly_pb2 . SeenTasksMessage ( ) result . task_types . extend ( self . listener . memory . task_types ( ) ) return result
Returns all seen task types .
57
6
242,044
def reset_tasks ( self , request , context ) : _log_request ( request , context ) self . listener . memory . clear_tasks ( ) return clearly_pb2 . Empty ( )
Resets all captured tasks .
43
6
242,045
def get_stats ( self , request , context ) : _log_request ( request , context ) m = self . listener . memory return clearly_pb2 . StatsMessage ( task_count = m . task_count , event_count = m . event_count , len_tasks = len ( m . tasks ) , len_workers = len ( m . workers ) )
Returns the server statistics .
80
5
242,046
def accepts ( regex , negate , * values ) : return any ( v and regex . search ( v ) for v in values ) != negate
Given a compiled regex and a negate flag, find whether any of the values match.
29
15
242,047
def copy_update ( pb_message , * * kwds ) : result = pb_message . __class__ ( ) result . CopyFrom ( pb_message ) for k , v in kwds . items ( ) : setattr ( result , k , v ) return result
Returns a copy of the PB object with some fields updated .
63
12
242,048
def __start ( self ) : # pragma: no cover assert not self . dispatcher_thread self . dispatcher_thread = threading . Thread ( target = self . __run_dispatcher , name = 'clearly-dispatcher' ) self . dispatcher_thread . daemon = True self . running = True # graceful shutdown self . dispatcher_thread . start ( )
Starts the real-time engine that captures tasks.
79
11
242,049
def streaming_client ( self , tasks_regex , tasks_negate , workers_regex , workers_negate ) : cc = CapturingClient ( Queue ( ) , re . compile ( tasks_regex ) , tasks_negate , re . compile ( workers_regex ) , workers_negate ) self . observers . append ( cc ) yield cc . queue self . observers . remove ( cc )
Connects a client to the streaming capture, filtering the events that are sent to it.
89
17
242,050
def __start ( self ) : # pragma: no cover assert not self . _listener_thread self . _listener_thread = threading . Thread ( target = self . __run_listener , name = 'clearly-listener' ) self . _listener_thread . daemon = True self . _listener_thread . start ( ) self . _wait_event . wait ( ) self . _wait_event . clear ( )
Starts the real-time engine that captures events.
97
11
242,051
def capture ( self , pattern = None , negate = False , workers = None , negate_workers = False , params = None , success = False , error = True , stats = False ) : request = clearly_pb2 . CaptureRequest ( tasks_capture = clearly_pb2 . PatternFilter ( pattern = pattern or '.' , negate = negate ) , workers_capture = clearly_pb2 . PatternFilter ( pattern = workers or '.' , negate = negate_workers ) , ) try : for realtime in self . _stub . capture_realtime ( request ) : if realtime . HasField ( 'task' ) : ClearlyClient . _display_task ( realtime . task , params , success , error ) elif realtime . HasField ( 'worker' ) : ClearlyClient . _display_worker ( realtime . worker , stats ) else : print ( 'unknown event:' , realtime ) break except KeyboardInterrupt : pass
Starts capturing selected events in real time. You can filter exactly what you want to see, as the Clearly Server handles all task and worker updates being sent to celery. Several clients can see different sets of events at the same time.
201
48
242,052
def tasks ( self , pattern = None , negate = False , state = None , limit = None , reverse = True , params = None , success = False , error = True ) : request = clearly_pb2 . FilterTasksRequest ( tasks_filter = clearly_pb2 . PatternFilter ( pattern = pattern or '.' , negate = negate ) , state_pattern = state or '.' , limit = limit , reverse = reverse ) for task in about_time ( ClearlyClient . _fetched_callback , self . _stub . filter_tasks ( request ) ) : ClearlyClient . _display_task ( task , params , success , error )
Filters stored tasks and displays their current statuses .
137
11
242,053
def seen_tasks ( self ) : print ( '\n' . join ( self . _stub . seen_tasks ( clearly_pb2 . Empty ( ) ) . task_types ) )
Shows a list of seen task types .
44
9
242,054
def detail_action ( * * kwargs ) : def decorator ( func ) : func . action = True func . detail = True func . kwargs = kwargs return func return decorator
Used to mark a method on a ResourceBinding that should be routed for detail actions .
43
18
242,055
def list_action ( * * kwargs ) : def decorator ( func ) : func . action = True func . detail = False func . kwargs = kwargs return func return decorator
Used to mark a method on a ResourceBinding that should be routed for list actions .
43
18
242,056
def broadcast_to ( array , shape , subok = False ) : return _broadcast_to ( array , shape , subok = subok , readonly = True )
Broadcast an array to a new shape .
37
9
242,057
def _K ( m ) : M = m * ( m - 1 ) // 2 K = np . zeros ( ( M , m ** 2 ) , dtype = np . int64 ) row = 0 for j in range ( 1 , m ) : col = ( j - 1 ) * m + j s = m - j K [ row : ( row + s ) , col : ( col + s ) ] = np . eye ( s ) row += s return K
matrix K_m from Wiktorsson2001
100
11
242,058
def wait_for_compactions ( self , timeout = 600 ) : for node in list ( self . nodes . values ( ) ) : if node . is_running ( ) : node . wait_for_compactions ( timeout ) return self
Wait for all compactions to finish on all nodes .
51
11
242,059
def watch_log_for_alive ( self , nodes , from_mark = None , timeout = 720 , filename = 'system.log' ) : super ( DseNode , self ) . watch_log_for_alive ( nodes , from_mark = from_mark , timeout = timeout , filename = filename )
Watch the log of this node until it detects that the provided other nodes are marked UP . This method works similarly to watch_log_for_death .
69
31
242,060
def load ( path , name , cluster ) : node_path = os . path . join ( path , name ) filename = os . path . join ( node_path , 'node.conf' ) with open ( filename , 'r' ) as f : data = yaml . safe_load ( f ) try : itf = data [ 'interfaces' ] initial_token = None if 'initial_token' in data : initial_token = data [ 'initial_token' ] cassandra_version = None if 'cassandra_version' in data : cassandra_version = LooseVersion ( data [ 'cassandra_version' ] ) remote_debug_port = 2000 if 'remote_debug_port' in data : remote_debug_port = data [ 'remote_debug_port' ] binary_interface = None if 'binary' in itf and itf [ 'binary' ] is not None : binary_interface = tuple ( itf [ 'binary' ] ) thrift_interface = None if 'thrift' in itf and itf [ 'thrift' ] is not None : thrift_interface = tuple ( itf [ 'thrift' ] ) node = cluster . create_node ( data [ 'name' ] , data [ 'auto_bootstrap' ] , thrift_interface , tuple ( itf [ 'storage' ] ) , data [ 'jmx_port' ] , remote_debug_port , initial_token , save = False , binary_interface = binary_interface , byteman_port = data [ 'byteman_port' ] , derived_cassandra_version = cassandra_version ) node . status = data [ 'status' ] if 'pid' in data : node . pid = int ( data [ 'pid' ] ) if 'install_dir' in data : node . __install_dir = data [ 'install_dir' ] if 'config_options' in data : node . __config_options = data [ 'config_options' ] if 'dse_config_options' in data : node . _dse_config_options = data [ 'dse_config_options' ] if 'environment_variables' in data : node . __environment_variables = data [ 'environment_variables' ] if 'data_center' in data : node . data_center = data [ 'data_center' ] if 'workloads' in data : node . workloads = data [ 'workloads' ] return node except KeyError as k : raise common . LoadError ( "Error Loading " + filename + ", missing property: " + str ( k ) )
Load a node from the path on disk to the config files, the node name, and the cluster the node is part of.
571
25
242,061
def get_install_dir ( self ) : if self . __install_dir is None : return self . cluster . get_install_dir ( ) else : common . validate_install_dir ( self . __install_dir ) return self . __install_dir
Returns the path to the cassandra source directory used by this node .
56
14
242,062
def set_install_dir ( self , install_dir = None , version = None , verbose = False ) : if version is None : self . __install_dir = install_dir if install_dir is not None : common . validate_install_dir ( install_dir ) else : self . __install_dir = self . node_setup ( version , verbose = verbose ) self . _cassandra_version = common . get_version_from_build ( self . __install_dir , cassandra = True ) if self . get_base_cassandra_version ( ) >= 4.0 : self . network_interfaces [ 'thrift' ] = None self . import_config_files ( ) self . import_bin_files ( ) self . __conf_updated = False return self
Sets the path to the cassandra source directory for use by this node .
175
16
242,063
def show ( self , only_status = False , show_cluster = True ) : self . __update_status ( ) indent = '' . join ( [ " " for i in xrange ( 0 , len ( self . name ) + 2 ) ] ) print_ ( "{}: {}" . format ( self . name , self . __get_status_string ( ) ) ) if not only_status : if show_cluster : print_ ( "{}{}={}" . format ( indent , 'cluster' , self . cluster . name ) ) print_ ( "{}{}={}" . format ( indent , 'auto_bootstrap' , self . auto_bootstrap ) ) if self . network_interfaces [ 'thrift' ] is not None : print_ ( "{}{}={}" . format ( indent , 'thrift' , self . network_interfaces [ 'thrift' ] ) ) if self . network_interfaces [ 'binary' ] is not None : print_ ( "{}{}={}" . format ( indent , 'binary' , self . network_interfaces [ 'binary' ] ) ) print_ ( "{}{}={}" . format ( indent , 'storage' , self . network_interfaces [ 'storage' ] ) ) print_ ( "{}{}={}" . format ( indent , 'jmx_port' , self . jmx_port ) ) print_ ( "{}{}={}" . format ( indent , 'remote_debug_port' , self . remote_debug_port ) ) print_ ( "{}{}={}" . format ( indent , 'byteman_port' , self . byteman_port ) ) print_ ( "{}{}={}" . format ( indent , 'initial_token' , self . initial_token ) ) if self . pid : print_ ( "{}{}={}" . format ( indent , 'pid' , self . pid ) )
Print info on this node's configuration.
412
8
242,064
def is_running ( self ) : self . __update_status ( ) return self . status == Status . UP or self . status == Status . DECOMMISSIONED
Return true if the node is running
36
7
242,065
def grep_log ( self , expr , filename = 'system.log' , from_mark = None ) : matchings = [ ] pattern = re . compile ( expr ) with open ( os . path . join ( self . get_path ( ) , 'logs' , filename ) ) as f : if from_mark : f . seek ( from_mark ) for line in f : m = pattern . search ( line ) if m : matchings . append ( ( line , m ) ) return matchings
Returns a list of lines matching the given regular expression in the Cassandra log of this node
108
18
242,066
def wait_for_binary_interface ( self , * * kwargs ) : if self . cluster . version ( ) >= '1.2' : self . watch_log_for ( "Starting listening for CQL clients" , * * kwargs ) binary_itf = self . network_interfaces [ 'binary' ] if not common . check_socket_listening ( binary_itf , timeout = 30 ) : warnings . warn ( "Binary interface %s:%s is not listening after 30 seconds, node may have failed to start." % ( binary_itf [ 0 ] , binary_itf [ 1 ] ) )
Waits for the Binary CQL interface to be listening. If version > 1.2, will check the log for 'Starting listening for CQL clients' before checking that the interface is listening.
139
36
242,067
def wait_for_thrift_interface ( self , * * kwargs ) : if self . cluster . version ( ) >= '4' : return self . watch_log_for ( "Listening for thrift clients..." , * * kwargs ) thrift_itf = self . network_interfaces [ 'thrift' ] if not common . check_socket_listening ( thrift_itf , timeout = 30 ) : warnings . warn ( "Thrift interface {}:{} is not listening after 30 seconds, node may have failed to start." . format ( thrift_itf [ 0 ] , thrift_itf [ 1 ] ) )
Waits for the Thrift interface to be listening .
143
11
242,068
def wait_for_compactions ( self , timeout = 120 ) : pattern = re . compile ( "pending tasks: 0" ) start = time . time ( ) while time . time ( ) - start < timeout : output , err , rc = self . nodetool ( "compactionstats" ) if pattern . search ( output ) : return time . sleep ( 1 ) raise TimeoutError ( "{} [{}] Compactions did not finish in {} seconds" . format ( time . strftime ( "%d %b %Y %H:%M:%S" , time . gmtime ( ) ) , self . name , timeout ) )
Wait for all compactions to finish on this node .
140
11
242,069
def update_startup_byteman_script ( self , byteman_startup_script ) : if self . byteman_port == '0' : raise common . LoadError ( 'Byteman is not installed' ) self . byteman_startup_script = byteman_startup_script self . import_config_files ( )
Update the byteman startup script, i.e. the rule injected before the node starts.
80
18
242,070
def _find_cmd ( self , cmd ) : cdir = self . get_install_cassandra_root ( ) if self . get_base_cassandra_version ( ) >= 2.1 : fcmd = common . join_bin ( cdir , os . path . join ( 'tools' , 'bin' ) , cmd ) else : fcmd = common . join_bin ( cdir , 'bin' , cmd ) try : if os . path . exists ( fcmd ) : os . chmod ( fcmd , stat . S_IRUSR | stat . S_IWUSR | stat . S_IXUSR | stat . S_IRGRP | stat . S_IXGRP | stat . S_IROTH | stat . S_IXOTH ) except : common . warning ( "Couldn't change permissions to use {0}." . format ( cmd ) ) common . warning ( "If it didn't work, you will have to do so manually." ) return fcmd
Locates command under cassandra root and fixes permissions if needed
218
12
242,071
def data_size ( self , live_data = None ) : if live_data is not None : warnings . warn ( "The 'live_data' keyword argument is deprecated." , DeprecationWarning ) output = self . nodetool ( 'info' ) [ 0 ] return _get_load_from_info_output ( output )
Uses nodetool info to get the size of a node's data in KB.
73
18
242,072
def get_sstable_data_files ( self , ks , table ) : p = self . get_sstable_data_files_process ( ks = ks , table = table ) out , _ , _ = handle_external_tool_process ( p , [ "sstableutil" , '--type' , 'final' , ks , table ] ) return sorted ( filter ( lambda s : s . endswith ( '-Data.db' ) , out . splitlines ( ) ) )
Read sstable data files by using sstableutil, so we ignore temporary files
111
15
242,073
def is_modern_windows_install ( version ) : version = LooseVersion ( str ( version ) ) if is_win ( ) and version >= LooseVersion ( '2.1' ) : return True else : return False
The 2.1 release line was when Cassandra received beta Windows support. Many features are gated based on that added compatibility.
49
25
242,074
def get_jdk_version ( ) : try : version = subprocess . check_output ( [ 'java' , '-version' ] , stderr = subprocess . STDOUT ) except OSError : print_ ( "ERROR: Could not find java. Is it in your path?" ) exit ( 1 ) return _get_jdk_version ( version )
Retrieve the Java version as reported in the quoted string returned by invoking java -version.
81
18
242,075
def wait_for_any_log ( nodes , pattern , timeout , filename = 'system.log' , marks = None ) : if marks is None : marks = { } for _ in range ( timeout ) : for node in nodes : found = node . grep_log ( pattern , filename = filename , from_mark = marks . get ( node , None ) ) if found : return node time . sleep ( 1 ) raise TimeoutError ( time . strftime ( "%d %b %Y %H:%M:%S" , time . gmtime ( ) ) + " Unable to find: " + repr ( pattern ) + " in any node log within " + str ( timeout ) + "s" )
Look for a pattern in the system.log of any node in a given list of nodes.
152
18
242,076
def download_version ( version , url = None , verbose = False , binary = False ) : assert_jdk_valid_for_cassandra_version ( version ) archive_url = ARCHIVE if CCM_CONFIG . has_option ( 'repositories' , 'cassandra' ) : archive_url = CCM_CONFIG . get ( 'repositories' , 'cassandra' ) if binary : archive_url = "%s/%s/apache-cassandra-%s-bin.tar.gz" % ( archive_url , version . split ( '-' ) [ 0 ] , version ) if url is None else url else : archive_url = "%s/%s/apache-cassandra-%s-src.tar.gz" % ( archive_url , version . split ( '-' ) [ 0 ] , version ) if url is None else url _ , target = tempfile . mkstemp ( suffix = ".tar.gz" , prefix = "ccm-" ) try : __download ( archive_url , target , show_progress = verbose ) common . info ( "Extracting {} as version {} ..." . format ( target , version ) ) tar = tarfile . open ( target ) dir = tar . next ( ) . name . split ( "/" ) [ 0 ] # pylint: disable=all tar . extractall ( path = __get_dir ( ) ) tar . close ( ) target_dir = os . path . join ( __get_dir ( ) , version ) if os . path . exists ( target_dir ) : rmdirs ( target_dir ) shutil . move ( os . path . join ( __get_dir ( ) , dir ) , target_dir ) if binary : # Binary installs don't have a build.xml that is needed # for pulling the version from. Write the version number # into a file to read later in common.get_version_from_build() with open ( os . path . join ( target_dir , '0.version.txt' ) , 'w' ) as f : f . write ( version ) else : compile_version ( version , target_dir , verbose = verbose ) except urllib . error . URLError as e : msg = "Invalid version {}" . format ( version ) if url is None else "Invalid url {}" . format ( url ) msg = msg + " (underlying error is: {})" . format ( str ( e ) ) raise ArgumentError ( msg ) except tarfile . ReadError as e : raise ArgumentError ( "Unable to uncompress downloaded file: {}" . format ( str ( e ) ) ) except CCMError as e : # wipe out the directory if anything goes wrong. Otherwise we will assume it has been compiled the next time it runs. try : rmdirs ( target_dir ) common . error ( "Deleted {} due to error" . format ( target_dir ) ) except : raise CCMError ( "Building C* version {} failed. Attempted to delete {} but failed. This will need to be manually deleted" . format ( version , target_dir ) ) raise e
Download, extract, and build a Cassandra tarball.
686
8
242,077
def get_tagged_version_numbers ( series = 'stable' ) : releases = [ ] if series == 'testing' : # Testing releases always have a hyphen after the version number: tag_regex = re . compile ( '^refs/tags/cassandra-([0-9]+\.[0-9]+\.[0-9]+-.*$)' ) else : # Stable and oldstable releases are just a number: tag_regex = re . compile ( '^refs/tags/cassandra-([0-9]+\.[0-9]+\.[0-9]+$)' ) tag_url = urllib . request . urlopen ( GITHUB_TAGS ) for ref in ( i . get ( 'ref' , '' ) for i in json . loads ( tag_url . read ( ) ) ) : m = tag_regex . match ( ref ) if m : releases . append ( LooseVersion ( m . groups ( ) [ 0 ] ) ) # Sort by semver: releases . sort ( reverse = True ) stable_major_version = LooseVersion ( str ( releases [ 0 ] . version [ 0 ] ) + "." + str ( releases [ 0 ] . version [ 1 ] ) ) stable_releases = [ r for r in releases if r >= stable_major_version ] oldstable_releases = [ r for r in releases if r not in stable_releases ] oldstable_major_version = LooseVersion ( str ( oldstable_releases [ 0 ] . version [ 0 ] ) + "." + str ( oldstable_releases [ 0 ] . version [ 1 ] ) ) oldstable_releases = [ r for r in oldstable_releases if r >= oldstable_major_version ] if series == 'testing' : return [ r . vstring for r in releases ] elif series == 'stable' : return [ r . vstring for r in stable_releases ] elif series == 'oldstable' : return [ r . vstring for r in oldstable_releases ] else : raise AssertionError ( "unknown release series: {series}" . format ( series = series ) )
Retrieve git tags and find version numbers for a release series
477
12
242,078
def __connect ( host , port , username , password , private_key ) : # Initialize the SSH connection ssh = paramiko . SSHClient ( ) ssh . set_missing_host_key_policy ( paramiko . AutoAddPolicy ( ) ) if private_key is not None and password is not None : private_key = paramiko . RSAKey . from_private_key_file ( private_key , password ) elif private_key is not None : private_key = paramiko . RSAKey . from_private_key_file ( private_key , password ) # Establish the SSH connection try : ssh . connect ( host , port , username , password , private_key ) except Exception as e : raise e # Return the established SSH connection return ssh
Establish remote connection
162
4
242,079
def execute_ccm_command ( self , ccm_args , is_displayed = True ) : return self . execute ( [ "ccm" ] + ccm_args , profile = self . profile )
Execute a CCM command on the remote server
46
10
242,080
def execute_python_script ( self , script ) : # Create the local file to copy to remote file_handle , filename = tempfile . mkstemp ( ) temp_file = os . fdopen ( file_handle , "wt" ) temp_file . write ( script ) temp_file . close ( ) # Put the file into the remote user directory self . put ( filename , "python_execute.py" ) command = [ "python" , "python_execute.py" ] # Execute the python script on the remote system, clean up, and return the output output = self . execute ( command , False ) self . remove ( "python_execute.py" ) os . unlink ( filename ) return output
Execute a python script on the remote server
155
9
242,081
def __put_dir ( self , ftp , local_path , remote_path = None ) : # Determine if local_path should be put into remote user directory if remote_path is None : remote_path = os . path . basename ( local_path ) remote_path += self . separator # Iterate over the local path and perform copy operations to remote server for current_path , directories , files in os . walk ( local_path ) : # Create the remote directory (if needed) try : ftp . listdir ( remote_path ) except IOError : ftp . mkdir ( remote_path ) # Copy the files in the current directory to the remote path for filename in files : ftp . put ( os . path . join ( current_path , filename ) , remote_path + filename ) # Copy the directory in the current directory to the remote path for directory in directories : self . __put_dir ( ftp , os . path . join ( current_path , directory ) , remote_path + directory )
Helper function to perform copy operation to remote server
220
9
242,082
def remove ( self , remote_path ) : # Based on the remote file stats; remove a file or directory recursively ftp = self . ssh . open_sftp ( ) if stat . S_ISDIR ( ftp . stat ( remote_path ) . st_mode ) : self . __remove_dir ( ftp , remote_path ) else : ftp . remove ( remote_path ) ftp . close ( )
Delete a file or directory recursively on the remote server
94
12
242,083
def __remove_dir ( self , ftp , remote_path ) : # Iterate over the remote path and perform remove operations files = ftp . listdir ( remote_path ) for filename in files : # Attempt to remove the file (if exception then path is directory) path = remote_path + self . separator + filename try : ftp . remove ( path ) except IOError : self . __remove_dir ( ftp , path ) # Remove the original directory requested ftp . rmdir ( remote_path )
Helper function to perform delete operation on the remote server
112
10
242,084
def usage ( self ) : # Retrieve the text for just the arguments usage = self . parser . format_help ( ) . split ( "optional arguments:" ) [ 1 ] # Remove any blank lines and return return "Remote Options:" + os . linesep + os . linesep . join ( [ s for s in usage . splitlines ( ) if s ] )
Get the usage for the remote execution options
77
11
242,085
def jwt_required ( realm = None ) : def wrapper ( fn ) : @ wraps ( fn ) def decorator ( * args , * * kwargs ) : _jwt_required ( realm or current_app . config [ 'JWT_DEFAULT_REALM' ] ) return fn ( * args , * * kwargs ) return decorator return wrapper
View decorator that requires a valid JWT token to be present in the request
80
16
242,086
def auth_request_handler ( self , callback ) : warnings . warn ( "This handler is deprecated. The recommended approach to have control over " "the authentication resource is to disable the built-in resource by " "setting JWT_AUTH_URL_RULE=None and registering your own authentication " "resource directly on your application." , DeprecationWarning , stacklevel = 2 ) self . auth_request_callback = callback return callback
Specifies the authentication response handler function .
93
8
242,087
def _svg_path ( self , pathcodes , data ) : def gen_path_elements ( pathcodes , data ) : counts = { 'M' : 1 , 'L' : 1 , 'C' : 3 , 'Z' : 0 } it = iter ( data ) for code in pathcodes : yield code for _ in range ( counts [ code ] ) : p = next ( it ) yield str ( p [ 0 ] ) yield str ( p [ 1 ] ) return ' ' . join ( gen_path_elements ( pathcodes , data ) )
Return the SVG path's d element.
122
8
242,088
def fig_to_html ( fig = None , template = 'base.html' , tiles = None , crs = None , epsg = None , embed_links = False , float_precision = 6 ) : if tiles is None : tiles = maptiles . osm elif isinstance ( tiles , six . string_types ) : if tiles not in maptiles . tiles : raise ValueError ( 'Unknown tile source "{}"' . format ( tiles ) ) else : tiles = maptiles . tiles [ tiles ] template = env . get_template ( template ) if fig is None : fig = plt . gcf ( ) dpi = fig . get_dpi ( ) renderer = LeafletRenderer ( crs = crs , epsg = epsg ) exporter = Exporter ( renderer ) exporter . run ( fig ) attribution = _attribution + ' | ' + tiles [ 1 ] mapid = str ( uuid . uuid4 ( ) ) . replace ( '-' , '' ) FloatEncoder . _formatter = ".{}f" . format ( float_precision ) gjdata = json . dumps ( renderer . geojson ( ) , cls = FloatEncoder ) params = { 'geojson' : gjdata , 'width' : fig . get_figwidth ( ) * dpi , 'height' : fig . get_figheight ( ) * dpi , 'mapid' : mapid , 'tile_url' : tiles [ 0 ] , 'attribution' : attribution , 'links' : [ _leaflet_js , _leaflet_css ] , 'embed_links' : embed_links , } html = template . render ( params ) return html
Convert a Matplotlib Figure to a Leaflet map
379
12
242,089
def fig_to_geojson ( fig = None , * * kwargs ) : if fig is None : fig = plt . gcf ( ) renderer = LeafletRenderer ( * * kwargs ) exporter = Exporter ( renderer ) exporter . run ( fig ) return renderer . geojson ( )
Returns a figure's GeoJSON representation as a dictionary
74
10
242,090
def display ( fig = None , closefig = True , * * kwargs ) : from IPython . display import HTML if fig is None : fig = plt . gcf ( ) if closefig : plt . close ( fig ) html = fig_to_html ( fig , * * kwargs ) # We embed everything in an iframe. iframe_html = '<iframe src="data:text/html;base64,{html}" width="{width}" height="{height}"></iframe>' . format ( html = base64 . b64encode ( html . encode ( 'utf8' ) ) . decode ( 'utf8' ) , width = '100%' , height = int ( 60. * fig . get_figheight ( ) ) , ) return HTML ( iframe_html )
Convert a Matplotlib Figure to a Leaflet map . Embed in IPython notebook .
177
20
242,091
def show ( fig = None , path = '_map.html' , * * kwargs ) : import webbrowser fullpath = os . path . abspath ( path ) with open ( fullpath , 'w' ) as f : save_html ( fig , fileobj = f , * * kwargs ) webbrowser . open ( 'file://' + fullpath )
Convert a Matplotlib Figure to a Leaflet map . Open in a browser
81
17
242,092
def create_incident ( * * kwargs ) : incidents = cachet . Incidents ( endpoint = ENDPOINT , api_token = API_TOKEN ) if 'component_id' in kwargs : return incidents . post ( name = kwargs [ 'name' ] , message = kwargs [ 'message' ] , status = kwargs [ 'status' ] , component_id = kwargs [ 'component_id' ] , component_status = kwargs [ 'component_status' ] ) else : return incidents . post ( name = kwargs [ 'name' ] , message = kwargs [ 'message' ] , status = kwargs [ 'status' ] )
Creates an incident
156
4
242,093
def incident_exists ( name , message , status ) : incidents = cachet . Incidents ( endpoint = ENDPOINT ) all_incidents = json . loads ( incidents . get ( ) ) for incident in all_incidents [ 'data' ] : if name == incident [ 'name' ] and status == incident [ 'status' ] and message . strip ( ) == incident [ 'message' ] . strip ( ) : return True return False
Check if an incident with these attributes already exists
96
9
242,094
def get_component ( id ) : components = cachet . Components ( endpoint = ENDPOINT ) component = json . loads ( components . get ( id = id ) ) return component [ 'data' ]
Gets a Cachet component by id
44
9
242,095
def api_token_required ( f , * args , * * kwargs ) : try : if args [ 0 ] . api_token is None : raise AttributeError ( 'Parameter api_token is required.' ) except AttributeError : raise AttributeError ( 'Parameter api_token is required.' ) return f ( * args , * * kwargs )
Decorator helper function to ensure some methods aren't needlessly called without an api_token configured.
78
21
242,096
def is_true ( self , item = None ) : if item : values = [ item ] else : values = [ ] self . _get_item_and_att_names ( * values ) return self . _passes_all
If you are filtering on object values, you need to pass that object here.
50
15
242,097
def new_from_url ( cls , url , verify = True ) : response = requests . get ( url , verify = verify , timeout = 2.5 ) return cls . new_from_response ( response )
Constructs a new WebPage object for the URL using the requests module to fetch the HTML .
47
19
242,098
def new_from_response ( cls , response ) : return cls ( response . url , html = response . text , headers = response . headers )
Constructs a new WebPage object for the response using the BeautifulSoup module to parse the HTML .
33
21
242,099
def _prepare_app ( self , app ) : # Ensure these keys' values are lists for key in [ 'url' , 'html' , 'script' , 'implies' ] : try : value = app [ key ] except KeyError : app [ key ] = [ ] else : if not isinstance ( value , list ) : app [ key ] = [ value ] # Ensure these keys exist for key in [ 'headers' , 'meta' ] : try : value = app [ key ] except KeyError : app [ key ] = { } # Ensure the 'meta' key is a dict obj = app [ 'meta' ] if not isinstance ( obj , dict ) : app [ 'meta' ] = { 'generator' : obj } # Ensure keys are lowercase for key in [ 'headers' , 'meta' ] : obj = app [ key ] app [ key ] = { k . lower ( ) : v for k , v in obj . items ( ) } # Prepare regular expression patterns for key in [ 'url' , 'html' , 'script' ] : app [ key ] = [ self . _prepare_pattern ( pattern ) for pattern in app [ key ] ] for key in [ 'headers' , 'meta' ] : obj = app [ key ] for name , pattern in obj . items ( ) : obj [ name ] = self . _prepare_pattern ( obj [ name ] )
Normalize app data, preparing it for the detection phase.
304
11