idx
int64
0
251k
question
stringlengths
53
3.53k
target
stringlengths
5
1.23k
len_question
int64
20
893
len_target
int64
3
238
13,200
def _get_pdb_id ( self , elem , * * kwargs ) : id = elem . attrib [ 'ID' ] if self . restrict_to_transmembrane_proteins : tmp = elem . attrib [ 'TMP' ] assert ( tmp == 'no' or tmp == 'yes' or tmp == 'not' ) if tmp == 'yes' : self . ids [ id ] = PDBTM . _get_tm_type ( elem ) else : self . ids [ id ] = self . ids . get ( id , 0 ) + 1
If self . restrict_to_transmembrane_proteins is False then this adds all ids to self . ids . Otherwise only transmembrane protein ids are added .
134
41
13,201
def get_xml ( self , pdb_id ) : self . tmp_string = None context = etree . iterparse ( io . BytesIO ( self . xml_contents ) , events = ( 'end' , ) , tag = self . PDBTM_entry_tag_type ) try : fast_iter ( context , self . _get_xml , pdb_id = pdb_id . upper ( ) ) except EarlyOut : pass return self . tmp_string
Returns the XML for pdb_id if the tag exists .
105
13
13,202
def multimatch ( self , origin = None , rel = None , target = None , attrs = None , include_ids = False ) : raise NotImplementedError origin = origin if origin is None or isinstance ( origin , set ) else set ( [ origin ] ) rel = rel if rel is None or isinstance ( rel , set ) else set ( [ rel ] ) target = target if target is None or isinstance ( target , set ) else set ( [ target ] ) for index , curr_rel in enumerate ( self . _relationships ) : matches = True if origin and curr_rel [ ORIGIN ] not in origin : matches = False if rel and curr_rel [ RELATIONSHIP ] not in rel : matches = False if target and curr_rel [ TARGET ] not in target : matches = False if attrs : for k , v in attrs . items ( ) : if k not in curr_rel [ ATTRIBUTES ] or curr_rel [ ATTRIBUTES ] . get ( k ) != v : matches = False if matches : if include_ids : yield index , ( curr_rel [ 0 ] , curr_rel [ 1 ] , curr_rel [ 2 ] , curr_rel [ 3 ] . copy ( ) ) else : yield ( curr_rel [ 0 ] , curr_rel [ 1 ] , curr_rel [ 2 ] , curr_rel [ 3 ] . copy ( ) ) return
Iterator over relationship IDs that match a pattern of components with multiple options provided for each component
321
17
13,203
def add ( self , origin , rel , target , attrs = None ) : if not origin : raise ValueError ( 'Relationship origin cannot be null' ) if not rel : raise ValueError ( 'Relationship ID cannot be null' ) attrs = attrs or { } origin_item = self . _db_coll . find_one ( { 'origin' : origin } ) rel = self . _abbreviate ( rel ) target = self . _abbreviate ( target ) rel_info = { 'rid' : rel , 'instances' : [ [ target , attrs ] ] } if origin_item is None : self . _db_coll . insert_one ( { 'origin' : origin , 'rels' : [ rel_info ] , } ) else : origin_item [ 'rels' ] . append ( rel_info ) self . _db_coll . replace_one ( { 'origin' : origin } , origin_item ) return
Add one relationship to the model
211
6
13,204
def remove ( self , index ) : raise NotImplementedError if hasattr ( index , '__iter__' ) : ind = set ( index ) else : ind = [ index ] # Rebuild relationships, excluding the provided indices self . _relationships = [ r for i , r in enumerate ( self . _relationships ) if i not in ind ]
Delete one or more relationship by index from the extent
77
10
13,205
async def act ( self ) : # Learn from domain artifacts. self . age += 1 self . added_last = False self . learn_from_domain ( method = self . env_learning_method , amount = self . env_learning_amount ) # Invent new artifact artifact = self . invent ( self . search_width ) args = artifact . framings [ self . name ] [ 'args' ] val = artifact . evals [ self . name ] self . _log ( logging . DEBUG , "Created spirograph with args={}, val={}" . format ( args , val ) ) self . spiro_args = args self . arg_history . append ( self . spiro_args ) self . add_artifact ( artifact ) if val >= self . _own_threshold : artifact . self_criticism = 'pass' # Train SOM with the invented artifact self . learn ( artifact , self . teaching_iterations ) # Save images if logger is defined # Add created artifact to voting candidates in the environment self . add_candidate ( artifact ) self . added_last = True elif self . jump == 'random' : largs = self . spiro_args self . spiro_args = np . random . uniform ( - 199 , 199 , self . spiro_args . shape ) self . _log ( logging . DEBUG , "Jumped from {} to {}" . format ( largs , self . spiro_args ) ) self . save_images ( artifact )
Agent s main method to create new spirographs .
319
13
13,206
def learn_from_domain ( self , method = 'random' , amount = 10 ) : if method == 'none' : return arts = self . env . artifacts if len ( arts ) == 0 : return if 'random' in method : samples = min ( len ( arts ) , amount ) ars = np . random . choice ( arts , samples , replace = False ) for a in ars : self . learn ( a , self . teaching_iterations ) if 'closest' in method : ars = arts dists = [ ] for a in ars : args = a . framings [ a . creator ] [ 'args' ] d = np . sqrt ( np . sum ( np . square ( args - self . spiro_args ) ) ) dists . append ( ( d , a ) ) dists . sort ( key = operator . itemgetter ( 0 ) ) for d , a in dists [ : amount ] : self . learn ( a , self . teaching_iterations )
Learn SOM from artifacts introduced to the environment .
216
9
13,207
def plot_distances ( self , mean_dist , distances , indeces ) : from matplotlib import pyplot as plt x = np . arange ( len ( distances ) ) y = [ mean_dist for i in x ] fig , ax = plt . subplots ( ) data_line = ax . plot ( indeces , distances , label = 'Min Distance to previous' , marker = '.' , color = 'black' , linestyle = "" ) mean_line = ax . plot ( indeces , y , label = 'Mean' , linestyle = '--' , color = 'green' ) if len ( distances ) > 0 : z = np . poly1d ( np . polyfit ( x , distances , 2 ) ) f = [ z ( i ) for i in x ] mean_line = ax . plot ( indeces , f , label = 'Fitted' , linestyle = '-' , color = 'red' ) legend = ax . legend ( loc = 'upper right' , prop = { 'size' : 8 } ) agent_vars = "{}_{}_{}{}_last={}_stmem=list{}_veto={}_sc={}_jump={}_sw={}_mr={}_maxN" . format ( self . sanitized_name ( ) , self . age , self . env_learning_method , self . env_learning_amount , self . env_learn_on_add , self . stmem . length , self . _novelty_threshold , self . _own_threshold , self . jump , self . search_width , self . move_radius ) ax . set_title ( "{} min distances: env_learn={} {}" . format ( self . name , self . env_learning_method , self . env_learning_amount ) ) ax . set_ylabel ( 'min distance to preceding artifact' ) ax . set_xlabel ( 'iteration' ) if self . logger is not None : imname = os . path . join ( self . logger . folder , '{}_dists.png' . format ( agent_vars ) ) plt . savefig ( imname ) plt . close ( ) else : plt . show ( )
Plot distances of the generated spirographs w . r . t . the previously generated spirogaphs .
493
25
13,208
def get_qtls_from_mapqtl_data ( matrix , threshold , inputfile ) : trait_name = inputfile . split ( ')_' , 1 ) [ 1 ] . split ( '.mqo' ) [ 0 ] qtls = [ ] qtl = None for entry in matrix [ 1 : ] : if qtl is None : qtl = entry if qtl [ 1 ] != entry [ 1 ] : if float ( qtl [ 4 ] ) > float ( threshold ) : qtl [ 0 ] = trait_name qtls . append ( qtl ) qtl = entry if entry [ 4 ] == '' : # pragma: no cover entry [ 4 ] = 0 if qtl [ 4 ] == '' : # pragma: no cover qtl [ 4 ] = 0 if float ( entry [ 4 ] ) > float ( qtl [ 4 ] ) : qtl = entry if float ( qtl [ 4 ] ) > float ( threshold ) : qtl [ 0 ] = trait_name if qtl not in qtls : qtls . append ( qtl ) return qtls
Extract the QTLs found by MapQTL reading its file . This assume that there is only one QTL per linkage group .
241
28
13,209
def get_files ( cls , folder , session_id = '' ) : filelist = [ ] if folder is None or not os . path . isdir ( folder ) : return filelist if session_id is None : session_id = '' for root , dirs , files in os . walk ( folder ) : for filename in files : if filename . startswith ( 'Session %s' % session_id ) and filename . endswith ( '.mqo' ) : filename = os . path . join ( root , filename ) filelist . append ( filename ) return filelist
Retrieve the list of files the plugin can work on . Find this list based on the files name files extension or even actually by reading in the file . If a session identifier is specified it will restrict the list of files returned to those with this session identifier in their name .
126
55
13,210
def get_session_identifiers ( cls , folder = None , inputfile = None ) : sessions = [ ] if folder is None or not os . path . isdir ( folder ) : return sessions for root , dirs , files in os . walk ( folder ) : for filename in files : if filename . startswith ( 'Session ' ) and filename . endswith ( '.mqo' ) : session = filename . split ( ) [ 1 ] if session not in sessions : sessions . append ( session ) return sessions
Retrieve the list of session identifiers contained in the data on the folder .
112
15
13,211
def parse_range ( s , range_separator = '-' ) : return reduce ( lambda x , y : x + y , ( map ( lambda r : ( range ( int ( r . split ( range_separator ) [ 0 ] ) , int ( r . split ( range_separator ) [ 1 ] ) + 1 ) ) if range_separator in r else [ int ( r ) ] , s . split ( ',' ) ) ) )
Parses the string s which contains indices and ranges and returns the explicit list of integers defined by s . Written by Laurens Kraal 2014 .
97
30
13,212
def merge_range_pairs ( prs ) : new_prs = [ ] sprs = [ sorted ( p ) for p in prs ] sprs = sorted ( sprs ) merged = False x = 0 while x < len ( sprs ) : newx = x + 1 new_pair = list ( sprs [ x ] ) for y in range ( x + 1 , len ( sprs ) ) : if new_pair [ 0 ] <= sprs [ y ] [ 0 ] - 1 <= new_pair [ 1 ] : new_pair [ 0 ] = min ( new_pair [ 0 ] , sprs [ y ] [ 0 ] ) new_pair [ 1 ] = max ( new_pair [ 1 ] , sprs [ y ] [ 1 ] ) newx = y + 1 if new_pair not in new_prs : new_prs . append ( new_pair ) x = newx return new_prs
Takes in a list of pairs specifying ranges and returns a sorted list of merged sorted ranges .
203
19
13,213
def split_pdb_residue ( s ) : if s . isdigit ( ) : return ( int ( s ) , ' ' ) else : assert ( s [ : - 1 ] . isdigit ( ) ) return ( ( s [ : - 1 ] , s [ - 1 ] ) )
Splits a PDB residue into the numeric and insertion code components .
64
14
13,214
def do_chunked_gzip ( infh , outfh , filename ) : import gzip gzfh = gzip . GzipFile ( 'rawlogs' , mode = 'wb' , fileobj = outfh ) if infh . closed : infh = open ( infh . name , 'r' ) else : infh . seek ( 0 ) readsize = 0 sys . stdout . write ( 'Gzipping {0}: ' . format ( filename ) ) if os . stat ( infh . name ) . st_size : infh . seek ( 0 ) progressbar = ProgressBar ( sys . stdout , os . stat ( infh . name ) . st_size , "bytes gzipped" ) while True : chunk = infh . read ( GZIP_CHUNK_SIZE ) if not chunk : break if sys . version_info [ 0 ] >= 3 : # noinspection PyArgumentList gzfh . write ( bytes ( chunk , "utf-8" ) ) else : gzfh . write ( chunk ) readsize += len ( chunk ) progressbar . redraw ( readsize ) gzfh . close ( )
A memory - friendly way of compressing the data .
260
11
13,215
def mail_message ( smtp_server , message , from_address , rcpt_addresses ) : if smtp_server [ 0 ] == '/' : # Sending the message with local sendmail p = os . popen ( smtp_server , 'w' ) p . write ( message ) p . close ( ) else : # Sending the message using a smtp server import smtplib server = smtplib . SMTP ( smtp_server ) server . sendmail ( from_address , rcpt_addresses , message ) server . quit ( )
Send mail using smtp .
123
6
13,216
def get_value_unit ( value , unit , prefix ) : prefixes = ( '' , 'K' , 'M' , 'G' , 'T' ) if len ( unit ) : if unit [ : 1 ] in prefixes : valprefix = unit [ 0 ] unit = unit [ 1 : ] else : valprefix = '' else : valprefix = '' while valprefix != prefix : uidx = prefixes . index ( valprefix ) if uidx > prefixes . index ( prefix ) : value *= 1024 valprefix = prefixes [ uidx - 1 ] else : if value < 10240 : return value , '{0}{1}' . format ( valprefix , unit ) value = int ( round ( value / 1024.0 ) ) valprefix = prefixes [ uidx + 1 ] return value , '{0}{1}' . format ( valprefix , unit )
Return a human - readable value with unit specification . Try to transform the unit prefix to the one passed as parameter . When transform to higher prefix apply nearest integer round .
194
33
13,217
def get_fmt_results ( results , limit = 5 , sep = '::' , fmt = None ) : result_list = [ ] for key in sorted ( results , key = lambda x : results [ x ] , reverse = True ) : if len ( result_list ) >= limit and results [ key ] <= 1 : break if fmt is not None : fmtkey = [ ] for i in range ( len ( key ) ) : if i % 2 == 1 : fmtkey . append ( fmt . format ( key [ i ] ) ) else : fmtkey . append ( key [ i ] ) result_list . append ( u'{0}({1})' . format ( sep . join ( fmtkey ) , results [ key ] ) ) else : result_list . append ( u'{0}({1})' . format ( sep . join ( key ) , results [ key ] ) ) else : return result_list if fmt is not None : result_list . append ( fmt . format ( u'[%d more skipped]' % ( len ( results ) - len ( result_list ) ) ) ) else : result_list . append ( u'[%d more skipped]' % ( len ( results ) - len ( result_list ) ) ) return result_list
Return a list of formatted strings representation on a result dictionary . The elements of the key are divided by a separator string . The result is appended after the key between parentheses . Apply a format transformation to odd elements of the key if a fmt parameter is passed .
273
53
13,218
def safe_expand ( template , mapping ) : for _ in range ( len ( mapping ) + 1 ) : _template = template template = string . Template ( template ) . safe_substitute ( mapping ) if template == _template : return template else : raise ValueError ( "circular mapping provided!" )
Safe string template expansion . Raises an error if the provided substitution mapping has circularities .
66
18
13,219
def protected_property ( func ) : if func . __name__ . startswith ( '_' ) : raise ValueError ( "%r: Cannot decorate a protected method!" % func ) @ property @ wraps ( func ) def proxy_wrapper ( self ) : try : return getattr ( self , '_%s' % func . __name__ ) except AttributeError : pass return func ( self ) return proxy_wrapper
Class method decorator that creates a property that returns the protected attribute or the value returned by the wrapped method if the protected attribute is not defined .
91
29
13,220
def open_resource ( source ) : try : return open ( source , mode = 'rb' ) except ( IOError , OSError ) as err : try : resource = urlopen ( source ) except ValueError : pass else : resource . name = resource . url if hasattr ( resource , '__enter__' ) : return resource else : return closing ( resource ) raise err except TypeError : if hasattr ( source , 'read' ) and hasattr ( source , 'readlines' ) : return source # Source is already a file-like object raise
Opens a resource in binary reading mode . Wraps the resource with a context manager when it doesn t have one .
119
24
13,221
def load ( stream , fmt = 'lha' ) : if fmt == 'lha' : return pylha . load ( stream ) elif fmt == 'json' : if isinstance ( stream , str ) : return json . loads ( stream ) else : return json . load ( stream ) elif fmt == 'yaml' : return yaml . load ( stream )
Load a parameter file in DSixTools SLHA - like format or its JSON or YAML representation .
80
22
13,222
def sm_lha2dict ( lha ) : d = OrderedDict ( ) v = dict ( lha [ 'BLOCK' ] [ 'GAUGE' ] [ 'values' ] ) d [ 'g' ] = v [ 1 ] d [ 'gp' ] = v [ 2 ] d [ 'gs' ] = v [ 3 ] v = dict ( lha [ 'BLOCK' ] [ 'SCALAR' ] [ 'values' ] ) d [ 'Lambda' ] = v [ 1 ] d [ 'm2' ] = v [ 2 ] d [ 'Gu' ] = lha2matrix ( lha [ 'BLOCK' ] [ 'GU' ] [ 'values' ] , ( 3 , 3 ) ) if 'IMGU' in lha [ 'BLOCK' ] : d [ 'Gu' ] = d [ 'Gu' ] + 1j * lha2matrix ( lha [ 'BLOCK' ] [ 'IMGU' ] [ 'values' ] , ( 3 , 3 ) ) d [ 'Gd' ] = lha2matrix ( lha [ 'BLOCK' ] [ 'GD' ] [ 'values' ] , ( 3 , 3 ) ) if 'IMGD' in lha [ 'BLOCK' ] : d [ 'Gd' ] = d [ 'Gd' ] + 1j * lha2matrix ( lha [ 'BLOCK' ] [ 'IMGD' ] [ 'values' ] , ( 3 , 3 ) ) d [ 'Ge' ] = lha2matrix ( lha [ 'BLOCK' ] [ 'GE' ] [ 'values' ] , ( 3 , 3 ) ) if 'IMGE' in lha [ 'BLOCK' ] : d [ 'Ge' ] = d [ 'Ge' ] + 1j * lha2matrix ( lha [ 'BLOCK' ] [ 'IMGE' ] [ 'values' ] , ( 3 , 3 ) ) # thetas default to 0 if 'THETA' in lha [ 'BLOCK' ] : v = dict ( lha [ 'BLOCK' ] [ 'THETA' ] [ 'values' ] ) d [ 'Theta' ] = v . get ( 1 , 0 ) d [ 'Thetap' ] = v . get ( 2 , 0 ) d [ 'Thetas' ] = v . get ( 3 , 0 ) else : d [ 'Theta' ] = 0 d [ 'Thetap' ] = 0 d [ 'Thetas' ] = 0 return d
Convert a dictionary returned by pylha from a DSixTools SM input file into a dictionary of SM values .
563
24
13,223
def sm_dict2lha ( d ) : blocks = OrderedDict ( [ ( 'GAUGE' , { 'values' : [ [ 1 , d [ 'g' ] . real ] , [ 2 , d [ 'gp' ] . real ] , [ 3 , d [ 'gs' ] . real ] ] } ) , ( 'SCALAR' , { 'values' : [ [ 1 , d [ 'Lambda' ] . real ] , [ 2 , d [ 'm2' ] . real ] ] } ) , ( 'GU' , { 'values' : matrix2lha ( d [ 'Gu' ] . real ) } ) , ( 'IMGU' , { 'values' : matrix2lha ( d [ 'Gu' ] . imag ) } ) , ( 'GD' , { 'values' : matrix2lha ( d [ 'Gd' ] . real ) } ) , ( 'IMGD' , { 'values' : matrix2lha ( d [ 'Gd' ] . imag ) } ) , ( 'GE' , { 'values' : matrix2lha ( d [ 'Ge' ] . real ) } ) , ( 'IMGE' , { 'values' : matrix2lha ( d [ 'Ge' ] . imag ) } ) , ( 'THETA' , { 'values' : [ [ 1 , d [ 'Theta' ] . real ] , [ 2 , d [ 'Thetap' ] . real ] , [ 3 , d [ 'Thetas' ] . real ] ] } ) , ] ) return { 'BLOCK' : blocks }
Convert a a dictionary of SM parameters into a dictionary that pylha can convert into a DSixTools SM output file .
354
26
13,224
def wc_lha2dict ( lha ) : C = OrderedDict ( ) # try to read all WCs with 0, 2, or 4 fermions; if not found, set to zero for k , ( block , i ) in WC_dict_0f . items ( ) : try : C [ k ] = dict ( lha [ 'BLOCK' ] [ block ] [ 'values' ] ) [ i ] except KeyError : C [ k ] = 0 for k in definitions . WC_keys_2f : try : C [ k ] = lha2matrix ( lha [ 'BLOCK' ] [ 'WC' + k . upper ( ) ] [ 'values' ] , ( 3 , 3 ) ) . real except KeyError : C [ k ] = np . zeros ( ( 3 , 3 ) ) try : # try to add imaginary part C [ k ] = C [ k ] + 1j * lha2matrix ( lha [ 'BLOCK' ] [ 'IMWC' + k . upper ( ) ] [ 'values' ] , ( 3 , 3 ) ) except KeyError : pass for k in definitions . WC_keys_4f : try : C [ k ] = lha2matrix ( lha [ 'BLOCK' ] [ 'WC' + k . upper ( ) ] [ 'values' ] , ( 3 , 3 , 3 , 3 ) ) except KeyError : C [ k ] = np . zeros ( ( 3 , 3 , 3 , 3 ) ) try : # try to add imaginary part C [ k ] = C [ k ] + 1j * lha2matrix ( lha [ 'BLOCK' ] [ 'IMWC' + k . upper ( ) ] [ 'values' ] , ( 3 , 3 , 3 , 3 ) ) except KeyError : pass return C
Convert a dictionary returned by pylha from a DSixTools WC input file into a dictionary of Wilson coefficients .
401
24
13,225
def compute_rmsd_by_matrix ( dataframe_1 , dataframe_2 , use_assertion = False ) : if use_assertion : assert ( [ i for i in dataframe_1 . index ] == [ i for i in dataframe_2 . index ] ) # Note: this assertion creates garbage memory allocations num_points = dataframe_1 . shape [ 0 ] return numpy . linalg . norm ( dataframe_1 - dataframe_2 ) / numpy . sqrt ( num_points )
Computes the RMSD of two pandas dataframes . The dataframes are expected to be of equal dimensions and use_assertion can be set to assert that the row indices match .
117
39
13,226
def jinja_extensions_feature ( app ) : # register jinja filters app . jinja_env . globals [ 'momentjs' ] = MomentJsFilters app . jinja_env . filters . update ( MomentJsFilters ( ) . get_filters ( ) ) app . jinja_env . filters . update ( DateFilters ( ) . get_filters ( ) ) app . jinja_env . filters . update ( HumanizeFilters ( ) . get_filters ( ) ) # register custom jinja functions app . jinja_env . globals . update ( dict ( asset = functions . asset , dev_proxy = functions . dev_proxy ) )
Enables custom templating extensions
157
7
13,227
def log ( msg , level = 0 ) : red = '\033[91m' endc = '\033[0m' # configure the logging module cfg = { 'version' : 1 , 'disable_existing_loggers' : False , 'formatters' : { 'stdout' : { 'format' : '[%(levelname)s]: %(asctime)s - %(message)s' , 'datefmt' : '%x %X' } , 'stderr' : { 'format' : red + '[%(levelname)s]: %(asctime)s - %(message)s' + endc , 'datefmt' : '%x %X' } } , 'handlers' : { 'stdout' : { 'class' : 'logging.StreamHandler' , 'level' : 'DEBUG' , 'formatter' : 'stdout' } , 'stderr' : { 'class' : 'logging.StreamHandler' , 'level' : 'ERROR' , 'formatter' : 'stderr' } } , 'loggers' : { 'info' : { 'handlers' : [ 'stdout' ] , 'level' : 'INFO' , 'propagate' : True } , 'error' : { 'handlers' : [ 'stderr' ] , 'level' : 'ERROR' , 'propagate' : False } } } dictConfig ( cfg ) lg = 'info' if level == 0 else 'error' lvl = 20 if level == 0 else 40 logger = logging . getLogger ( lg ) logger . log ( lvl , msg )
Logs a message to the console with optional level paramater
367
12
13,228
def insert ( self , part ) : params = { k : str ( v ) for k , v in part . params . items ( ) } res = c . create_assembly_instance ( self . uri . as_dict ( ) , part . uri . as_dict ( ) , params ) return res
Insert a part into this assembly .
66
7
13,229
def parse_http_scheme ( uri ) : regex = re . compile ( r'^(?:http)s?://' , flags = re . IGNORECASE ) match = regex . match ( uri ) return match . group ( 0 ) if match else 'http://'
match on http scheme if no match is found will assume http
62
12
13,230
def parse_stream ( response ) : stream_data = [ ] stream = stdout for data in response : if data : try : data = data . decode ( 'utf-8' ) except AttributeError as e : logger . exception ( "Unable to parse stream, Attribute Error Raised: {0}" . format ( e ) ) stream . write ( data ) continue try : normalized_data = normalize_keys ( json . loads ( data ) ) except ValueError : stream . write ( data ) continue except TypeError : stream . write ( data ) continue if 'progress' in normalized_data : stream_data . append ( normalized_data ) _display_progress ( normalized_data , stream ) elif 'error' in normalized_data : _display_error ( normalized_data , stream ) elif 'status' in normalized_data : stream_data . append ( normalized_data ) _display_status ( normalized_data , stream ) elif 'stream' in normalized_data : stream_data . append ( normalized_data ) _display_stream ( normalized_data , stream ) else : stream . write ( data ) stream . flush ( ) return stream_data
take stream from docker - py lib and display it to the user .
250
14
13,231
def normalize_keys ( suspect , snake_case = True ) : if not isinstance ( suspect , dict ) : raise TypeError ( 'you must pass a dict.' ) for key in list ( suspect ) : if not isinstance ( key , six . string_types ) : continue if snake_case : s1 = first_cap_re . sub ( r'\1_\2' , key ) new_key = all_cap_re . sub ( r'\1_\2' , s1 ) . lower ( ) # .replace('-', '_') else : new_key = key . lower ( ) value = suspect . pop ( key ) if isinstance ( value , dict ) : suspect [ new_key ] = normalize_keys ( value , snake_case ) elif isinstance ( value , list ) : for i in range ( 0 , len ( value ) ) : if isinstance ( value [ i ] , dict ) : normalize_keys ( value [ i ] , snake_case ) suspect [ new_key ] = value else : suspect [ new_key ] = value return suspect
take a dict and turn all of its type string keys into snake_case
239
15
13,232
def _display_status ( normalized_data , stream ) : if 'Pull complete' in normalized_data [ 'status' ] or 'Download complete' in normalized_data [ 'status' ] : stream . write ( "\n" ) if 'id' in normalized_data : stream . write ( "%s - " % normalized_data [ 'id' ] ) stream . write ( "{0}\n" . format ( normalized_data [ 'status' ] ) )
print status message from docker - py stream .
99
9
13,233
def _display_stream ( normalized_data , stream ) : try : stream . write ( normalized_data [ 'stream' ] ) except UnicodeEncodeError : stream . write ( normalized_data [ 'stream' ] . encode ( "utf-8" ) )
print stream message from docker - py stream .
56
9
13,234
def version ( self ) -> str : output , _ = self . _execute ( 'version' ) return output . splitlines ( ) [ 0 ] . split ( ) [ - 1 ]
Show the version number of Android Debug Bridge .
39
9
13,235
def get_state ( self ) -> str : output , error = self . _execute ( 'get-state' ) if error : raise DeviceConnectionException ( error . split ( ':' , 1 ) [ - 1 ] . strip ( ) ) return output . strip ( )
offline | bootloader | device
57
7
13,236
def acme_init ( ) : acme_private_key = ACME_PRIVATE_KEY acme_intermediate_cert = ACME_INTERMEDIATE_CERT acme_intermediate_cert_url = ACME_INTERMEDIATE_CERT_URL if not os . path . isfile ( acme_private_key ) : print 'Create {}' . format ( acme_private_key ) cmd = 'openssl genrsa 4096 > {acme_private_key}' . format ( acme_private_key = acme_private_key ) p = subprocess . Popen ( cmd , shell = True , stdout = subprocess . PIPE , close_fds = True ) p . communicate ( ) helpers . file_rights ( acme_private_key , mode = 0444 , uid = 0 , gid = 0 ) else : print 'Already exist: {}' . format ( acme_private_key ) if not os . path . isfile ( acme_intermediate_cert ) : print 'Create {}' . format ( acme_intermediate_cert ) cmd = 'wget -O - {acme_intermediate_cert_url} > {acme_intermediate_cert}' cmd = cmd . format ( acme_intermediate_cert_url = acme_intermediate_cert_url , acme_intermediate_cert = acme_intermediate_cert ) p = subprocess . Popen ( cmd , shell = True , stdout = subprocess . PIPE , close_fds = True ) p . communicate ( ) helpers . file_rights ( acme_intermediate_cert , mode = 0444 , uid = 0 , gid = 0 ) else : print 'Already exist: {}' . format ( acme_intermediate_cert )
Init acme key
405
4
13,237
def acme_sign_certificate ( common_name , size = DEFAULT_KEY_SIZE ) : private_key_path = '{}/{}.key' . format ( CERTIFICATES_PATH , common_name ) certificate_path = '{}/{}.crt' . format ( CERTIFICATES_PATH , common_name ) certificate_request_path = '{}/{}.csr' . format ( CERTIFICATES_PATH , common_name ) signed_cert = '{certificates_path}/{common_name}-signed.crt' . format ( certificates_path = CERTIFICATES_PATH , common_name = common_name ) generate_certificate ( common_name , size ) cmd = 'openssl req -new -sha256 -key {private_key_path}' cmd += ' -subj "/CN={common_name}" -out {certificate_request_path}' cmd = cmd . format ( private_key_path = private_key_path , common_name = common_name , certificate_request_path = certificate_request_path ) p = subprocess . Popen ( cmd , shell = True , stdout = subprocess . PIPE , close_fds = True ) p . communicate ( ) _internal_sign_certificate ( certificate_path , certificate_request_path , signed_cert ) cron = "/etc/cron.monthly/acme-renew" if not os . path . exists ( cron ) : with open ( cron , "w" ) as file : file . write ( "#!/bin/bash\ncozy_management renew_certificates\n" ) st = os . stat ( cron ) os . chmod ( cron , st . st_mode | S_IXUSR )
Sign certificate with acme_tiny for let s encrypt
401
11
13,238
def acme_renew_certificates ( ) : for csr in glob ( os . path . join ( CERTIFICATES_PATH , '*.csr' ) ) : common_name = os . path . basename ( csr ) common_name = os . path . splitext ( common_name ) [ 0 ] certificate_path = "{}.crt" . format ( common_name ) certificate_path = os . path . join ( CERTIFICATES_PATH , certificate_path ) with open ( certificate_path ) as file : crt = OpenSSL . crypto . load_certificate ( OpenSSL . crypto . FILETYPE_PEM , file . read ( ) ) expiration = crt . get_notAfter ( ) expiration = _parse_asn1_generalized_date ( expiration ) remaining = expiration - datetime . utcnow ( ) if remaining > timedelta ( days = 30 ) : print "No need to renew {} ({})" . format ( certificate_path , remaining ) continue print "Renewing {} ({})" . format ( certificate_path , remaining ) certificate_request_path = "{}.csr" . format ( common_name ) certificate_request_path = os . path . join ( CERTIFICATES_PATH , certificate_request_path ) signed_cert = "{}-signed.crt" . format ( common_name ) signed_cert = os . path . join ( CERTIFICATES_PATH , signed_cert ) _internal_sign_certificate ( certificate_path , certificate_request_path , signed_cert )
Renew certificates with acme_tiny for let s encrypt
343
12
13,239
def get_crt_common_name ( certificate_path = OLD_CERTIFICATE_PATH ) : try : certificate_file = open ( certificate_path ) crt = OpenSSL . crypto . load_certificate ( OpenSSL . crypto . FILETYPE_PEM , certificate_file . read ( ) ) return crt . get_subject ( ) . commonName except IOError : return None
Get CN from certificate
87
4
13,240
def normalize_cert_dir ( ) : current_cn = get_crt_common_name ( ) if not os . path . isdir ( COZY_CONFIG_PATH ) : print 'Need to create {}' . format ( COZY_CONFIG_PATH ) os . mkdir ( COZY_CONFIG_PATH , 0755 ) if not os . path . isdir ( CERTIFICATES_PATH ) : print 'Need to create {}' . format ( CERTIFICATES_PATH ) os . mkdir ( CERTIFICATES_PATH , 0755 ) if not os . path . isdir ( ACME_PRIVATE_PATH ) : print 'Need to create {}' . format ( ACME_PRIVATE_PATH ) os . mkdir ( ACME_PRIVATE_PATH , 0700 ) if os . path . isfile ( OLD_CERTIFICATE_PATH ) and not os . path . islink ( OLD_CERTIFICATE_PATH ) : target = '{}/{}.crt' . format ( CERTIFICATES_PATH , current_cn ) print 'Move {} to {}' . format ( CERTIFICATES_PATH , target ) os . rename ( OLD_CERTIFICATE_PATH , target ) else : print 'Nothing to do for {}' . format ( OLD_CERTIFICATE_PATH ) if os . path . isfile ( OLD_PRIVATE_KEY_PATH ) and not os . path . islink ( OLD_PRIVATE_KEY_PATH ) : target = '{}/{}.key' . format ( CERTIFICATES_PATH , current_cn ) print 'Move {} to {}' . format ( OLD_PRIVATE_KEY_PATH , target ) os . rename ( OLD_PRIVATE_KEY_PATH , target ) else : print 'Nothing to do for {}' . format ( OLD_PRIVATE_KEY_PATH ) if current_cn : make_links ( current_cn )
Put old cerfificate form to new one
445
9
13,241
def clean_links ( ) : if os . path . isfile ( CURRENT_CERTIFICATE_PATH ) : print 'Delete symlink {}' . format ( CURRENT_CERTIFICATE_PATH ) os . remove ( CURRENT_CERTIFICATE_PATH ) if os . path . isfile ( CURRENT_PRIVATE_KEY_PATH ) : print 'Delete symlink {}' . format ( CURRENT_PRIVATE_KEY_PATH ) os . remove ( CURRENT_PRIVATE_KEY_PATH )
Clean symlink for nginx
118
7
13,242
def make_links ( current_cn ) : if not os . path . isfile ( CURRENT_CERTIFICATE_PATH ) : target = '{}/{}.crt' . format ( CERTIFICATES_PATH , current_cn ) print 'Create symlink {} -> {}' . format ( CURRENT_CERTIFICATE_PATH , target ) os . symlink ( target , CURRENT_CERTIFICATE_PATH ) if not os . path . isfile ( CURRENT_PRIVATE_KEY_PATH ) : target = '{}/{}.key' . format ( CERTIFICATES_PATH , current_cn ) print 'Create symlink {} -> {}' . format ( CURRENT_PRIVATE_KEY_PATH , target ) os . symlink ( target , CURRENT_PRIVATE_KEY_PATH )
Create symlink for nginx
188
7
13,243
def create ( self , agent_cls = None , n_agents = 10 , agent_kwargs = { } , env_cls = Environment , env_kwargs = { } , callback = None , conns = 0 , log_folder = None ) : if not issubclass ( env_cls , Environment ) : raise TypeError ( "Environment class must be derived from ({}" . format ( Environment . __class__ . __name__ ) ) if callback is not None and not hasattr ( callback , '__call__' ) : raise TypeError ( "Callback must be callable." ) if hasattr ( agent_cls , '__iter__' ) : for e in agent_cls : if not issubclass ( e , CreativeAgent ) : raise TypeError ( "All agent classes must be derived from {}" . format ( CreativeAgent . __class__ . __name__ ) ) else : if not issubclass ( agent_cls , CreativeAgent ) : raise TypeError ( "Agent class must be derived from {}" . format ( CreativeAgent . __class__ . __name__ ) ) env = env_cls . create ( * * env_kwargs ) agents = [ ] if hasattr ( agent_cls , '__iter__' ) : for i in range ( len ( n_agents ) ) : agent_kwargs [ i ] [ 'environment' ] = env agent_kwargs [ i ] [ 'log_folder' ] = log_folder agents = agents + [ agent_cls [ i ] ( * * agent_kwargs [ i ] ) for e in range ( n_agents [ i ] ) ] else : agent_kwargs [ 'environment' ] = env agent_kwargs [ 'log_folder' ] = log_folder agents = [ agent_cls ( * * agent_kwargs ) for e in range ( n_agents ) ] if conns > 0 : env . create_random_connections ( n = conns ) return Simulation ( env , callback , log_folder )
A convenience function to create simple simulations .
440
8
13,244
def _init_step ( self ) : self . _age += 1 self . env . age = self . _age self . _log ( logging . INFO , "" ) self . _log ( logging . INFO , "\t***** Step {:0>4} *****" . format ( self . age ) ) self . _log ( logging . INFO , "" ) self . _agents_to_act = self . _get_order_agents ( ) self . _step_processing_time = 0.0 self . _step_start_time = time . time ( )
Initialize next step of simulation to be run .
121
10
13,245
def _finalize_step ( self ) : t = time . time ( ) if self . _callback is not None : self . _callback ( self . age ) t2 = time . time ( ) self . _step_processing_time += t2 - t self . _log ( logging . INFO , "Step {} run in: {:.3f}s ({:.3f}s of " "actual processing time used)" . format ( self . age , self . _step_processing_time , t2 - self . _step_start_time ) ) self . _processing_time += self . _step_processing_time
Finalize simulation step after all agents have acted for the current step .
136
14
13,246
def async_step ( self ) : assert len ( self . _agents_to_act ) == 0 self . _init_step ( ) t = time . time ( ) aiomas . run ( until = self . env . trigger_all ( ) ) self . _agents_to_act = [ ] self . _step_processing_time = time . time ( ) - t self . _finalize_step ( )
Progress simulation by running all agents once asynchronously .
91
11
13,247
def steps ( self , n ) : assert len ( self . _agents_to_act ) == 0 for _ in range ( n ) : self . step ( )
Progress simulation with given amount of steps .
35
8
13,248
def step ( self ) : assert len ( self . _agents_to_act ) == 0 self . next ( ) while len ( self . _agents_to_act ) > 0 : self . next ( )
Progress simulation with a single step .
45
7
13,249
def end ( self , folder = None ) : ret = self . env . destroy ( folder = folder ) self . _end_time = time . time ( ) self . _log ( logging . DEBUG , "Simulation run with {} steps took {:.3f}s to" " complete, while actual processing time was {:.3f}s." . format ( self . age , self . _end_time - self . _start_time , self . _processing_time ) ) return ret
End the simulation and destroy the current simulation environment .
105
10
13,250
def get_unique_record ( self , sql , parameters = None , quiet = False , locked = False ) : results = self . execute_select ( sql , parameters = parameters , quiet = quiet , locked = locked ) assert ( len ( results ) == 1 ) return results [ 0 ]
I use this pattern a lot . Return the single record corresponding to the query .
60
16
13,251
def run_transaction ( self , command_list , do_commit = True ) : pass # I decided against creating this for now. # It may be more useful to create a stored procedure like in e.g. _create_protein_deletion_stored_procedure # in the DDGadmin project and then use callproc for c in command_list : if c . find ( ";" ) != - 1 or c . find ( "\\G" ) != - 1 : # Catches *some* injections raise Exception ( "The SQL command '%s' contains a semi-colon or \\G. This is a potential SQL injection." % c ) if do_commit : sql = "START TRANSACTION;\n%s;\nCOMMIT" % "\n" . join ( command_list ) else : sql = "START TRANSACTION;\n%s;" % "\n" . join ( command_list ) #print(sql) return
This can be used to stage multiple commands and roll back the transaction if an error occurs . This is useful if you want to remove multiple records in multiple tables for one entity but do not want the deletion to occur if the entity is tied to table not specified in the list of commands . Performing this as a transaction avoids the situation where the records are partially removed . If do_commit is false the entire transaction is cancelled .
209
85
13,252
def callproc ( self , procname , parameters = ( ) , quiet = False , expect_return_value = False ) : self . procedures_run += 1 i = 0 errcode = 0 caughte = None out_param_indices = [ ] for j in range ( len ( parameters ) ) : p = parameters [ j ] if type ( p ) == type ( '' ) and p [ 0 ] == '@' : assert ( p . find ( ' ' ) == - 1 ) out_param_indices . append ( j ) if procname not in self . list_stored_procedures ( ) : raise Exception ( "The stored procedure '%s' does not exist." % procname ) if not re . match ( "^\s*\w+\s*$" , procname ) : raise Exception ( "Expected a stored procedure name in callproc but received '%s'." % procname ) while i < self . numTries : i += 1 try : self . _get_connection ( ) cursor = self . connection . cursor ( ) if type ( parameters ) != type ( ( ) ) : parameters = ( parameters , ) errcode = cursor . callproc ( procname , parameters ) self . lastrowid = int ( cursor . lastrowid ) cursor . close ( ) # Get the out parameters out_param_results = [ ] if out_param_indices : out_param_results = self . execute ( 'SELECT %s' % ", " . join ( [ '@_%s_%d AS %s' % ( procname , pindex , parameters [ pindex ] [ 1 : ] ) for pindex in out_param_indices ] ) ) return out_param_results except MySQLdb . OperationalError , e : self . _close_connection ( ) errcode = e [ 0 ] caughte = e continue except : self . _close_connection ( ) traceback . print_exc ( ) break if not quiet : sys . stderr . write ( "\nSQL execution error call stored procedure %s at %s:" % ( procname , datetime . now ( ) . strftime ( "%Y-%m-%d %H:%M:%S" ) ) ) sys . stderr . write ( "\nErrorcode/Error: %d - '%s'.\n" % ( errcode , str ( caughte ) ) ) sys . stderr . flush ( ) raise MySQLdb . OperationalError ( caughte )
Calls a MySQL stored procedure procname and returns the return values . This uses DictCursor . To get return values back out of a stored procedure prefix the parameter with a
547
36
13,253
def t_insert_dict_if_new ( self , tblname , d , PKfields , fields = None ) : SQL , values = self . _insert_dict_if_new_inner ( tblname , d , PKfields , fields = fields ) if SQL != False : self . execute_select ( SQL , parameters = values , locked = True ) return True , d return False , values
A version of insertDictIfNew for transactions . This does not call commit .
86
17
13,254
def create_insert_dict_string ( self , tblname , d , PKfields = [ ] , fields = None , check_existing = False ) : if type ( PKfields ) == type ( "" ) : PKfields = [ PKfields ] if fields == None : fields = sorted ( d . keys ( ) ) values = None SQL = None try : # Search for existing records wherestr = [ ] PKvalues = [ ] for PKfield in PKfields : if d [ PKfield ] == None : wherestr . append ( "%s IS NULL" % PKfield ) else : wherestr . append ( "%s=%%s" % PKfield ) PKvalues . append ( d [ PKfield ] ) PKfields = join ( PKfields , "," ) wherestr = join ( wherestr , " AND " ) record_exists = None if check_existing : record_exists = not ( not ( self . execute_select ( "SELECT %s FROM %s" % ( PKfields , tblname ) + " WHERE %s" % wherestr , parameters = tuple ( PKvalues ) , locked = False ) ) ) SQL = 'INSERT INTO %s (%s) VALUES (%s)' % ( tblname , join ( fields , ", " ) , join ( [ '%s' for x in range ( len ( fields ) ) ] , ',' ) ) values = tuple ( [ d [ k ] for k in fields ] ) return SQL , values , record_exists except Exception , e : raise Exception ( "Error occurred during database insertion: '%s'. %s" % ( str ( e ) , traceback . format_exc ( ) ) )
The main function of the insert_dict functions . This creates and returns the SQL query and parameters used by the other functions but does not insert any data into the database .
355
34
13,255
def bytes_to_int ( byte_array , big_endian = True , signed = False ) : if six . PY3 : order = 'little' if big_endian : order = 'big' return int . from_bytes ( byte_array , byteorder = order , signed = signed ) else : length = len ( byte_array ) if length == 1 : code = 'B' elif length == 2 : code = 'H' elif length == 4 : code = 'L' elif length == 8 : code = 'Q' else : raise Exception ( "bytes_to_int : length of byte_array should be 1, 2, 4, or 8" ) if big_endian : code = '>' + code else : code = '<' + code if signed : code = code . lower ( ) return struct . unpack ( code , byte_array ) [ 0 ]
Converts a byte array to an integer .
195
9
13,256
def ip_to_bytes ( ip_str , big_endian = True ) : if big_endian : code = '>L' else : code = '<L' return bytes ( struct . unpack ( code , socket . inet_aton ( ip_str ) ) [ 0 ] )
Converts an IP given as a string to a byte sequence
65
12
13,257
def get_file ( self , attr_name ) : return os . path . abspath ( os . path . join ( self . folder , "{}.log" . format ( attr_name ) ) )
Return absolute path to logging file for obj s attribute .
45
11
13,258
def log_attr ( self , level , attr_name ) : msg = self . write ( attr_name ) self . log ( level , msg )
Log attribute to file and pass the message to underlying logger .
34
12
13,259
def write ( self , attr_name , prefix = None ) : if self . _folder is None : return separator = "\t" attr = getattr ( self . obj , attr_name ) if hasattr ( attr , '__iter__' ) : msg = separator . join ( [ str ( e ) for e in attr ] ) else : msg = str ( attr ) if prefix is not None : msg = "{}\t{}" . format ( getattr ( self . obj , prefix ) , msg ) path = self . get_file ( attr_name ) with open ( path , 'a' ) as f : f . write ( "{}\n" . format ( msg ) ) return msg
Write attribute s value to a file .
156
8
13,260
def is_lambda ( fun ) : return isinstance ( fun , type ( LAMBDA ) ) and fun . __name__ == LAMBDA . __name__
Check whether the given function is a lambda function .
37
10
13,261
def fixed_point ( is_zero , plus , minus , f , x ) : @ memo_Y def _fixed_point ( fixed_point_fun ) : def __fixed_point ( collected , new ) : diff = minus ( new , collected ) if is_zero ( diff ) : return collected return fixed_point_fun ( plus ( collected , diff ) , f ( diff ) ) return __fixed_point return _fixed_point ( x , f ( x ) )
Get the least fixed point when it can be computed piecewise .
101
13
13,262
def memo_Y ( f ) : sub = { } def Yf ( * args ) : hashable_args = tuple ( [ repr ( x ) for x in args ] ) if args : if hashable_args not in sub : ret = sub [ hashable_args ] = f ( Yf ) ( * args ) else : ret = sub [ hashable_args ] return ret return f ( Yf ) ( ) return f ( Yf )
Memoized Y combinator .
96
7
13,263
def install ( application , default_content_type , encoding = None ) : try : settings = application . settings [ SETTINGS_KEY ] except KeyError : settings = application . settings [ SETTINGS_KEY ] = ContentSettings ( ) settings . default_content_type = default_content_type settings . default_encoding = encoding return settings
Install the media type management settings .
74
7
13,264
def get_settings ( application , force_instance = False ) : try : return application . settings [ SETTINGS_KEY ] except KeyError : if not force_instance : return None return install ( application , None )
Retrieve the media type settings for a application .
46
10
13,265
def add_binary_content_type ( application , content_type , pack , unpack ) : add_transcoder ( application , handlers . BinaryContentHandler ( content_type , pack , unpack ) )
Add handler for a binary content type .
45
8
13,266
def add_text_content_type ( application , content_type , default_encoding , dumps , loads ) : parsed = headers . parse_content_type ( content_type ) parsed . parameters . pop ( 'charset' , None ) normalized = str ( parsed ) add_transcoder ( application , handlers . TextContentHandler ( normalized , dumps , loads , default_encoding ) )
Add handler for a text content type .
85
8
13,267
def add_transcoder ( application , transcoder , content_type = None ) : settings = get_settings ( application , force_instance = True ) settings [ content_type or transcoder . content_type ] = transcoder
Register a transcoder for a specific content type .
49
10
13,268
def set_default_content_type ( application , content_type , encoding = None ) : settings = get_settings ( application , force_instance = True ) settings . default_content_type = content_type settings . default_encoding = encoding
Store the default content type for an application .
53
9
13,269
def get_response_content_type ( self ) : if self . _best_response_match is None : settings = get_settings ( self . application , force_instance = True ) acceptable = headers . parse_accept ( self . request . headers . get ( 'Accept' , settings . default_content_type if settings . default_content_type else '*/*' ) ) try : selected , _ = algorithms . select_content_type ( acceptable , settings . available_content_types ) self . _best_response_match = '/' . join ( [ selected . content_type , selected . content_subtype ] ) if selected . content_suffix is not None : self . _best_response_match = '+' . join ( [ self . _best_response_match , selected . content_suffix ] ) except errors . NoMatch : self . _best_response_match = settings . default_content_type return self . _best_response_match
Figure out what content type will be used in the response .
210
12
13,270
def send_response ( self , body , set_content_type = True ) : settings = get_settings ( self . application , force_instance = True ) handler = settings [ self . get_response_content_type ( ) ] content_type , data_bytes = handler . to_bytes ( body ) if set_content_type : self . set_header ( 'Content-Type' , content_type ) self . add_header ( 'Vary' , 'Accept' ) self . write ( data_bytes )
Serialize and send body in the response .
112
9
13,271
def connections_from_graph ( env , G , edge_data = False ) : if not issubclass ( G . __class__ , ( Graph , DiGraph ) ) : raise TypeError ( "Graph structure must be derived from Networkx's " "Graph or DiGraph." ) if not hasattr ( env , 'get_agents' ) : raise TypeError ( "Parameter 'env' must have get_agents." ) addrs = env . get_agents ( addr = True ) if len ( addrs ) != len ( G ) : raise ValueError ( "The number of graph nodes and agents in the " "environment (excluding the manager agent) must " "match. Now got {} nodes and {} agents." . format ( len ( G ) , len ( addrs ) ) ) # Sort agent addresses to the order they were added to the environment. addrs = sort_addrs ( addrs ) _addrs2nodes ( addrs , G ) conn_map = _edges2conns ( G , edge_data ) env . create_connections ( conn_map )
Create connections for agents in the given environment from the given NetworkX graph structure .
231
16
13,272
def graph_from_connections ( env , directed = False ) : G = DiGraph ( ) if directed else Graph ( ) conn_list = env . get_connections ( data = True ) for agent , conns in conn_list : G . add_node ( agent ) ebunch = [ ] for nb , data in conns . items ( ) : ebunch . append ( ( agent , nb , data ) ) if len ( ebunch ) > 0 : G . add_edges_from ( ebunch ) return G
Create NetworkX graph from agent connections in a given environment .
115
12
13,273
def _addrs2nodes ( addrs , G ) : for i , n in enumerate ( G . nodes ( ) ) : G . node [ n ] [ 'addr' ] = addrs [ i ]
Map agent addresses to nodes in the graph .
46
9
13,274
def _edges2conns ( G , edge_data = False ) : cm = { } for n in G . nodes ( data = True ) : if edge_data : cm [ n [ 1 ] [ 'addr' ] ] = [ ( G . node [ nb ] [ 'addr' ] , G [ n [ 0 ] ] [ nb ] ) for nb in G [ n [ 0 ] ] ] else : cm [ n [ 1 ] [ 'addr' ] ] = [ ( G . node [ nb ] [ 'addr' ] , { } ) for nb in G [ n [ 0 ] ] ] return cm
Create a mapping from graph edges to agent connections to be created .
137
13
13,275
def profile ( request , status = 200 ) : if request . method == 'GET' : if request . GET . get ( "username" , False ) : try : user_profile = User . objects . get ( username = request . GET . get ( "username" ) , userprofile__public = True ) . userprofile except ObjectDoesNotExist : raise Http404 ( "user not found or have not public profile" ) else : user_id = get_user_id ( request ) if get_config ( 'proso_user' , 'google.openid.migration' , default = True ) and not is_user_id_overridden ( request ) : migrated_user = migrate_google_openid_user ( request . user ) if migrated_user is not None : auth . logout ( request ) migrated_user . backend = 'social.backends.google.GoogleOAuth2' auth . login ( request , migrated_user ) user_profile = get_object_or_404 ( UserProfile , user_id = user_id ) return render_json ( request , user_profile , status = status , template = 'user_profile.html' , help_text = profile . __doc__ ) elif request . method == 'POST' : with transaction . atomic ( ) : to_save = json_body ( request . body . decode ( "utf-8" ) ) user_id = get_user_id ( request ) user_profile = get_object_or_404 ( UserProfile , user_id = user_id ) user = to_save . get ( 'user' , None ) if 'send_emails' in to_save : user_profile . send_emails = bool ( to_save [ 'send_emails' ] ) if 'public' in to_save : user_profile . public = bool ( to_save [ 'public' ] ) if user : error = _save_user ( request , user , new = False ) if error : return render_json ( request , error , template = 'user_json.html' , status = 400 ) if 'properties' in to_save : user_profile . save_properties ( to_save [ 'properties' ] ) user_profile . save ( ) request . method = "GET" return profile ( request , status = 202 ) else : return HttpResponseBadRequest ( "method %s is not allowed" . format ( request . method ) )
Get the user s profile . If the user has no assigned profile the HTTP 404 is returned . Make a POST request to modify the user s profile .
530
30
13,276
def signup ( request ) : if request . method == 'GET' : return render ( request , 'user_signup.html' , { } , help_text = signup . __doc__ ) elif request . method == 'POST' : if request . user . is_authenticated ( ) and hasattr ( request . user , "userprofile" ) : return render_json ( request , { 'error' : _ ( 'User already logged in' ) , 'error_type' : 'username_logged' } , template = 'user_json.html' , status = 400 ) credentials = json_body ( request . body . decode ( "utf-8" ) ) error = _save_user ( request , credentials , new = True ) if error is not None : return render_json ( request , error , template = 'user_json.html' , status = 400 ) else : auth . login ( request , request . user ) request . method = "GET" return profile ( request , status = 201 ) else : return HttpResponseBadRequest ( "method %s is not allowed" . format ( request . method ) )
Create a new user with the given credentials .
245
9
13,277
def session ( request ) : if request . user . id is None : # Google Bot return render_json ( request , { 'error' : _ ( 'There is no user available to create a session.' ) , 'error_type' : 'user_undefined' } , status = 400 , template = 'user_json.html' ) if request . method == 'GET' : return render_json ( request , Session . objects . get_current_session ( ) , template = 'user_session.html' , help_text = session . __doc__ ) elif request . method == 'POST' : current_session = Session . objects . get_current_session ( ) if current_session is None : return HttpResponseBadRequest ( "there is no current session to modify" ) data = json_body ( request . body . decode ( "utf-8" ) ) locale = data . get ( 'locale' , None ) time_zone = data . get ( 'time_zone' , None ) display_width = data . get ( 'display_width' , None ) display_height = data . get ( 'display_height' , None ) if locale : current_session . locale = locale if time_zone : current_session . time_zone = TimeZone . objects . from_content ( time_zone ) if display_width : current_session . display_width = display_width if display_height : current_session . display_height = display_height current_session . save ( ) return HttpResponse ( 'ok' , status = 202 ) else : return HttpResponseBadRequest ( "method %s is not allowed" . format ( request . method ) )
Get the information about the current session or modify the current session .
363
13
13,278
def initmobile_view ( request ) : if 'username' in request . GET and 'password' in request . GET : username = request . GET [ 'username' ] password = request . GET [ 'password' ] user = auth . authenticate ( username = username , password = password ) if user is not None : if user . is_active : login ( request , user ) else : user = request . user response = { 'username' : user . username , 'csrftoken' : get_token ( request ) , } if not user . has_usable_password ( ) : password = User . objects . make_random_password ( ) user . set_password ( password ) user . save ( ) response [ 'password' ] = password return HttpResponse ( json . dumps ( response ) )
Create lazy user with a password . Used from the Android app . Also returns csrf token .
171
20
13,279
def parse_device_disk ( token ) : name , token = token . split ( "[" , 1 ) number , flags = token . split ( "]" , 1 ) return name , { "number" : int ( number ) , "write_mostly" : "W" in flags , "faulty" : "F" in flags , "spare" : "S" in flags , "replacement" : "R" in flags , }
Parse a single disk from the header line .
96
10
13,280
def group_by ( what , by ) : return proso . dict . group_keys_by_values ( { x : by ( x ) for x in what } )
Take a list and apply the given function on each its value then group the values by the function results .
37
21
13,281
def copy_resource_dir ( src , dest ) : package_name = "mocha" dest = ( dest + "/" + os . path . basename ( src ) ) . rstrip ( "/" ) if pkg_resources . resource_isdir ( package_name , src ) : if not os . path . isdir ( dest ) : os . makedirs ( dest ) for res in pkg_resources . resource_listdir ( __name__ , src ) : copy_resource_dir ( src + "/" + res , dest ) else : if not os . path . isfile ( dest ) and os . path . splitext ( src ) [ 1 ] not in [ ".pyc" ] : copy_resource_file ( src , dest )
To copy package data directory to destination
165
7
13,282
def init ( ) : mochapyfile = os . path . join ( os . path . join ( CWD , "brew.py" ) ) header ( "Initializing Mocha ..." ) if os . path . isfile ( mochapyfile ) : print ( "WARNING: It seems like Mocha is already setup!" ) print ( "*" * 80 ) else : print ( "" ) print ( "Copying files to the current directory..." ) copy_resource_dir ( SKELETON_DIR + "/create/" , CWD ) print ( "" ) _npm_install_static ( ) print ( "" ) print ( "----- Your Mocha is ready! ----" ) print ( "" ) print ( "> What's next?" ) print ( "- Edit the config [ application/config.py ] " ) print ( "- If necessary setup your model database [ mocha :initdb ]" ) print ( "- Launch app on development mode, run [ mocha :serve ]" ) print ( "" ) print ( "*" * 80 )
Setup Mocha in the current directory
225
7
13,283
def add_view ( name , no_template ) : app_dest = APPLICATION_DIR viewsrc = "%s/create-view/view.py" % SKELETON_DIR tplsrc = "%s/create-view/template.jade" % SKELETON_DIR viewdest_dir = os . path . join ( app_dest , "views" ) viewdest = os . path . join ( viewdest_dir , "%s.py" % name ) tpldest_dir = os . path . join ( app_dest , "templates/%s/Index" % name ) tpldest = os . path . join ( tpldest_dir , "index.jade" ) header ( "Adding New View" ) print ( "View: %s" % viewdest . replace ( CWD , "" ) ) if not no_template : print ( "Template: %s" % tpldest . replace ( CWD , "" ) ) else : print ( "* Template will not be created because of the flag --no-template| -t" ) if os . path . isfile ( viewdest ) or os . path . isfile ( tpldest ) : print ( "*** ERROR: View or Template file exist already" ) else : if not os . path . isdir ( viewdest_dir ) : utils . make_dirs ( viewdest_dir ) copy_resource_file ( viewsrc , viewdest ) with open ( viewdest , "r+" ) as vd : content = vd . read ( ) . replace ( "%ROUTE%" , name . lower ( ) ) . replace ( "%NAV_TITLE%" , name . capitalize ( ) ) vd . seek ( 0 ) vd . write ( content ) vd . truncate ( ) if not no_template : if not os . path . isdir ( tpldest_dir ) : utils . make_dirs ( tpldest_dir ) copy_resource_file ( tplsrc , tpldest ) print ( "" ) print ( "*" * 80 )
Create a new view and template page
461
7
13,284
def initdb ( ) : print ( "Syncing up database..." ) cwd_to_sys_path ( ) if db and hasattr ( db , "Model" ) : db . create_all ( ) for m in db . Model . __subclasses__ ( ) : if hasattr ( m , "initialize__" ) : print ( "Sync up model: %s ..." % m . __name__ ) getattr ( m , "initialize__" ) ( ) print ( "Done" )
Sync database Create new tables etc ...
108
7
13,285
def _set_flask_alembic ( ) : from flask_alembic import Alembic application . app . extensions [ "sqlalchemy" ] = type ( '' , ( ) , { "db" : db } ) alembic = Alembic ( ) alembic . init_app ( application . app ) return alembic
Add the SQLAlchemy object in the global extension
76
10
13,286
def assets2s3 ( ) : import flask_s3 header ( "Assets2S3..." ) print ( "" ) print ( "Building assets files..." ) print ( "" ) build_assets ( application . app ) print ( "" ) print ( "Uploading assets files to S3 ..." ) flask_s3 . create_all ( application . app ) print ( "" )
Upload assets files to S3
80
6
13,287
def launch ( thing , title = False ) : html = htmlFromThing ( thing , title = title ) if not html : print ( "no HTML was generated." ) return fname = "%s/%s.html" % ( tempfile . gettempdir ( ) , str ( time . time ( ) ) ) with open ( fname , 'w' ) as f : f . write ( html ) webbrowser . open ( fname )
analyze a thing create a nice HTML document and launch it .
95
13
13,288
def analyzeThing ( originalThing2 ) : originalThing = copy . copy ( originalThing2 ) things = { } for name in sorted ( dir ( originalThing ) ) : print ( "analyzing" , name ) thing = copy . copy ( originalThing ) if name in webinspect . blacklist or name . lower ( ) in webinspect . blacklist : item = "DID NOT EVALUATE (this will appear as a string)" else : item = getattr ( thing , name ) itemType = type ( item ) . __name__ itemStr = thingToString ( item ) itemEval = "" if "method" in itemStr : if name in webinspect . blacklist or name . lower ( ) in webinspect . blacklist : itemEval = "DID NOT EVALUATE" else : print ( "executing %s()" % name ) print ( "I'm about to try..." ) try : itemEval = thingToString ( getattr ( thing , name ) ( ) ) except Exception as e : exceptionToString ( e ) #print("[%s] (%s) %s {%s}"%(name,itemType,itemStr,itemEval)) things [ name ] = [ itemType , itemStr , itemEval ] return things
analyze an object and all its attirbutes . Returns a dictionary .
280
16
13,289
def websafe ( s ) : s = s . replace ( "<" , "&lt;" ) . replace ( ">" , "&gt;" ) s = s . replace ( r'\x' , r' \x' ) s = s . replace ( "\n" , "<br>" ) return s
return a string with HTML - safe text
66
8
13,290
def slugify ( text , delim = '-' ) : punctuation_re = re . compile ( r'[\t !"#$%&\'()*\-/<=>?@\[\\\]^_`{|},.:]+' ) result = [ ] for word in punctuation_re . split ( text . lower ( ) ) : word = normalize_text ( word ) if word : result . append ( word ) return delim . join ( result )
Generates an slightly worse ASCII - only slug .
101
10
13,291
def javascript_escape ( s , quote_double_quotes = True ) : ustring_re = re . compile ( u"([\u0080-\uffff])" ) def fix ( match ) : return r"\u%04x" % ord ( match . group ( 1 ) ) if type ( s ) == str : s = s . decode ( 'utf-8' ) elif type ( s ) != six . text_type : raise TypeError ( s ) s = s . replace ( '\\' , '\\\\' ) s = s . replace ( '\r' , '\\r' ) s = s . replace ( '\n' , '\\n' ) s = s . replace ( '\t' , '\\t' ) s = s . replace ( "'" , "\\'" ) if quote_double_quotes : s = s . replace ( '"' , '&quot;' ) return str ( ustring_re . sub ( fix , s ) )
Escape characters for javascript strings .
218
7
13,292
def seconds_to_hms_verbose ( t ) : hours = int ( ( t / 3600 ) ) mins = int ( ( t / 60 ) % 60 ) secs = int ( t % 60 ) return ' ' . join ( [ ( hours + ' hour' + ( 's' if hours > 1 else '' ) ) if hours > 0 else '' , ( mins + ' minute' + ( 's' if mins > 1 else '' ) ) if mins > 0 else '' , ( secs + ' second' + ( 's' if secs > 1 else '' ) ) if secs > 0 else '' ] )
Converts seconds float to H hours 8 minutes 30 seconds format
135
12
13,293
def pretty_render ( data , format = 'text' , indent = 0 ) : if format == 'json' : return render_json ( data ) elif format == 'html' : return render_html ( data ) elif format == 'xml' : return render_xml ( data ) else : return dict_to_plaintext ( data , indent = indent )
Render a dict based on a format
78
7
13,294
def dict_to_xml ( xml_dict ) : import lxml . etree as etree root_tag = list ( xml_dict . keys ( ) ) [ 0 ] root = etree . Element ( root_tag ) _dict_to_xml_recurse ( root , xml_dict [ root_tag ] ) return root
Converts a dictionary to an XML ElementTree Element
72
10
13,295
def xml_get_tag ( xml , tag , parent_tag = None , multi_line = False ) : expr_str = '[<:]' + tag + '.*?>(?P<matched_text>.+?)<' if parent_tag : expr_str = '[<:]' + parent_tag + '.*?>.*?' + expr_str expr = re . compile ( expr_str , re . DOTALL | re . IGNORECASE ) if multi_line : return expr . findall ( xml ) else : if expr . search ( xml ) : return expr . search ( xml ) . group ( 'matched_text' ) . strip ( ) else : return None
Returns the tag data for the first instance of the named tag or for all instances if multi is true . If a parent tag is specified then that will be required before the tag .
147
36
13,296
def _build_table ( self ) -> Dict [ State , Tuple [ Multiplex , ... ] ] : result : Dict [ State , Tuple [ Multiplex , ... ] ] = { } for state in self . influence_graph . all_states ( ) : result [ state ] = tuple ( multiplex for multiplex in self . influence_graph . multiplexes if multiplex . is_active ( state ) ) return result
Private method which build the table which map a State to the active multiplex .
92
16
13,297
def _to_base36 ( number ) : if number < 0 : raise ValueError ( "Cannot encode negative numbers" ) chars = "" while number != 0 : number , i = divmod ( number , 36 ) # 36-character alphabet chars = _alphabet [ i ] + chars return chars or "0"
Convert a positive integer to a base36 string .
67
11
13,298
def _pad ( string , size ) : strlen = len ( string ) if strlen == size : return string if strlen < size : return _padding [ 0 : size - strlen ] + string return string [ - size : ]
Pad a string with leading zeroes to fit the given size truncating if necessary .
50
18
13,299
def _random_block ( ) : # TODO: Use a better RNG than random.randint random_number = random . randint ( 0 , DISCRETE_VALUES ) random_string = _to_base36 ( random_number ) return _pad ( random_string , BLOCK_SIZE )
Generate a random string of BLOCK_SIZE length .
67
12