idx
int64
0
251k
question
stringlengths
53
3.53k
target
stringlengths
5
1.23k
len_question
int64
20
893
len_target
int64
3
238
8,500
def export_to_directory_crtomo ( self , directory , norrec = 'norrec' ) : exporter_crtomo . write_files_to_directory ( self . data , directory , norrec = norrec )
Export the sEIT data into data files that can be read by CRTomo .
51
18
8,501
def export_to_crtomo_seit_manager ( self , grid ) : import crtomo g = self . data . groupby ( 'frequency' ) seit_data = { } for name , item in g : print ( name , item . shape , item . size ) if item . shape [ 0 ] > 0 : seit_data [ name ] = item [ [ 'a' , 'b' , 'm' , 'n' , 'r' , 'rpha' ] ] . values seit = crtomo . eitMan ( grid = grid , seit_data = seit_data ) return seit
Return a ready - initialized seit - manager object from the CRTomo tools . This function only works if the crtomo_tools are installed .
139
31
8,502
def get_tape ( self , start = 0 , end = 10 ) : self . tape_start = start self . tape_end = end self . tape_length = end - start tmp = '\n' + "|" + str ( start ) + "| " for i in xrange ( len ( self . tape [ start : end ] ) ) : if i == self . cur_cell : tmp += "[" + str ( self . tape [ i ] ) + "] " else : tmp += ":" + str ( self . tape [ i ] ) + ": " tmp += " |" + str ( end ) + "|" return tmp
Pretty prints the tape values
141
5
8,503
def import_sip04 ( self , filename , timestep = None ) : df = reda_sip04 . import_sip04_data ( filename ) if timestep is not None : print ( 'adding timestep' ) df [ 'timestep' ] = timestep self . _add_to_container ( df ) print ( 'Summary:' ) self . _describe_data ( df )
SIP04 data import
93
5
8,504
def check_dataframe ( self , dataframe ) : if dataframe is None : return None # is this a DataFrame if not isinstance ( dataframe , pd . DataFrame ) : raise Exception ( 'The provided dataframe object is not a pandas.DataFrame' ) for column in self . required_columns : if column not in dataframe : raise Exception ( 'Required column not in dataframe: {0}' . format ( column ) ) return dataframe
Check the given dataframe for the required type and columns
101
11
8,505
def reduce_duplicate_frequencies ( self ) : group_keys = [ 'frequency' , ] if 'timestep' in self . data . columns : group_keys = group_keys + [ 'timestep' , ] g = self . data . groupby ( group_keys ) def group_apply ( item ) : y = item [ [ 'zt_1' , 'zt_2' , 'zt_3' ] ] . values . flatten ( ) zt_imag_std = np . std ( y . imag ) zt_real_std = np . std ( y . real ) zt_imag_min = np . min ( y . imag ) zt_real_min = np . min ( y . real ) zt_imag_max = np . max ( y . imag ) zt_real_max = np . max ( y . real ) zt_imag_mean = np . mean ( y . imag ) zt_real_mean = np . mean ( y . real ) dfn = pd . DataFrame ( { 'zt_real_mean' : zt_real_mean , 'zt_real_std' : zt_real_std , 'zt_real_min' : zt_real_min , 'zt_real_max' : zt_real_max , 'zt_imag_mean' : zt_imag_mean , 'zt_imag_std' : zt_imag_std , 'zt_imag_min' : zt_imag_min , 'zt_imag_max' : zt_imag_max , } , index = [ 0 , ] ) dfn [ 'count' ] = len ( y ) dfn . index . name = 'index' return dfn p = g . apply ( group_apply ) p . index = p . index . droplevel ( 'index' ) if len ( group_keys ) > 1 : p = p . swaplevel ( 0 , 1 ) . sort_index ( ) return p
In case multiple frequencies were measured average them and compute std min max values for zt .
455
18
8,506
def _load_class ( class_path ) : parts = class_path . rsplit ( '.' , 1 ) module = __import__ ( parts [ 0 ] , fromlist = parts [ 1 ] ) return getattr ( module , parts [ 1 ] )
Load the module and return the required class .
55
9
8,507
def rev_comp ( seq , molecule = 'dna' ) : if molecule == 'dna' : nuc_dict = { "A" : "T" , "B" : "V" , "C" : "G" , "D" : "H" , "G" : "C" , "H" : "D" , "K" : "M" , "M" : "K" , "N" : "N" , "R" : "Y" , "S" : "S" , "T" : "A" , "V" : "B" , "W" : "W" , "Y" : "R" } elif molecule == 'rna' : nuc_dict = { "A" : "U" , "B" : "V" , "C" : "G" , "D" : "H" , "G" : "C" , "H" : "D" , "K" : "M" , "M" : "K" , "N" : "N" , "R" : "Y" , "S" : "S" , "U" : "A" , "V" : "B" , "W" : "W" , "Y" : "R" } else : raise ValueError ( "rev_comp requires molecule to be dna or rna" ) if not isinstance ( seq , six . string_types ) : raise TypeError ( "seq must be a string!" ) return '' . join ( [ nuc_dict [ c ] for c in seq . upper ( ) [ : : - 1 ] ] )
DNA|RNA seq - > reverse complement
360
8
8,508
def from_json ( cls , key , scopes , subject = None ) : credentials_type = key [ 'type' ] if credentials_type != 'service_account' : raise ValueError ( 'key: expected type service_account ' '(got %s)' % credentials_type ) email = key [ 'client_email' ] key = OpenSSL . crypto . load_privatekey ( OpenSSL . crypto . FILETYPE_PEM , key [ 'private_key' ] ) return cls ( key = key , email = email , scopes = scopes , subject = subject )
Alternate constructor intended for using JSON format of private key .
126
12
8,509
def from_pkcs12 ( cls , key , email , scopes , subject = None , passphrase = PKCS12_PASSPHRASE ) : key = OpenSSL . crypto . load_pkcs12 ( key , passphrase ) . get_privatekey ( ) return cls ( key = key , email = email , scopes = scopes , subject = subject )
Alternate constructor intended for using . p12 files .
84
11
8,510
def issued_at ( self ) : issued_at = self . _issued_at if issued_at is None : self . _issued_at = int ( time . time ( ) ) return self . _issued_at
Time when access token was requested as seconds since epoch .
47
11
8,511
def access_token ( self ) : if ( self . _access_token is None or self . expiration_time <= int ( time . time ( ) ) ) : resp = self . make_access_request ( ) self . _access_token = resp . json ( ) [ 'access_token' ] return self . _access_token
Stores always valid OAuth2 access token .
72
10
8,512
def make_access_request ( self ) : del self . issued_at assertion = b'.' . join ( ( self . header ( ) , self . claims ( ) , self . signature ( ) ) ) post_data = { 'grant_type' : GRANT_TYPE , 'assertion' : assertion , } resp = requests . post ( AUDIENCE , post_data ) if resp . status_code != 200 : raise AuthenticationError ( resp ) return resp
Makes an OAuth2 access token request with crafted JWT and signature .
99
16
8,513
def authorized_request ( self , method , url , * * kwargs ) : headers = kwargs . pop ( 'headers' , { } ) if headers . get ( 'Authorization' ) or kwargs . get ( 'auth' ) : raise ValueError ( "Found custom Authorization header, " "method call would override it." ) headers [ 'Authorization' ] = 'Bearer ' + self . access_token return requests . request ( method , url , headers = headers , * * kwargs )
Shortcut for requests . request with proper Authorization header .
111
11
8,514
def import_txt ( filename , * * kwargs ) : # read in text file into a buffer with open ( filename , 'r' ) as fid : text = fid . read ( ) strings_to_replace = { 'Mixed / non conventional' : 'Mixed/non-conventional' , 'Date' : 'Date Time AM-PM' , } for key in strings_to_replace . keys ( ) : text = text . replace ( key , strings_to_replace [ key ] ) buffer = StringIO ( text ) # read data file data_raw = pd . read_csv ( buffer , # sep='\t', delim_whitespace = True , ) # clean up column names data_raw . columns = [ x . strip ( ) for x in data_raw . columns . tolist ( ) ] # generate electrode positions data = _convert_coords_to_abmn_X ( data_raw [ [ 'Spa.1' , 'Spa.2' , 'Spa.3' , 'Spa.4' ] ] , * * kwargs ) # [mV] / [mA] data [ 'r' ] = data_raw [ 'Vp' ] / data_raw [ 'In' ] data [ 'Vmn' ] = data_raw [ 'Vp' ] data [ 'Iab' ] = data_raw [ 'In' ] # rename electrode denotations rec_max = kwargs . get ( 'reciprocals' , None ) if rec_max is not None : print ( 'renumbering electrode numbers' ) data [ [ 'a' , 'b' , 'm' , 'n' ] ] = rec_max + 1 - data [ [ 'a' , 'b' , 'm' , 'n' ] ] return data , None , None
Import Syscal measurements from a text file exported as Spreadsheet .
403
14
8,515
def import_bin ( filename , * * kwargs ) : metadata , data_raw = _import_bin ( filename ) skip_rows = kwargs . get ( 'skip_rows' , 0 ) if skip_rows > 0 : data_raw . drop ( data_raw . index [ range ( 0 , skip_rows ) ] , inplace = True ) data_raw = data_raw . reset_index ( ) if kwargs . get ( 'check_meas_nums' , True ) : # check that first number is 0 if data_raw [ 'measurement_num' ] . iloc [ 0 ] != 0 : print ( 'WARNING: Measurement numbers do not start with 0 ' + '(did you download ALL data?)' ) # check that all measurement numbers increase by one if not np . all ( np . diff ( data_raw [ 'measurement_num' ] ) ) == 1 : print ( 'WARNING ' 'Measurement numbers are not consecutive. ' 'Perhaps the first measurement belongs to another measurement?' ' Use the skip_rows parameter to skip those measurements' ) # now check if there is a jump in measurement numbers somewhere # ignore first entry as this will always be nan diff = data_raw [ 'measurement_num' ] . diff ( ) [ 1 : ] jump = np . where ( diff != 1 ) [ 0 ] if len ( jump ) > 0 : print ( 'WARNING: One or more jumps in measurement numbers detected' ) print ( 'The jump indices are:' ) for jump_nr in jump : print ( jump_nr ) print ( 'Removing data points subsequent to the first jump' ) data_raw = data_raw . iloc [ 0 : jump [ 0 ] + 1 , : ] if data_raw . shape [ 0 ] == 0 : # no data present, return a bare DataFrame return pd . DataFrame ( columns = [ 'a' , 'b' , 'm' , 'n' , 'r' ] ) , None , None data = _convert_coords_to_abmn_X ( data_raw [ [ 'x_a' , 'x_b' , 'x_m' , 'x_n' ] ] , * * kwargs ) # [mV] / [mA] data [ 'r' ] = data_raw [ 'vp' ] / data_raw [ 'Iab' ] data [ 'Vmn' ] = data_raw [ 'vp' ] data [ 'vab' ] = data_raw [ 'vab' ] data [ 'Iab' ] = data_raw [ 'Iab' ] data [ 'mdelay' ] = data_raw [ 'mdelay' ] data [ 'Tm' ] = data_raw [ 'Tm' ] data [ 'Mx' ] = data_raw [ 'Mx' ] data [ 'chargeability' ] = data_raw [ 'm' ] data [ 'q' ] = data_raw [ 'q' ] # rename electrode denotations rec_max = kwargs . get ( 'reciprocals' , None ) if rec_max is not None : print ( 'renumbering electrode numbers' ) data [ [ 'a' , 'b' , 'm' , 'n' ] ] = rec_max + 1 - data [ [ 'a' , 'b' , 'm' , 'n' ] ] # print(data) return data , None , None
Read a . bin file generated by the IRIS Instruments Syscal Pro System and return a curated dataframe for further processing . This dataframe contains only information currently deemed important . Use the function reda . importers . iris_syscal_pro_binary . _import_bin to extract ALL information from a given . bin file .
755
70
8,516
def call_and_notificate ( args , opts ) : # store starttime stctime = time . clock ( ) stttime = time . time ( ) stdtime = datetime . datetime . now ( ) # call subprocess exit_code , output = call ( args ) # calculate delta cdelta = time . clock ( ) - stctime tdelta = time . time ( ) - stttime endtime = datetime . datetime . now ( ) if exit_code == 0 : status = u"Success" else : status = u"Fail (%d)" % exit_code # create email body body = EMAIL_BODY % { 'prog' : get_command_str ( args ) , 'status' : status , 'stdtime' : stdtime , 'endtime' : endtime , 'tdelta' : tdelta , 'cdelta' : cdelta , 'output' : output , 'cwd' : os . getcwd ( ) , } # create email subject subject = opts . subject % { 'prog' : get_command_str ( args ) , 'status' : status . lower ( ) , } # create email message msg = create_message ( opts . from_addr , opts . to_addr , subject , body , opts . encoding ) # obtain password from keyring password = keyring . get_password ( 'notify' , opts . username ) # send email send_email ( msg , opts . host , opts . port , opts . username , password )
Execute specified arguments and send notification email
335
8
8,517
def get_thumbnail_format ( self ) : if self . field . thumbnail_format : # Over-ride was given, use that instead. return self . field . thumbnail_format . lower ( ) else : # Use the existing extension from the file. filename_split = self . name . rsplit ( '.' , 1 ) return filename_split [ - 1 ]
Determines the target thumbnail type either by looking for a format override specified at the model level or by using the format the user uploaded .
78
28
8,518
def save ( self , name , content , save = True ) : super ( ImageWithThumbsFieldFile , self ) . save ( name , content , save ) try : self . generate_thumbs ( name , content ) except IOError , exc : if 'cannot identify' in exc . message or 'bad EPS header' in exc . message : raise UploadedImageIsUnreadableError ( "We were unable to read the uploaded image. " "Please make sure you are uploading a valid image file." ) else : raise
Handles some extra logic to generate the thumbnails when the original file is uploaded .
110
17
8,519
def delete ( self , save = True ) : for thumb in self . field . thumbs : thumb_name , thumb_options = thumb thumb_filename = self . _calc_thumb_filename ( thumb_name ) self . storage . delete ( thumb_filename ) super ( ImageWithThumbsFieldFile , self ) . delete ( save )
Deletes the original plus any thumbnails . Fails silently if there are errors deleting the thumbnails .
73
21
8,520
def dump_edn_val ( v ) : if isinstance ( v , ( str , unicode ) ) : return json . dumps ( v ) elif isinstance ( v , E ) : return unicode ( v ) else : return dumps ( v )
edn simple value dump
55
5
8,521
def tx_schema ( self , * * kwargs ) : for s in self . schema . schema : tx = self . tx ( s , * * kwargs )
Builds the data structure edn and puts it in the db
38
13
8,522
def tx ( self , * args , * * kwargs ) : if 0 == len ( args ) : return TX ( self ) ops = [ ] for op in args : if isinstance ( op , list ) : ops += op elif isinstance ( op , ( str , unicode ) ) : ops . append ( op ) if 'debug' in kwargs : pp ( ops ) tx_proc = "[ %s ]" % "" . join ( ops ) x = self . rest ( 'POST' , self . uri_db , data = { "tx-data" : tx_proc } ) return x
Executes a raw tx string or get a new TX object to work with .
132
16
8,523
def e ( self , eid ) : ta = datetime . datetime . now ( ) rs = self . rest ( 'GET' , self . uri_db + '-/entity' , data = { 'e' : int ( eid ) } , parse = True ) tb = datetime . datetime . now ( ) - ta print cl ( '<<< fetched entity %s in %sms' % ( eid , tb . microseconds / 1000.0 ) , 'cyan' ) return rs
Get an Entity
114
3
8,524
def retract ( self , e , a , v ) : ta = datetime . datetime . now ( ) ret = u"[:db/retract %i :%s %s]" % ( e , a , dump_edn_val ( v ) ) rs = self . tx ( ret ) tb = datetime . datetime . now ( ) - ta print cl ( '<<< retracted %s,%s,%s in %sms' % ( e , a , v , tb . microseconds / 1000.0 ) , 'cyan' ) return rs
redact the value of an attribute
124
7
8,525
def datoms ( self , index = 'aevt' , e = '' , a = '' , v = '' , limit = 0 , offset = 0 , chunk = 100 , start = '' , end = '' , since = '' , as_of = '' , history = '' , * * kwargs ) : assert index in [ 'aevt' , 'eavt' , 'avet' , 'vaet' ] , "non-existant index" data = { 'index' : index , 'a' : ':{0}' . format ( a ) if a else '' , 'v' : dump_edn_val ( v ) if v else '' , 'e' : int ( e ) if e else '' , 'offset' : offset or 0 , 'start' : start , 'end' : end , 'limit' : limit , 'history' : 'true' if history else '' , 'as-of' : int ( as_of ) if as_of else '' , 'since' : int ( since ) if since else '' , } data [ 'limit' ] = offset + chunk rs = True while rs and ( data [ 'offset' ] < ( limit or 1000000000 ) ) : ta = datetime . datetime . now ( ) rs = self . rest ( 'GET' , self . uri_db + '-/datoms' , data = data , parse = True ) if not len ( rs ) : rs = False tb = datetime . datetime . now ( ) - ta print cl ( '<<< fetched %i datoms at offset %i in %sms' % ( len ( rs ) , data [ 'offset' ] , tb . microseconds / 1000.0 ) , 'cyan' ) for r in rs : yield r data [ 'offset' ] += chunk
Returns a lazy generator that will only fetch groups of datoms at the chunk size specified .
396
18
8,526
def debug ( self , defn , args , kwargs , fmt = None , color = 'green' ) : ta = datetime . datetime . now ( ) rs = defn ( * args , * * kwargs ) tb = datetime . datetime . now ( ) - ta fmt = fmt or "processed {defn} in {ms}ms" logmsg = fmt . format ( ms = tb . microseconds / 1000.0 , defn = defn ) "terminal output" print cl ( logmsg , color ) "logging output" logging . debug ( logmsg ) return rs
debug timing colored terminal output
133
5
8,527
def find ( self , * args , * * kwargs ) : return Query ( * args , db = self , schema = self . schema )
new query builder on current db
31
6
8,528
def hashone ( self ) : rs = self . one ( ) if not rs : return { } else : finds = " " . join ( self . _find ) . split ( ' ' ) return dict ( zip ( ( x . replace ( '?' , '' ) for x in finds ) , rs ) )
execute query get back
65
4
8,529
def all ( self ) : query , inputs = self . _toedn ( ) return self . db . q ( query , inputs = inputs , limit = self . _limit , offset = self . _offset , history = self . _history )
execute query get all list of lists
52
7
8,530
def _toedn ( self ) : finds = u"" inputs = u"" wheres = u"" args = [ ] ": in and args" for a , b in self . _input : inputs += " {0}" . format ( a ) args . append ( dump_edn_val ( b ) ) if inputs : inputs = u":in ${0}" . format ( inputs ) " :where " for where in self . _where : if isinstance ( where , ( str , unicode ) ) : wheres += u"[{0}]" . format ( where ) elif isinstance ( where , ( list ) ) : wheres += u" " . join ( [ u"[{0}]" . format ( w ) for w in where ] ) " find: " if self . _find == [ ] : #find all fs = set ( ) for p in wheres . replace ( '[' , ' ' ) . replace ( ']' , ' ' ) . split ( ' ' ) : if p . startswith ( '?' ) : fs . add ( p ) self . _find = list ( fs ) finds = " " . join ( self . _find ) " all togethr now..." q = u"""[ :find {0} {1} :where {2} ]""" . format ( finds , inputs , wheres ) return q , args
prepare the query for the rest api
291
8
8,531
def add ( self , * args , * * kwargs ) : assert self . resp is None , "Transaction already committed" entity , av_pairs , args = None , [ ] , list ( args ) if len ( args ) : if isinstance ( args [ 0 ] , ( int , long ) ) : " first arg is an entity or tempid" entity = E ( args [ 0 ] , tx = self ) elif isinstance ( args [ 0 ] , E ) : " dont resuse entity from another tx" if args [ 0 ] . _tx is self : entity = args [ 0 ] else : if int ( args [ 0 ] ) > 0 : " use the entity id on a new obj" entity = E ( int ( args [ 0 ] ) , tx = self ) args [ 0 ] = None " drop the first arg" if entity is not None or args [ 0 ] in ( None , False , 0 ) : v = args . pop ( 0 ) " auto generate a temp id?" if entity is None : entity = E ( self . ctmpid , tx = self ) self . ctmpid -= 1 " a,v from kwargs" if len ( args ) == 0 and kwargs : for a , v in kwargs . iteritems ( ) : self . addeav ( entity , a , v ) " a,v from args " if len ( args ) : assert len ( args ) % 2 == 0 , "imbalanced a,v in args: " % args for first , second in pairwise ( args ) : if not first . startswith ( ':' ) : first = ':' + first if not first . endswith ( '/' ) : " longhand used: blah/blah " if isinstance ( second , list ) : for v in second : self . addeav ( entity , first , v ) else : self . addeav ( entity , first , second ) continue elif isinstance ( second , dict ) : " shorthand used: blah/, dict " for a , v in second . iteritems ( ) : self . addeav ( entity , "%s%s" % ( first , a ) , v ) continue elif isinstance ( second , ( list , tuple ) ) : " shorthand used: blah/, list|tuple " for a , v in pairwise ( second ) : self . addeav ( entity , "%s%s" % ( first , a ) , v ) continue else : raise Exception , "invalid pair: %s : %s" % ( first , second ) "pass back the entity so it can be resolved after tx()" return entity
Accumulate datums for the transaction
565
8
8,532
def resolve ( self ) : assert isinstance ( self . resp , dict ) , "Transaction in uncommitted or failed state" rids = [ ( v ) for k , v in self . resp [ 'tempids' ] . items ( ) ] self . txid = self . resp [ 'tx-data' ] [ 0 ] [ 'tx' ] rids . reverse ( ) for t in self . tmpents : pos = self . tmpents . index ( t ) t . _eid , t . _txid = rids [ pos ] , self . txid for t in self . realents : t . _txid = self . txid
Resolve one or more tempids . Automatically takes place after transaction is executed .
140
17
8,533
def get_usage ( self ) : resp = requests . get ( FITNESS_URL , timeout = 30 ) resp . raise_for_status ( ) soup = BeautifulSoup ( resp . text , "html5lib" ) eastern = pytz . timezone ( 'US/Eastern' ) output = [ ] for item in soup . findAll ( "div" , { "class" : "barChart" } ) : data = [ x . strip ( ) for x in item . get_text ( "\n" ) . strip ( ) . split ( "\n" ) ] data = [ x for x in data if x ] name = re . sub ( r"\s*(Hours)?\s*-?\s*(CLOSED|OPEN)?$" , "" , data [ 0 ] , re . I ) . strip ( ) output . append ( { "name" : name , "open" : "Open" in data [ 1 ] , "count" : int ( data [ 2 ] . rsplit ( " " , 1 ) [ - 1 ] ) , "updated" : eastern . localize ( datetime . datetime . strptime ( data [ 3 ] [ 8 : ] . strip ( ) , '%m/%d/%Y %I:%M %p' ) ) . isoformat ( ) , "percent" : int ( data [ 4 ] [ : - 1 ] ) } ) return output
Get fitness locations and their current usage .
307
8
8,534
def search ( self , keyword ) : params = { "source" : "map" , "description" : keyword } data = self . _request ( ENDPOINTS [ 'SEARCH' ] , params ) data [ 'result_data' ] = [ res for res in data [ 'result_data' ] if isinstance ( res , dict ) ] return data
Return all buildings related to the provided query .
79
9
8,535
def compute_K_numerical ( dataframe , settings = None , keep_dir = None ) : inversion_code = reda . rcParams . get ( 'geom_factor.inversion_code' , 'crtomo' ) if inversion_code == 'crtomo' : import reda . utils . geom_fac_crtomo as geom_fac_crtomo if keep_dir is not None : keep_dir = os . path . abspath ( keep_dir ) K = geom_fac_crtomo . compute_K ( dataframe , settings , keep_dir ) else : raise Exception ( 'Inversion code {0} not implemented for K computation' . format ( inversion_code ) ) return K
Use a finite - element modeling code to infer geometric factors for meshes with topography or irregular electrode spacings .
167
22
8,536
def _get_object_key ( self , p_object ) : matched_key = None matched_index = None if hasattr ( p_object , self . _searchNames [ 0 ] ) : return getattr ( p_object , self . _searchNames [ 0 ] ) for x in xrange ( len ( self . _searchNames ) ) : key = self . _searchNames [ x ] if hasattr ( p_object , key ) : matched_key = key matched_index = x if matched_key is None : raise KeyError ( ) if matched_index != 0 and self . _searchOptimize : self . _searchNames . insert ( 0 , self . _searchNames . pop ( matched_index ) ) return getattr ( p_object , matched_key )
Get key from object
168
4
8,537
def correct ( self , temp , we_t ) : if not PIDTempComp . in_range ( temp ) : return None n_t = self . cf_t ( temp ) if n_t is None : return None we_c = we_t * n_t return we_c
Compute weC from weT
63
7
8,538
def compute_norrec_differences ( df , keys_diff ) : raise Exception ( 'This function is depreciated!' ) print ( 'computing normal-reciprocal differences' ) # df.sort_index(level='norrec') def norrec_diff ( x ) : """compute norrec_diff""" if x . shape [ 0 ] != 2 : return np . nan else : return np . abs ( x . iloc [ 1 ] - x . iloc [ 0 ] ) keys_keep = list ( set ( df . columns . tolist ( ) ) - set ( keys_diff ) ) agg_dict = { x : _first for x in keys_keep } agg_dict . update ( { x : norrec_diff for x in keys_diff } ) for key in ( 'id' , 'timestep' , 'frequency' ) : if key in agg_dict : del ( agg_dict [ key ] ) # for frequencies, we could (I think) somehow prevent grouping by # frequencies... df = df . groupby ( ( 'timestep' , 'frequency' , 'id' ) ) . agg ( agg_dict ) # df.rename(columns={'r': 'Rdiff'}, inplace=True) df . reset_index ( ) return df
DO NOT USE ANY MORE - DEPRECIATED!
282
11
8,539
def _normalize_abmn ( abmn ) : abmn_2d = np . atleast_2d ( abmn ) abmn_normalized = np . hstack ( ( np . sort ( abmn_2d [ : , 0 : 2 ] , axis = 1 ) , np . sort ( abmn_2d [ : , 2 : 4 ] , axis = 1 ) , ) ) return abmn_normalized
return a normalized version of abmn
94
7
8,540
def assign_norrec_diffs ( df , diff_list ) : extra_dims = [ x for x in ( 'timestep' , 'frequency' , 'id' ) if x in df . columns ] g = df . groupby ( extra_dims ) def subrow ( row ) : if row . size == 2 : return row . iloc [ 1 ] - row . iloc [ 0 ] else : return np . nan for diffcol in diff_list : diff = g [ diffcol ] . agg ( subrow ) . reset_index ( ) # rename the column cols = list ( diff . columns ) cols [ - 1 ] = diffcol + 'diff' diff . columns = cols df = df . drop ( cols [ - 1 ] , axis = 1 , errors = 'ignore' ) . merge ( diff , on = extra_dims , how = 'outer' ) df = df . sort_values ( extra_dims ) return df
Compute and write the difference between normal and reciprocal values for all columns specified in the diff_list parameter .
211
22
8,541
def handle_authenticated_user ( self , response ) : current_user = get_user ( self . request ) ulogin , registered = ULoginUser . objects . get_or_create ( uid = response [ 'uid' ] , network = response [ 'network' ] , defaults = { 'identity' : response [ 'identity' ] , 'user' : current_user } ) if not registered : ulogin_user = ulogin . user logger . debug ( 'uLogin user already exists' ) if current_user != ulogin_user : logger . debug ( "Mismatch: %s is not a %s. Take over it!" % ( current_user , ulogin_user ) ) ulogin . user = current_user ulogin . save ( ) return get_user ( self . request ) , ulogin , registered
Handles the ULogin response if user is already authenticated
182
11
8,542
def form_valid ( self , form ) : response = self . ulogin_response ( form . cleaned_data [ 'token' ] , self . request . get_host ( ) ) if 'error' in response : return render ( self . request , self . error_template_name , { 'json' : response } ) if user_is_authenticated ( get_user ( self . request ) ) : user , identity , registered = self . handle_authenticated_user ( response ) else : user , identity , registered = self . handle_anonymous_user ( response ) assign . send ( sender = ULoginUser , user = get_user ( self . request ) , request = self . request , registered = registered , ulogin_user = identity , ulogin_data = response ) return redirect ( self . request . GET . get ( REDIRECT_FIELD_NAME ) or '/' )
The request from ulogin service is correct
192
8
8,543
def ulogin_response ( self , token , host ) : response = requests . get ( settings . TOKEN_URL , params = { 'token' : token , 'host' : host } ) content = response . content if sys . version_info >= ( 3 , 0 ) : content = content . decode ( 'utf8' ) return json . loads ( content )
Makes a request to ULOGIN
78
8
8,544
def initialise_parsimonious_states ( tree , feature , states ) : ps_feature_down = get_personalized_feature_name ( feature , BU_PARS_STATES ) ps_feature = get_personalized_feature_name ( feature , PARS_STATES ) all_states = set ( states ) for node in tree . traverse ( ) : state = getattr ( node , feature , set ( ) ) if not state : node . add_feature ( ps_feature_down , all_states ) else : node . add_feature ( ps_feature_down , state ) node . add_feature ( ps_feature , getattr ( node , ps_feature_down ) )
Initializes the bottom - up state arrays for tips based on their states given by the feature .
152
19
8,545
def uppass ( tree , feature ) : ps_feature = get_personalized_feature_name ( feature , BU_PARS_STATES ) for node in tree . traverse ( 'postorder' ) : if not node . is_leaf ( ) : children_states = get_most_common_states ( getattr ( child , ps_feature ) for child in node . children ) node_states = getattr ( node , ps_feature ) state_intersection = node_states & children_states node . add_feature ( ps_feature , state_intersection if state_intersection else node_states )
UPPASS traverses the tree starting from the tips and going up till the root and assigns to each parent node a state based on the states of its child nodes .
133
33
8,546
def parsimonious_acr ( tree , character , prediction_method , states , num_nodes , num_tips ) : initialise_parsimonious_states ( tree , character , states ) uppass ( tree , character ) results = [ ] result = { STATES : states , NUM_NODES : num_nodes , NUM_TIPS : num_tips } logger = logging . getLogger ( 'pastml' ) def process_result ( method , feature ) : out_feature = get_personalized_feature_name ( character , method ) if prediction_method != method else character res = result . copy ( ) res [ NUM_SCENARIOS ] , res [ NUM_UNRESOLVED_NODES ] , res [ NUM_STATES_PER_NODE ] = choose_parsimonious_states ( tree , feature , out_feature ) res [ NUM_STATES_PER_NODE ] /= num_nodes res [ PERC_UNRESOLVED ] = res [ NUM_UNRESOLVED_NODES ] * 100 / num_nodes logger . debug ( '{} node{} unresolved ({:.2f}%) for {} by {}, ' 'i.e. {:.4f} state{} per node in average.' . format ( res [ NUM_UNRESOLVED_NODES ] , 's are' if res [ NUM_UNRESOLVED_NODES ] != 1 else ' is' , res [ PERC_UNRESOLVED ] , character , method , res [ NUM_STATES_PER_NODE ] , 's' if res [ NUM_STATES_PER_NODE ] > 1 else '' ) ) res [ CHARACTER ] = out_feature res [ METHOD ] = method results . append ( res ) if prediction_method in { ACCTRAN , MP } : feature = get_personalized_feature_name ( character , PARS_STATES ) if prediction_method == MP : feature = get_personalized_feature_name ( feature , ACCTRAN ) acctran ( tree , character , feature ) result [ STEPS ] = get_num_parsimonious_steps ( tree , feature ) process_result ( ACCTRAN , feature ) bu_feature = get_personalized_feature_name ( character , BU_PARS_STATES ) for node in tree . traverse ( ) : if prediction_method == ACCTRAN : node . del_feature ( bu_feature ) node . del_feature ( feature ) if prediction_method != ACCTRAN : downpass ( tree , character , states ) feature = get_personalized_feature_name ( character , PARS_STATES ) if prediction_method == DOWNPASS : result [ STEPS ] = get_num_parsimonious_steps ( tree , feature ) if prediction_method in { DOWNPASS , MP } : process_result ( DOWNPASS , feature ) if prediction_method in { DELTRAN , MP } : deltran ( tree , character ) if prediction_method == DELTRAN : result [ STEPS ] = get_num_parsimonious_steps ( tree , feature ) process_result ( DELTRAN , feature ) for node in tree . traverse ( ) : node . del_feature ( feature ) logger . debug ( "Parsimonious reconstruction for {} requires {} state changes." . format ( character , result [ STEPS ] ) ) return results
Calculates parsimonious states on the tree and stores them in the corresponding feature .
756
18
8,547
def balance_to_ringchart_items ( balance , account = '' , show = SHOW_CREDIT ) : show = show if show else SHOW_CREDIT # cannot show all in ring chart rcis = [ ] for item in balance : subaccount = item [ 'account_fragment' ] if not account else ':' . join ( ( account , item [ 'account_fragment' ] ) ) ch = balance_to_ringchart_items ( item [ 'children' ] , subaccount , show ) amount = item [ 'balance' ] if show == SHOW_CREDIT else - item [ 'balance' ] if amount < 0 : continue # omit negative amounts wedge_amount = max ( amount , sum ( map ( float , ch ) ) ) rci = gtkchartlib . ringchart . RingChartItem ( wedge_amount , tooltip = '{}\n{}' . format ( subaccount , wedge_amount ) , items = ch ) rcis . append ( rci ) return rcis
Convert a balance data structure into RingChartItem objects .
216
12
8,548
def log_to_file ( log_path , log_urllib = False , limit = None ) : log_path = log_path file_handler = logging . FileHandler ( log_path ) if limit : file_handler = RotatingFileHandler ( log_path , mode = 'a' , maxBytes = limit * 1024 * 1024 , backupCount = 2 , encoding = None , delay = 0 ) fmt = '[%(asctime)s %(filename)18s] %(levelname)-7s - %(message)7s' date_fmt = '%Y-%m-%d %H:%M:%S' formatter = logging . Formatter ( fmt , datefmt = date_fmt ) file_handler . setFormatter ( formatter ) logger . addHandler ( file_handler ) if log_urllib : urllib_logger . addHandler ( file_handler ) urllib_logger . setLevel ( logging . DEBUG )
Add file_handler to logger
217
6
8,549
def session_context ( fn ) : @ functools . wraps ( fn ) def wrap ( * args , * * kwargs ) : session = args [ 0 ] . Session ( ) # obtain from self result = fn ( * args , session = session , * * kwargs ) session . close ( ) return result return wrap
Handles session setup and teardown
70
8
8,550
def _syscal_write_electrode_coords ( fid , spacing , N ) : fid . write ( '# X Y Z\n' ) for i in range ( 0 , N ) : fid . write ( '{0} {1} {2} {3}\n' . format ( i + 1 , i * spacing , 0 , 0 ) )
helper function that writes out electrode positions to a file descriptor
79
12
8,551
def _syscal_write_quadpoles ( fid , quadpoles ) : fid . write ( '# A B M N\n' ) for nr , quadpole in enumerate ( quadpoles ) : fid . write ( '{0} {1} {2} {3} {4}\n' . format ( nr , quadpole [ 0 ] , quadpole [ 1 ] , quadpole [ 2 ] , quadpole [ 3 ] ) )
helper function that writes the actual measurement configurations to a file descriptor .
100
14
8,552
def syscal_save_to_config_txt ( filename , configs , spacing = 1 ) : print ( 'Number of measurements: ' , configs . shape [ 0 ] ) number_of_electrodes = configs . max ( ) . astype ( int ) with open ( filename , 'w' ) as fid : _syscal_write_electrode_coords ( fid , spacing , number_of_electrodes ) _syscal_write_quadpoles ( fid , configs . astype ( int ) )
Write configurations to a Syscal ascii file that can be read by the Electre Pro program .
118
22
8,553
def setup ( use_latex = False , overwrite = False ) : # just make sure we can access matplotlib as mpl import matplotlib as mpl # general settings if overwrite : mpl . rcParams [ "lines.linewidth" ] = 2.0 mpl . rcParams [ "lines.markeredgewidth" ] = 3.0 mpl . rcParams [ "lines.markersize" ] = 3.0 mpl . rcParams [ "font.size" ] = 12 mpl . rcParams [ 'mathtext.default' ] = 'regular' if latex and use_latex : mpl . rcParams [ 'text.usetex' ] = True mpl . rc ( 'text.latex' , preamble = '' . join ( ( # r'\usepackage{droidsans} r'\usepackage[T1]{fontenc} ' , r'\usepackage{sfmath} \renewcommand{\rmfamily}{\sffamily}' , r'\renewcommand\familydefault{\sfdefault} ' , r'\usepackage{mathastext} ' ) ) ) else : mpl . rcParams [ 'text.usetex' ] = False import matplotlib . pyplot as plt return plt , mpl
Set up matplotlib imports and settings .
292
9
8,554
def load_seit_data ( directory , frequency_file = 'frequencies.dat' , data_prefix = 'volt_' , * * kwargs ) : frequencies = np . loadtxt ( directory + os . sep + frequency_file ) data_files = sorted ( glob ( directory + os . sep + data_prefix + '*' ) ) # check that the number of frequencies matches the number of data files if frequencies . size != len ( data_files ) : raise Exception ( 'number of frequencies does not match number of data files' ) # load data data_list = [ ] for frequency , filename in zip ( frequencies , data_files ) : subdata = load_mod_file ( filename ) subdata [ 'frequency' ] = frequency data_list . append ( subdata ) df = pd . concat ( data_list ) return df , None , None
Load sEIT data from data directory . This function loads data previously exported from reda using reda . exporters . crtomo . write_files_to_directory
189
36
8,555
def get_diagonalisation ( frequencies , rate_matrix = None ) : Q = get_normalised_generator ( frequencies , rate_matrix ) d , A = np . linalg . eig ( Q ) return d , A , np . linalg . inv ( A )
Normalises and diagonalises the rate matrix .
64
9
8,556
def get_normalised_generator ( frequencies , rate_matrix = None ) : if rate_matrix is None : n = len ( frequencies ) rate_matrix = np . ones ( shape = ( n , n ) , dtype = np . float64 ) - np . eye ( n ) generator = rate_matrix * frequencies generator -= np . diag ( generator . sum ( axis = 1 ) ) mu = - generator . diagonal ( ) . dot ( frequencies ) generator /= mu return generator
Calculates the normalised generator from the rate matrix and character state frequencies .
108
16
8,557
def get_pij_matrix ( t , diag , A , A_inv ) : return A . dot ( np . diag ( np . exp ( diag * t ) ) ) . dot ( A_inv )
Calculates the probability matrix of substitutions i - > j over time t given the normalised generator diagonalisation .
49
24
8,558
def split_arguments ( args ) : prev = False for i , value in enumerate ( args [ 1 : ] ) : if value . startswith ( '-' ) : prev = True elif prev : prev = False else : return args [ : i + 1 ] , args [ i + 1 : ] return args , [ ]
Split specified arguments to two list .
71
7
8,559
def parse_arguments ( args , config ) : import notify from conf import config_to_options opts = config_to_options ( config ) usage = ( "%(prog)s " "[-h] [-t TO_ADDR] [-f FROM_ADDR] [-e ENCODING] [-s SUBJECT]\n" " " "[-o HOST] [-p PORT] [--username USERNAME] [--password PASSWORD]\n" " " "[--setup] [--check] COMMAND ARGUMENTS" ) % { 'prog' : "notify" } description = """ Call COMMAND with ARGUMENTS and send notification email to TO_ADDR """ parser = optparse . OptionParser ( usage = usage , description = description , version = notify . __version__ ) parser . add_option ( '-t' , '--to-addr' , default = opts . to_addr , help = ( 'Destination of the email.' ) ) parser . add_option ( '-f' , '--from-addr' , default = opts . from_addr , help = ( 'Source of the email.' ) ) parser . add_option ( '-s' , '--subject' , default = opts . subject , help = ( 'Subject of the email' ) ) parser . add_option ( '-e' , '--encoding' , default = opts . encoding , help = ( 'Encoding of the email' ) ) parser . add_option ( '-o' , '--host' , default = opts . host , help = ( 'Host address of MUA' ) ) parser . add_option ( '-p' , '--port' , type = 'int' , default = opts . port , help = ( 'Port number of MUA' ) ) parser . add_option ( '--username' , default = opts . username , help = ( 'Username for authentication' ) ) parser . add_option ( '--password' , help = ( 'Password for authentication' ) ) parser . add_option ( '--setup' , default = False , action = 'store_true' , help = ( 'Setup %(prog)s configuration' ) ) parser . add_option ( '--check' , default = False , action = 'store_true' , help = ( 'Send %(prog)s configuration via email for ' 'checking. Only for Unix system.' ) ) # display help and exit if len ( args ) == 1 : parser . print_help ( ) sys . exit ( 0 ) else : # translate all specified arguments to unicode if sys . version_info < ( 3 , ) : encoding = sys . stdout . encoding args = map ( lambda x : unicode ( x , encoding ) , args ) # split argv to two array lhs , rhs = split_arguments ( args ) # parse options opts = parser . parse_args ( args = lhs [ 1 : ] ) [ 0 ] return rhs , opts
Parse specified arguments via config
663
6
8,560
def should_require_authentication ( self , url ) : return ( not self . routes # require auth for all URLs or any ( route . match ( url ) for route in self . routes ) )
Returns True if we should require authentication for the URL given
42
11
8,561
def authenticate ( self , environ ) : try : hd = parse_dict_header ( environ [ 'HTTP_AUTHORIZATION' ] ) except ( KeyError , ValueError ) : return False return self . credentials_valid ( hd [ 'response' ] , environ [ 'REQUEST_METHOD' ] , environ [ 'httpauth.uri' ] , hd [ 'nonce' ] , hd [ 'Digest username' ] , )
Returns True if the credentials passed in the Authorization header are valid False otherwise .
103
15
8,562
def next ( self ) : try : return self . dict_to_xn ( self . csvreader . next ( ) ) except MetadataException : # row was metadata; proceed to next row return next ( self )
Return the next transaction object .
47
6
8,563
def parse_date ( self , date ) : if self . date_format is not None : return datetime . datetime . strptime ( date , self . date_format ) . date ( ) if re . match ( '\d{8}$' , date ) : # assume YYYYMMDD return datetime . date ( * map ( int , ( date [ : 4 ] , date [ 4 : 6 ] , date [ 6 : ] ) ) ) try : # split by '-' or '/' parts = date_delim . split ( date , 2 ) # maxsplit=2 if len ( parts ) == 3 : if len ( parts [ 0 ] ) == 4 : # YYYY, MM, DD return datetime . date ( * map ( int , parts ) ) elif len ( parts [ 2 ] ) == 4 : # DD, MM, YYYY return datetime . date ( * map ( int , reversed ( parts ) ) ) # fail except TypeError , ValueError : raise reader . DataError ( 'Bad date format: "{}"' . format ( date ) )
Parse the date and return a datetime object
235
10
8,564
def create ( self , uri , buffer = "queue" , interval = 10 ) : return self . _http_client . put_json ( "subscriptions/{}" . format ( self . short_name ) , { "subscription" : { "uri" : uri , "buffer" : buffer , "interval" : interval , } } )
Create a subscription with this short name and the provided parameters
78
11
8,565
def read_pal_version ( ) : verfile = os . path . join ( "cextern" , "pal" , "configure.ac" ) verstring = "-1.-1.-1" for line in open ( verfile ) : if line . startswith ( "AC_INIT" ) : # Version will be in string [nn.mm.pp] match = re . search ( r"\[(\d+\.\d+\.\d+)\]" , line ) if match : verstring = match . group ( 1 ) break ( major , minor , patch ) = verstring . split ( "." ) return ( verstring , major , minor , patch )
Scans the PAL configure . ac looking for the version number .
148
13
8,566
def _reset_model ( self , response ) : # pylint: disable=no-member # Reset the model to the initial state self . _provision_done = False # Set back the provision flag self . _changes . clear ( ) # Clear the changes # Process the raw data from the update response fields = self . process_raw_data ( response ) # Update the current model representation self . _set_fields ( fields ) # Lock the current model self . _provision_done = True
Update the fields value with the received information .
106
9
8,567
def is_ready ( self ) : if not self . provisioning_state : raise exception . ServiceException ( "The object doesn't contain " "`provisioningState`." ) elif self . provisioning_state == constant . FAILED : raise exception . ServiceException ( "Failed to complete the required operation." ) elif self . provisioning_state == constant . SUCCEEDED : LOG . debug ( "The model %s: %s was successfully updated " "(or created)." , self . __class__ . __name__ , self . resource_id ) return True return False
Check if the current model is ready to be used .
127
11
8,568
def _get_all ( cls , parent_id = None , grandparent_id = None ) : client = cls . _get_client ( ) endpoint = cls . _endpoint . format ( resource_id = "" , parent_id = parent_id or "" , grandparent_id = grandparent_id or "" ) resources = [ ] while True : response = client . get_resource ( endpoint ) for raw_data in response . get ( "value" , [ ] ) : raw_data [ "parentResourceID" ] = parent_id raw_data [ "grandParentResourceID" ] = grandparent_id resources . append ( cls . from_raw_data ( raw_data ) ) endpoint = response . get ( "nextLink" ) if not endpoint : break return resources
Retrives all the required resources .
173
8
8,569
def get ( cls , resource_id = None , parent_id = None , grandparent_id = None ) : if not resource_id : return cls . _get_all ( parent_id , grandparent_id ) else : return cls . _get ( resource_id , parent_id , grandparent_id )
Retrieves the required resources .
72
7
8,570
def refresh ( self ) : client = self . _get_client ( ) endpoint = self . _endpoint . format ( resource_id = self . resource_id or "" , parent_id = self . parent_id or "" , grandparent_id = self . grandparent_id or "" ) response = client . get_resource ( endpoint ) self . _reset_model ( response )
Get the latest representation of the current model .
83
9
8,571
def commit ( self , if_match = None , wait = True , timeout = None ) : if not self . _changes : LOG . debug ( "No changes available for %s: %s" , self . __class__ . __name__ , self . resource_id ) return LOG . debug ( "Apply all the changes on the current %s: %s" , self . __class__ . __name__ , self . resource_id ) client = self . _get_client ( ) endpoint = self . _endpoint . format ( resource_id = self . resource_id or "" , parent_id = self . parent_id or "" , grandparent_id = self . grandparent_id or "" ) request_body = self . dump ( include_read_only = False ) response = client . update_resource ( endpoint , data = request_body , if_match = if_match ) elapsed_time = 0 while wait : self . refresh ( ) # Update the representation of the current model if self . is_ready ( ) : break elapsed_time += CONFIG . HNV . retry_interval if timeout and elapsed_time > timeout : raise exception . TimeOut ( "The request timed out." ) time . sleep ( CONFIG . HNV . retry_interval ) else : self . _reset_model ( response ) # NOTE(alexcoman): In order to keep backwards compatibility the # `method: commit` will return a reference to itself. # An example for that can be the following use case: # label = client.Model().commit() return self
Apply all the changes on the current model .
337
9
8,572
def _set_fields ( self , fields ) : super ( _BaseHNVModel , self ) . _set_fields ( fields ) if not self . resource_ref : endpoint = self . _endpoint . format ( resource_id = self . resource_id , parent_id = self . parent_id , grandparent_id = self . grandparent_id ) self . resource_ref = re . sub ( "(/networking/v[0-9]+)" , "" , endpoint )
Set or update the fields value .
106
7
8,573
def get_resource ( self ) : references = { "resource_id" : None , "parent_id" : None , "grandparent_id" : None } for model_cls , regexp in self . _regexp . iteritems ( ) : match = regexp . search ( self . resource_ref ) if match is not None : references . update ( match . groupdict ( ) ) return model_cls . get ( * * references ) raise exception . NotFound ( "No model available for %(resource_ref)r" , resource_ref = self . resource_ref )
Return the associated resource .
129
5
8,574
def _get_nr_bins ( count ) : if count <= 30 : # use the square-root choice, used by Excel and Co k = np . ceil ( np . sqrt ( count ) ) else : # use Sturges' formula k = np . ceil ( np . log2 ( count ) ) + 1 return int ( k )
depending on the number of data points compute a best guess for an optimal number of bins
75
17
8,575
def plot_histograms ( ertobj , keys , * * kwargs ) : # you can either provide a DataFrame or an ERT object if isinstance ( ertobj , pd . DataFrame ) : df = ertobj else : df = ertobj . data if df . shape [ 0 ] == 0 : raise Exception ( 'No data present, cannot plot' ) if isinstance ( keys , str ) : keys = [ keys , ] figures = { } merge_figs = kwargs . get ( 'merge' , True ) if merge_figs : nr_x = 2 nr_y = len ( keys ) size_x = 15 / 2.54 size_y = 5 * nr_y / 2.54 fig , axes_all = plt . subplots ( nr_y , nr_x , figsize = ( size_x , size_y ) ) axes_all = np . atleast_2d ( axes_all ) for row_nr , key in enumerate ( keys ) : print ( 'Generating histogram plot for key: {0}' . format ( key ) ) subdata_raw = df [ key ] . values subdata = subdata_raw [ ~ np . isnan ( subdata_raw ) ] subdata = subdata [ np . isfinite ( subdata ) ] subdata_log10_with_nan = np . log10 ( subdata [ subdata > 0 ] ) subdata_log10 = subdata_log10_with_nan [ ~ np . isnan ( subdata_log10_with_nan ) ] subdata_log10 = subdata_log10 [ np . isfinite ( subdata_log10 ) ] if merge_figs : axes = axes_all [ row_nr ] . squeeze ( ) else : fig , axes = plt . subplots ( 1 , 2 , figsize = ( 10 / 2.54 , 5 / 2.54 ) ) ax = axes [ 0 ] ax . hist ( subdata , _get_nr_bins ( subdata . size ) , ) ax . set_xlabel ( units . get_label ( key ) ) ax . set_ylabel ( 'count' ) ax . xaxis . set_major_locator ( mpl . ticker . MaxNLocator ( 5 ) ) ax . tick_params ( axis = 'both' , which = 'major' , labelsize = 6 ) ax . tick_params ( axis = 'both' , which = 'minor' , labelsize = 6 ) if subdata_log10 . size > 0 : ax = axes [ 1 ] ax . hist ( subdata_log10 , _get_nr_bins ( subdata . size ) , ) ax . set_xlabel ( r'$log_{10}($' + units . get_label ( key ) + ')' ) ax . set_ylabel ( 'count' ) ax . xaxis . set_major_locator ( mpl . ticker . MaxNLocator ( 5 ) ) else : pass # del(axes[1]) fig . tight_layout ( ) if not merge_figs : figures [ key ] = fig if merge_figs : figures [ 'all' ] = fig return figures
Generate histograms for one or more keys in the given container .
717
14
8,576
def plot_histograms_extra_dims ( dataobj , keys , * * kwargs ) : if isinstance ( dataobj , pd . DataFrame ) : df_raw = dataobj else : df_raw = dataobj . data if kwargs . get ( 'subquery' , False ) : df = df_raw . query ( kwargs . get ( 'subquery' ) ) else : df = df_raw split_timestamps = True if split_timestamps : group_timestamps = df . groupby ( 'timestep' ) N_ts = len ( group_timestamps . groups . keys ( ) ) else : group_timestamps = ( 'all' , df ) N_ts = 1 columns = keys N_c = len ( columns ) plot_log10 = kwargs . get ( 'log10plot' , False ) if plot_log10 : transformers = [ 'lin' , 'log10' ] N_log10 = 2 else : transformers = [ 'lin' , ] N_log10 = 1 # determine layout of plots Nx_max = kwargs . get ( 'Nx' , 4 ) N = N_ts * N_c * N_log10 Nx = min ( Nx_max , N ) Ny = int ( np . ceil ( N / Nx ) ) size_x = 5 * Nx / 2.54 size_y = 5 * Ny / 2.54 fig , axes = plt . subplots ( Ny , Nx , figsize = ( size_x , size_y ) , sharex = True , sharey = True ) axes = np . atleast_2d ( axes ) index = 0 for ts_name , tgroup in group_timestamps : for column in columns : for transformer in transformers : # print('{0}-{1}-{2}'.format(ts_name, column, transformer)) subdata_raw = tgroup [ column ] . values subdata = subdata_raw [ ~ np . isnan ( subdata_raw ) ] subdata = subdata [ np . isfinite ( subdata ) ] if transformer == 'log10' : subdata_log10_with_nan = np . log10 ( subdata [ subdata > 0 ] ) subdata_log10 = subdata_log10_with_nan [ ~ np . isnan ( subdata_log10_with_nan ) ] subdata_log10 = subdata_log10 [ np . isfinite ( subdata_log10 ) ] subdata = subdata_log10 ax = axes . flat [ index ] ax . hist ( subdata , _get_nr_bins ( subdata . size ) , ) ax . set_xlabel ( units . get_label ( column ) ) ax . set_ylabel ( 'count' ) ax . xaxis . set_major_locator ( mpl . ticker . MaxNLocator ( 3 ) ) ax . tick_params ( axis = 'both' , which = 'major' , labelsize = 6 ) ax . tick_params ( axis = 'both' , which = 'minor' , labelsize = 6 ) ax . set_title ( "timestep: %d" % ts_name ) index += 1 # remove some labels for ax in axes [ : , 1 : ] . flat : ax . set_ylabel ( '' ) for ax in axes [ : - 1 , : ] . flat : ax . set_xlabel ( '' ) fig . tight_layout ( ) return fig
Produce histograms grouped by the extra dimensions .
784
10
8,577
def parse_substring ( allele , pred , max_len = None ) : result = "" pos = 0 if max_len is None : max_len = len ( allele ) else : max_len = min ( max_len , len ( allele ) ) while pos < max_len and pred ( allele [ pos ] ) : result += allele [ pos ] pos += 1 return result , allele [ pos : ]
Extract substring of letters for which predicate is True
87
11
8,578
def fetch ( self ) : if not self . local_path : self . make_local_path ( ) fetcher = BookFetcher ( self ) fetcher . fetch ( )
just pull files from PG
39
5
8,579
def make ( self ) : logger . debug ( "preparing to add all git files" ) num_added = self . local_repo . add_all_files ( ) if num_added : self . local_repo . commit ( "Initial import from Project Gutenberg" ) file_handler = NewFilesHandler ( self ) file_handler . add_new_files ( ) num_added = self . local_repo . add_all_files ( ) if num_added : self . local_repo . commit ( "Updates Readme, contributing, license files, cover, metadata." )
turn fetched files into a local repo make auxiliary files
129
11
8,580
def push ( self ) : self . github_repo . create_and_push ( ) self . _repo = self . github_repo . repo return self . _repo
create a github repo and push the local repo into it
40
11
8,581
def tag ( self , version = 'bump' , message = '' ) : self . clone_from_github ( ) self . github_repo . tag ( version , message = message )
tag and commit
41
3
8,582
def format_title ( self ) : def asciify ( _title ) : _title = unicodedata . normalize ( 'NFD' , unicode ( _title ) ) ascii = True out = [ ] ok = u"1234567890qwertyuiopasdfghjklzxcvbnmQWERTYUIOPASDFGHJKLZXCVBNM- '," for ch in _title : if ch in ok : out . append ( ch ) elif unicodedata . category ( ch ) [ 0 ] == ( "L" ) : #a letter out . append ( hex ( ord ( ch ) ) ) ascii = False elif ch in u'\r\n\t' : out . append ( u'-' ) return ( ascii , sub ( "[ ',-]+" , '-' , "" . join ( out ) ) ) ( ascii , _title ) = asciify ( self . meta . title ) if not ascii and self . meta . alternative_title : ( ascii , _title2 ) = asciify ( self . meta . alternative_title ) if ascii : _title = _title2 title_length = 99 - len ( str ( self . book_id ) ) - 1 if len ( _title ) > title_length : # if the title was shortened, replace the trailing _ with an ellipsis repo_title = "{0}__{1}" . format ( _title [ : title_length ] , self . book_id ) else : repo_title = "{0}_{1}" . format ( _title [ : title_length ] , self . book_id ) logger . debug ( "%s %s" % ( len ( repo_title ) , repo_title ) ) self . meta . metadata [ '_repo' ] = repo_title return repo_title
Takes a string and sanitizes it for Github s url name format
407
15
8,583
def _request ( self , path , method , body = None ) : url = '/' . join ( [ _SERVER , path ] ) ( resp , content ) = _HTTP . request ( url , method , headers = self . _headers , body = body ) content_type = resp . get ( 'content-type' ) if content_type and content_type . startswith ( 'application/json' ) : content = json . loads ( content . decode ( 'UTF-8' ) ) return ( resp , content )
Make a request from the API .
113
7
8,584
def put ( self , path , payload ) : body = json . dumps ( payload ) return self . _request ( path , 'PUT' , body )
Make a PUT request from the API .
32
9
8,585
def post ( self , path , payload ) : body = json . dumps ( payload ) return self . _request ( path , 'POST' , body )
Make a POST request from the API .
32
8
8,586
def create_child ( self , modules ) : binder = self . _binder . create_child ( ) return Injector ( modules , binder = binder , stage = self . _stage )
Create a new injector that inherits the state from this injector .
44
15
8,587
def validate ( self , message , schema_name ) : err = None try : jsonschema . validate ( message , self . schemas [ schema_name ] ) except KeyError : msg = ( f'Schema "{schema_name}" was not found (available: ' f'{", ".join(self.schemas.keys())})' ) err = { 'msg' : msg } except jsonschema . ValidationError as e : msg = ( f'Given message was not valid against the schema ' f'"{schema_name}": {e.message}' ) err = { 'msg' : msg } if err : logging . error ( * * err ) raise exceptions . InvalidMessageError ( err [ 'msg' ] )
Validate a message given a schema .
163
8
8,588
def compose ( * functions ) : def inner ( func1 , func2 ) : return lambda * x , * * y : func1 ( func2 ( * x , * * y ) ) return functools . reduce ( inner , functions )
evaluates functions from right to left .
51
8
8,589
def validate_instance ( instance ) : excludes = settings . AUTOMATED_LOGGING [ 'exclude' ] [ 'model' ] for excluded in excludes : if ( excluded in [ instance . _meta . app_label . lower ( ) , instance . __class__ . __name__ . lower ( ) ] or instance . __module__ . lower ( ) . startswith ( excluded ) ) : return False return True
Validating if the instance should be logged or is excluded
90
11
8,590
def get_current_user ( ) : thread_local = AutomatedLoggingMiddleware . thread_local if hasattr ( thread_local , 'current_user' ) : user = thread_local . current_user if isinstance ( user , AnonymousUser ) : user = None else : user = None return user
Get current user object from middleware
67
7
8,591
def get_current_environ ( ) : thread_local = AutomatedLoggingMiddleware . thread_local if hasattr ( thread_local , 'request_uri' ) : request_uri = thread_local . request_uri else : request_uri = None if hasattr ( thread_local , 'application' ) : application = thread_local . application application = Application . objects . get_or_create ( name = application ) [ 0 ] else : application = None if hasattr ( thread_local , 'method' ) : method = thread_local . method else : method = None if hasattr ( thread_local , 'status' ) : status = thread_local . status else : status = None return request_uri , application , method , status
Get current application and path object from middleware
161
9
8,592
def processor ( status , sender , instance , updated = None , addition = '' ) : logger = logging . getLogger ( __name__ ) if validate_instance ( instance ) : user = get_current_user ( ) application = instance . _meta . app_label model_name = instance . __class__ . __name__ level = settings . AUTOMATED_LOGGING [ 'loglevel' ] [ 'model' ] if status == 'change' : corrected = 'changed' elif status == 'add' : corrected = 'added' elif status == 'delete' : corrected = 'deleted' logger . log ( level , ( '%s %s %s(%s) in %s%s' % ( user , corrected , instance , model_name , application , addition ) ) . replace ( " " , " " ) , extra = { 'action' : 'model' , 'data' : { 'status' : status , 'user' : user , 'sender' : sender , 'instance' : instance , 'update_fields' : updated } } )
This is the standard logging processor .
234
7
8,593
def parents ( self ) : parents = [ ] if self . parent is None : return [ ] category = self while category . parent is not None : parents . append ( category . parent ) category = category . parent return parents [ : : - 1 ]
Returns a list of all the current category s parents .
52
11
8,594
def root_parent ( self , category = None ) : return next ( filter ( lambda c : c . is_root , self . hierarchy ( ) ) )
Returns the topmost parent of the current category .
33
10
8,595
def active ( self ) -> bool : states = self . _client . get_state ( self . _state_url ) [ 'states' ] for state in states : state = state [ 'State' ] if int ( state [ 'Id' ] ) == self . _state_id : # yes, the ZM API uses the *string* "1" for this... return state [ 'IsActive' ] == "1" return False
Indicate if this RunState is currently active .
94
10
8,596
def to_reasonable_unit ( value , units , round_digits = 2 ) : def to_unit ( unit ) : return float ( value ) / unit [ 1 ] exponents = [ abs ( Decimal ( to_unit ( u ) ) . adjusted ( ) - 1 ) for u in units ] best = min ( enumerate ( exponents ) , key = itemgetter ( 1 ) ) [ 0 ] return dict ( val = round ( to_unit ( units [ best ] ) , round_digits ) , label = units [ best ] [ 0 ] , multiplier = units [ best ] [ 1 ] )
Convert a value to the most reasonable unit .
131
10
8,597
def get_text ( self ) : done_units = to_reasonable_unit ( self . done , self . units ) current = round ( self . current / done_units [ 'multiplier' ] , 2 ) percent = int ( self . current * 100 / self . done ) return '{0:.2f} of {1:.2f} {2} ({3}%)' . format ( current , done_units [ 'val' ] , done_units [ 'label' ] , percent )
Return extended progress bar text
110
5
8,598
def add_progress ( self , delta , done = None ) : if done is not None : self . done = done self . bar . current = max ( min ( self . done , self . current + delta ) , 0 ) self . rate_display . set_text ( self . rate_text ) self . remaining_time_display . set_text ( self . remaining_time_text ) return self . current == self . done
Add to the current progress amount
92
6
8,599
async def valid_token_set ( self ) : is_valid = False if self . _auth_client . token : # Account for a token near expiration now = datetime . datetime . utcnow ( ) skew = datetime . timedelta ( seconds = 60 ) if self . _auth_client . expiry > ( now + skew ) : is_valid = True return is_valid
Check for validity of token and refresh if none or expired .
85
12