idx
int64
0
251k
question
stringlengths
53
3.53k
target
stringlengths
5
1.23k
len_question
int64
20
893
len_target
int64
3
238
236,600
def from_json(cls, data):
    """Create a Wind Condition from a dictionary.

    Args:
        data: Dict with a required 'wind_speed' key and optional
            'wind_direction' (default 0), 'rain' (default False) and
            'snow_on_ground' (default False) keys.

    Returns:
        A Wind Condition built from the dictionary values.

    Raises:
        AssertionError: If the required 'wind_speed' key is missing.
    """
    assert 'wind_speed' in data, 'Required key "wind_speed" is missing!'
    # read optional keys with defaults instead of mutating the caller's dict
    return cls(data['wind_speed'],
               data.get('wind_direction', 0),
               data.get('rain', False),
               data.get('snow_on_ground', False))
Create a Wind Condition from a dictionary .
133
8
236,601
def to_json(self):
    """Convert the Wind Condition to a dictionary."""
    keys = ('wind_speed', 'wind_direction', 'rain', 'snow_on_ground')
    return {key: getattr(self, key) for key in keys}
Convert the Wind Condition to a dictionary .
59
9
236,602
def _get_datetimes(self, timestep=1):
    """List of datetimes based on the design day date and timestep."""
    first_moy = DateTime(self._month, self._day_of_month).moy
    if timestep == 1:
        # hourly values are reported 30 minutes into the hour
        first_moy = first_moy + 30
    step_count = 24 * timestep
    return tuple(DateTime.from_moy(first_moy + (step * (1 / timestep) * 60))
                 for step in xrange(step_count))
List of datetimes based on design day date and timestep .
111
14
236,603
def from_analysis_period(cls, analysis_period, clearness=1,
                         daylight_savings_indicator='No'):
    """Initialize an OriginalClearSkyCondition from an analysis_period."""
    _check_analysis_period(analysis_period)
    month = analysis_period.st_month
    day = analysis_period.st_day
    return cls(month, day, clearness, daylight_savings_indicator)
Initialize a OriginalClearSkyCondition from an analysis_period
74
12
236,604
def radiation_values(self, location, timestep=1):
    """Lists of direct normal, diffuse horizontal and global horizontal
    radiation at each timestep of the design day.
    """
    # sun altitude at every timestep of the design day
    sun_path = Sunpath.from_location(location)
    altitudes = [sun_path.calculate_sun_from_date_time(dt).altitude
                 for dt in self._get_datetimes(timestep)]
    dir_norm, diff_horiz = ashrae_clear_sky(
        altitudes, self._month, self._clearness)
    # global horizontal = diffuse + direct projected onto the horizontal plane
    glob_horiz = [dhr + dnr * math.sin(math.radians(alt))
                  for alt, dnr, dhr in zip(altitudes, dir_norm, diff_horiz)]
    return dir_norm, diff_horiz, glob_horiz
Lists of direct normal, diffuse horizontal and global horizontal radiation at each timestep.
189
20
236,605
def from_analysis_period(cls, analysis_period, tau_b, tau_d,
                         daylight_savings_indicator='No'):
    """Initialize a RevisedClearSkyCondition from an analysis_period."""
    _check_analysis_period(analysis_period)
    month = analysis_period.st_month
    day = analysis_period.st_day
    return cls(month, day, tau_b, tau_d, daylight_savings_indicator)
Initialize a RevisedClearSkyCondition from an analysis_period
86
12
236,606
def convert_to_unit(self, unit):
    """Convert the Data Collection to the input unit."""
    header = self._header
    self._values = header.data_type.to_unit(self._values, unit, header.unit)
    header._unit = unit
Convert the Data Collection to the input unit .
53
10
236,607
def convert_to_ip(self):
    """Convert the Data Collection to IP units."""
    header = self._header
    self._values, header._unit = header.data_type.to_ip(
        self._values, header.unit)
Convert the Data Collection to IP units .
48
9
236,608
def convert_to_si(self):
    """Convert the Data Collection to SI units."""
    header = self._header
    self._values, header._unit = header.data_type.to_si(
        self._values, header.unit)
Convert the Data Collection to SI units .
48
9
236,609
def to_unit(self, unit):
    """Return a Data Collection converted to the input unit."""
    converted = self.duplicate()
    converted.convert_to_unit(unit)
    return converted
Return a Data Collection in the input unit .
41
9
236,610
def is_in_data_type_range(self, raise_exception=True):
    """Check that the values are in the physically possible range for the data_type."""
    header = self._header
    return header.data_type.is_in_range(
        self._values, header.unit, raise_exception)
Check if collection values are in physically possible ranges for the data_type .
54
15
236,611
def get_highest_values(self, count):
    """Get a list of the count highest values of the Data Collection and their indices.

    Args:
        count: Integer for the number of highest values to return.

    Returns:
        highest_values: The count largest values, in descending order.
        highest_values_index: Indices of those values in the collection,
            corresponding position for position with highest_values.

    Raises:
        AssertionError: If count is not in the range 1..len(values).
    """
    count = int(count)
    assert count <= len(self._values), \
        'count must be smaller than or equal to values length. {} > {}.'.format(
            count, len(self._values))
    assert count > 0, 'count must be greater than 0. Got {}.'.format(count)
    # one stable index sort yields both lists consistently (the original
    # sorted the values a second time, doing the same work twice)
    highest_values_index = sorted(
        range(len(self._values)), key=lambda k: self._values[k],
        reverse=True)[0:count]
    highest_values = [self._values[i] for i in highest_values_index]
    return highest_values, highest_values_index
Get a list of the the x highest values of the Data Collection and their indices .
154
17
236,612
def get_lowest_values(self, count):
    """Get a list of the count lowest values of the Data Collection and their indices.

    Args:
        count: Integer for the number of lowest values to return.

    Returns:
        lowest_values: The count smallest values, in ascending order.
        lowest_values_index: Indices of those values in the collection,
            corresponding position for position with lowest_values.

    Raises:
        AssertionError: If count is not in the range 1..len(values).
    """
    count = int(count)
    assert count <= len(self._values), \
        'count must be <= to Data Collection len. {} > {}.'.format(
            count, len(self._values))
    assert count > 0, 'count must be greater than 0. Got {}.'.format(count)
    # one stable index sort yields both lists consistently (the original
    # sorted the values a second time, doing the same work twice)
    lowest_values_index = sorted(
        range(len(self._values)), key=lambda k: self._values[k])[0:count]
    lowest_values = [self._values[i] for i in lowest_values_index]
    return lowest_values, lowest_values_index
Get a list of the the x lowest values of the Data Collection and their indices .
145
17
236,613
def get_percentile(self, percentile):
    """Get the value at the input percentile of the Data Collection.

    Args:
        percentile: A number between 0 and 100.
    """
    msg = 'percentile must be between 0 and 100. Got {}'.format(percentile)
    assert 0 <= percentile <= 100, msg
    return self._percentile(self._values, percentile)
Get a value representing the input percentile of the Data Collection.
50
13
236,614
def get_aligned_collection(self, value=0, data_type=None, unit=None, mutable=None):
    """Return a Collection aligned with this one composed of one repeated value.

    Args:
        value: A value (or a sequence as long as this collection) for the
            new collection. Default: 0.
        data_type: Data type for the new collection; defaults to this
            collection's data type.
        unit: Unit for the new collection; defaults to this collection's unit.
        mutable: None to use this collection's own class; True/False to pick
            the mutable/immutable variant of this collection type from the
            class enumeration.

    Returns:
        A new collection with the same datetimes as this one.
    """
    # set up the header of the new collection
    header = self._check_aligned_header(data_type, unit)
    # set up the values of the new collection
    values = self._check_aligned_value(value)
    # get the correct base class for the aligned collection (mutable or immutable)
    if mutable is None:
        collection = self.__class__(header, values, self.datetimes)
    else:
        # lazily build the mutable/immutable class lookup table
        if self._enumeration is None:
            self._get_mutable_enumeration()
        if mutable is False:
            col_obj = self._enumeration['immutable'][self._collection_type]
        else:
            col_obj = self._enumeration['mutable'][self._collection_type]
        collection = col_obj(header, values, self.datetimes)
    # carry the validation flag over so the new collection is trusted equally
    collection._validated_a_period = self._validated_a_period
    return collection
Return a Collection aligned with this one composed of one repeated value .
233
13
236,615
def duplicate(self):
    """Return a copy of the current Data Collection."""
    header_copy = self.header.duplicate()
    fresh = self.__class__(header_copy, self.values, self.datetimes)
    fresh._validated_a_period = self._validated_a_period
    return fresh
Return a copy of the current Data Collection .
52
9
236,616
def to_json(self):
    """Convert the Data Collection to a dictionary."""
    body = dict(header=self.header.to_json(),
                values=self._values,
                datetimes=self.datetimes,
                validated_a_period=self._validated_a_period)
    return body
Convert Data Collection to a dictionary .
62
8
236,617
def filter_collections_by_statement(data_collections, statement):
    """Generate filtered data collections according to a conditional statement."""
    pattern = BaseCollection.pattern_from_collections_and_statement(
        data_collections, statement)
    return [collection.filter_by_pattern(pattern)
            for collection in data_collections]
Generate a filtered data collections according to a conditional statement .
64
12
236,618
def pattern_from_collections_and_statement(data_collections, statement):
    """Generate a list of booleans from data collections and a conditional statement.

    Args:
        data_collections: Aligned data collections whose values are substituted
            for the statement variables ('a' for the first collection, 'b' for
            the second, ...).
        statement: A Python conditional statement string over those variables.

    Returns:
        A list of booleans, one per item of the collections.
    """
    BaseCollection.are_collections_aligned(data_collections)
    correct_var = BaseCollection._check_conditional_statement(
        statement, len(data_collections))
    # replace the operators of the statement with non-alphanumeric characters
    # necessary to avoid replacing the characters of the operators
    num_statement_clean = BaseCollection._replace_operators(statement)
    pattern = []
    for i in xrange(len(data_collections[0])):
        num_statement = num_statement_clean
        # replace the variable names with their numerical values
        for j, coll in enumerate(data_collections):
            var = correct_var[j]
            num_statement = num_statement.replace(var, str(coll[i]))
        # put back the operators
        num_statement = BaseCollection._restore_operators(num_statement)
        # NOTE(review): eval executes the statement string; callers must only
        # pass trusted statements (validated above to use single-letter vars)
        pattern.append(eval(num_statement, {}))
    return pattern
Generate a list of booleans from data collections and a conditional statement .
222
16
236,619
def are_collections_aligned(data_collections, raise_exception=True):
    """Test whether a series of Data Collections are aligned with one another."""
    if len(data_collections) <= 1:
        return True
    reference = data_collections[0]
    for other in data_collections[1:]:
        if reference.is_collection_aligned(other):
            continue
        if raise_exception is True:
            error_msg = '{} Data Collection is not aligned with ' \
                '{} Data Collection.'.format(
                    reference.header.data_type, other.header.data_type)
            raise ValueError(error_msg)
        return False
    return True
Test if a series of Data Collections are aligned with one another .
131
13
236,620
def compute_function_aligned(funct, data_collections, data_type, unit):
    """Compute a function with a list of aligned data collections or individual values.

    Args:
        funct: A function to run element-wise over the inputs.
        data_collections: List whose items are each either a BaseCollection
            or a number; numbers are broadcast across the collection length.
            NOTE: items of this list are coerced/replaced in place.
        data_type: Data type of the resulting collection.
        unit: Unit of the resulting collection.

    Returns:
        The result of funct: a single value if no collections were passed,
        otherwise a collection aligned with the input collections.

    Raises:
        TypeError: If an input is neither a Data Collection nor a number.
    """
    # check that all inputs are either data collections or floats
    data_colls = []
    for i, func_input in enumerate(data_collections):
        if isinstance(func_input, BaseCollection):
            data_colls.append(func_input)
        else:
            try:
                data_collections[i] = float(func_input)
            except ValueError:
                # fixed typo in the user-facing message ('Colleciton')
                raise TypeError('Expected a number or a Data Collection. '
                                'Got {}'.format(type(func_input)))
    # run the function and return the result
    if len(data_colls) == 0:
        return funct(*data_collections)
    else:
        BaseCollection.are_collections_aligned(data_colls)
        val_len = len(data_colls[0].values)
        # broadcast scalar inputs to the collection length
        for i, col in enumerate(data_collections):
            data_collections[i] = [col] * val_len if isinstance(col, float) else col
        result = data_colls[0].get_aligned_collection(data_type=data_type, unit=unit)
        for i in xrange(val_len):
            result[i] = funct(*[col[i] for col in data_collections])
        return result
Compute a function with a list of aligned data collections or individual values .
299
15
236,621
def _check_conditional_statement(statement, num_collections):
    """Check a conditional statement to be sure that it is valid.

    Returns the list of variable names ('a', 'b', ...) the statement may use.
    """
    # the allowed variables are the first num_collections letters of the alphabet
    correct_var = list(ascii_lowercase)[:num_collections]
    # strip the operators so only variable characters remain
    cleaned = BaseCollection._remove_operators(statement)
    statement_vars = [char for char in cleaned if char.isalpha()]
    for var in statement_vars:
        if var not in correct_var:
            raise ValueError(
                'Invalid conditional statement: {}\n '
                'Statement should be a valid Python statement'
                ' and the variables should be named as follows: {}'.format(
                    statement, ', '.join(correct_var)))
    return correct_var
Method to check conditional statements to be sure that they are valid .
170
13
236,622
def _filter_by_statement ( self , statement ) : self . __class__ . _check_conditional_statement ( statement , 1 ) _filt_values , _filt_datetimes = [ ] , [ ] for i , a in enumerate ( self . _values ) : if eval ( statement , { 'a' : a } ) : _filt_values . append ( a ) _filt_datetimes . append ( self . datetimes [ i ] ) return _filt_values , _filt_datetimes
Filter the data collection based on a conditional statement .
117
10
236,623
def _filter_by_pattern ( self , pattern ) : try : _len = len ( pattern ) except TypeError : raise TypeError ( "pattern is not a list of Booleans. Got {}" . format ( type ( pattern ) ) ) _filt_values = [ d for i , d in enumerate ( self . _values ) if pattern [ i % _len ] ] _filt_datetimes = [ d for i , d in enumerate ( self . datetimes ) if pattern [ i % _len ] ] return _filt_values , _filt_datetimes
Filter the Data Collection based on a list of booleans.
126
15
236,624
def _check_aligned_header(self, data_type, unit):
    """Check the header inputs whenever get_aligned_collection is called."""
    if data_type is None:
        # fall back to this collection's own type and unit
        data_type = self.header.data_type
        unit = unit or self.header.unit
    else:
        assert isinstance(data_type, DataTypeBase), \
            'data_type must be a Ladybug DataType. Got {}'.format(type(data_type))
        if unit is None:
            unit = data_type.units[0]
    return Header(data_type, unit, self.header.analysis_period,
                  self.header.metadata)
Check the header inputs whenever get_aligned_collection is called .
123
13
236,625
def _check_aligned_value ( self , value ) : if isinstance ( value , Iterable ) and not isinstance ( value , ( str , dict , bytes , bytearray ) ) : assert len ( value ) == len ( self . _values ) , "Length of value ({}) must match " "the length of this collection's values ({})" . format ( len ( value ) , len ( self . _values ) ) values = value else : values = [ value ] * len ( self . _values ) return values
Check the value input whenever get_aligned_collection is called .
113
13
236,626
def from_json(cls, data):
    """Create a datetime from a dictionary.

    Args:
        data: Dict with optional 'month' (default 1), 'day' (default 1),
            'hour' (default 0), 'minute' (default 0) and 'year' (default 2017)
            keys. A year of 2016 marks the datetime as part of a leap year.

    Returns:
        A datetime built from the dictionary values.
    """
    # read with defaults instead of mutating the caller's dict
    month = data.get('month', 1)
    day = data.get('day', 1)
    hour = data.get('hour', 0)
    minute = data.get('minute', 0)
    leap_year = int(data.get('year', 2017)) == 2016
    return cls(month, day, hour, minute, leap_year)
Create a datetime from a dictionary.
146
7
236,627
def from_hoy(cls, hoy, leap_year=False):
    """Create a Ladybug Datetime from an hour of the year."""
    minute_of_year = round(hoy * 60)
    return cls.from_moy(minute_of_year, leap_year)
Create Ladybug Datetime from an hour of the year .
40
12
236,628
def from_moy(cls, moy, leap_year=False):
    """Create a Ladybug Datetime from a minute of the year.

    Args:
        moy: Minute of the year (0 <= moy < 525600, or < 527040 in a leap year).
        leap_year: Whether the year is a leap year. Default: False.

    Raises:
        ValueError: If moy is outside the year (detected via the month lookup
            failing below).
    """
    # cumulative minutes elapsed at the start of each month (index 0..12)
    if not leap_year:
        num_of_minutes_until_month = (0, 44640, 84960, 129600, 172800, 217440,
                                      260640, 305280, 349920, 393120, 437760,
                                      480960, 525600)
    else:
        # every month boundary after February shifts by one day (1440 minutes)
        num_of_minutes_until_month = (0, 44640, 84960 + 1440, 129600 + 1440,
                                      172800 + 1440, 217440 + 1440,
                                      260640 + 1440, 305280 + 1440,
                                      349920 + 1440, 393120 + 1440,
                                      437760 + 1440, 480960 + 1440,
                                      525600 + 1440)
    # find month
    for monthCount in range(12):
        if int(moy) < num_of_minutes_until_month[monthCount + 1]:
            month = monthCount + 1
            break
    try:
        # if moy was beyond the year, `month` was never bound above and the
        # UnboundLocalError here is converted into a ValueError
        day = int((moy - num_of_minutes_until_month[month - 1]) / (60 * 24)) + 1
    except UnboundLocalError:
        raise ValueError(
            "moy must be positive and smaller than 525600. Invalid input %d" % (moy))
    else:
        hour = int((moy / 60) % 24)
        minute = int(moy % 60)
        return cls(month, day, hour, minute, leap_year)
Create Ladybug Datetime from a minute of the year .
318
12
236,629
def from_date_time_string(cls, datetime_string, leap_year=False):
    """Create a Ladybug DateTime from a DateTime string.

    The string must follow the '%d %b %H:%M' format, e.g. '21 Jun 12:30'.
    """
    parsed = datetime.strptime(datetime_string, '%d %b %H:%M')
    return cls(parsed.month, parsed.day, parsed.hour, parsed.minute, leap_year)
Create Ladybug DateTime from a DateTime string .
79
11
236,630
def _calculate_hour_and_minute ( float_hour ) : hour , minute = int ( float_hour ) , int ( round ( ( float_hour - int ( float_hour ) ) * 60 ) ) if minute == 60 : return hour + 1 , 0 else : return hour , minute
Calculate hour and minutes as integers from a float hour .
65
13
236,631
def add_minute(self, minute):
    """Create a new DateTime with the input number of minutes added."""
    new_moy = self.moy + int(minute)
    return self.__class__.from_moy(new_moy)
Create a new DateTime after the minutes are added .
39
11
236,632
def to_json(self):
    """Get the date time as a dictionary."""
    return dict(year=self.year, month=self.month, day=self.day,
                hour=self.hour, minute=self.minute)
Get date time as a dictionary .
50
7
236,633
def fullConn(self, preCellsTags, postCellsTags, connParam):
    """Generate all-to-all connections between the pre- and post-synaptic cells.

    Args:
        preCellsTags: Dict mapping presynaptic gid -> cell tags.
        postCellsTags: Dict mapping postsynaptic gid -> cell tags.
        connParam: Connectivity rule dict; '<param>Func' entries are evaluated
            per cell pair into '<param>List' entries before connecting.
    """
    from .. import sim
    if sim.cfg.verbose:
        print('Generating set of all-to-all connections (rule: %s) ...' % (connParam['label']))
    # get list of params that have a lambda function
    paramsStrFunc = [param for param in [p + 'Func' for p in self.connStringFuncParams]
                     if param in connParam]
    for paramStrFunc in paramsStrFunc:
        # replace lambda function (with args as dict of lambda funcs) with list of values
        # (paramStrFunc[:-4] strips the 'Func' suffix; the inner dict evaluates
        # each '<param>FuncVars' entry, calling it with the pair's tags unless
        # it is already a plain number)
        connParam[paramStrFunc[:-4] + 'List'] = {
            (preGid, postGid): connParam[paramStrFunc](
                **{k: v if isinstance(v, Number) else v(preCellTags, postCellTags)
                   for k, v in connParam[paramStrFunc + 'Vars'].items()})
            for preGid, preCellTags in preCellsTags.items()
            for postGid, postCellTags in postCellsTags.items()}
    for postCellGid in postCellsTags:  # for each postsyn cell
        if postCellGid in self.gid2lid:  # check if postsyn is in this node's list of gids
            for preCellGid, preCellTags in preCellsTags.items():  # for each presyn cell
                self._addCellConn(connParam, preCellGid, postCellGid)
Generates connections between all pre and post - syn cells
352
11
236,634
def fromListConn(self, preCellsTags, postCellsTags, connParam):
    """Generate connections between pre- and post-synaptic cells from an
    explicit list of (relative pre id, relative post id) pairs.

    Args:
        preCellsTags: Dict mapping presynaptic gid -> cell tags.
        postCellsTags: Dict mapping postsynaptic gid -> cell tags.
        connParam: Connectivity rule dict with a 'connList' of relative id
            pairs; optional per-connection 'weight'/'delay'/'loc' lists.
    """
    from .. import sim
    if sim.cfg.verbose:
        print('Generating set of connections from list (rule: %s) ...' % (connParam['label']))
    # relative ids in connList index into the sorted gid lists
    orderedPreGids = sorted(preCellsTags)
    orderedPostGids = sorted(postCellsTags)
    # list of params that can have a lambda function
    paramsStrFunc = [param for param in [p + 'Func' for p in self.connStringFuncParams]
                     if param in connParam]
    for paramStrFunc in paramsStrFunc:
        # replace lambda function (with args as dict of lambda funcs) with list of values
        connParam[paramStrFunc[:-4] + 'List'] = {
            (orderedPreGids[preId], orderedPostGids[postId]): connParam[paramStrFunc](
                **{k: v if isinstance(v, Number) else v(
                    preCellsTags[orderedPreGids[preId]],
                    postCellsTags[orderedPostGids[postId]])
                   for k, v in connParam[paramStrFunc + 'Vars'].items()})
            for preId, postId in connParam['connList']}
    if 'weight' in connParam and isinstance(connParam['weight'], list):
        connParam['weightFromList'] = list(connParam['weight'])  # if weight is a list, copy to weightFromList
    if 'delay' in connParam and isinstance(connParam['delay'], list):
        connParam['delayFromList'] = list(connParam['delay'])  # if delay is a list, copy to delayFromList
    if 'loc' in connParam and isinstance(connParam['loc'], list):
        connParam['locFromList'] = list(connParam['loc'])  # if loc is a list, copy to locFromList
    for iconn, (relativePreId, relativePostId) in enumerate(connParam['connList']):  # for each connection in the list
        preCellGid = orderedPreGids[relativePreId]
        postCellGid = orderedPostGids[relativePostId]
        if postCellGid in self.gid2lid:  # check if postsyn is in this node's list of gids
            # per-connection values override the rule-level params
            if 'weightFromList' in connParam:
                connParam['weight'] = connParam['weightFromList'][iconn]
            if 'delayFromList' in connParam:
                connParam['delay'] = connParam['delayFromList'][iconn]
            if 'locFromList' in connParam:
                connParam['loc'] = connParam['locFromList'][iconn]
            if preCellGid != postCellGid:  # if not self-connection
                self._addCellConn(connParam, preCellGid, postCellGid)
Generates connections between pre- and post-synaptic cells based on a list of relative cell ids.
678
18
236,635
def setImembPtr(self):
    """Set the PtrVector entries to point at each segment's i_membrane_."""
    seg_index = 0
    for sec in list(self.secs.values()):
        for seg in sec['hObj']:
            # _ref_i_membrane_: note the trailing underscore (value in nA)
            self.imembPtr.pset(seg_index, seg._ref_i_membrane_)
            seg_index += 1
Set PtrVector to point to the i_membrane_
96
14
236,636
def saveWeights(sim):
    """Save the weights for each plastic synapse to sim.weightsfilename."""
    with open(sim.weightsfilename, 'w') as fid:
        for weightdata in sim.allWeights:
            # first column is the time, remaining columns are the weights
            fields = ['%0.0f' % weightdata[0]]
            fields.extend('\t%0.8f' % w for w in weightdata[1:])
            fid.write(''.join(fields) + '\n')
    print(('Saved weights as %s' % sim.weightsfilename))
Save the weights for each plastic synapse
114
8
236,637
def validateFunction(strFunc, netParamsVars):
    """Return True if strFunc can be evaluated with dummy variable values.

    The string is evaluated with every known spatial variable bound to 1,
    random methods routed through a NEURON Random object, and math helpers
    in scope; any exception during eval means the function is invalid.
    """
    from math import exp, log, sqrt, sin, cos, tan, asin, acos, atan, sinh, cosh, tanh, pi, e
    rand = h.Random()
    # rewrite bare random-method names so they call the NEURON Random object
    stringFuncRandMethods = ['binomial', 'discunif', 'erlang', 'geometric',
                             'hypergeo', 'lognormal', 'negexp', 'normal',
                             'poisson', 'uniform', 'weibull']
    for randmeth in stringFuncRandMethods:
        strFunc = strFunc.replace(randmeth, 'rand.' + randmeth)
    # dummy value 1 for every spatial variable the string may reference
    variables = {"pre_x": 1, "pre_y": 1, "pre_z": 1,
                 "post_x": 1, "post_y": 1, "post_z": 1,
                 "dist_x": 1, "dist_y": 1, "dist_z": 1,
                 "pre_xnorm": 1, "pre_ynorm": 1, "pre_znorm": 1,
                 "post_xnorm": 1, "post_ynorm": 1, "post_znorm": 1,
                 "dist_xnorm": 1, "dist_ynorm": 1, "dist_znorm": 1,
                 "dist_3D": 1, "dist_3D_border": 1, "dist_2D": 1,
                 "dist_norm3D": 1, "dist_norm2D": 1, "rand": rand,
                 "exp": exp, "log": log, "sqrt": sqrt,
                 "sin": sin, "cos": cos, "tan": tan,
                 "asin": asin, "acos": acos, "atan": atan,
                 "sinh": sinh, "cosh": cosh, "tanh": tanh,
                 "pi": pi, "e": e}
    # add netParams variables
    for k, v in netParamsVars.items():
        if isinstance(v, Number):
            variables[k] = v
    # NOTE(review): eval runs arbitrary code from strFunc -- only call this
    # with model-author-supplied strings; the bare except is a deliberate
    # best-effort "is it evaluable" probe
    try:
        eval(strFunc, variables)
        return True
    except:
        return False
returns True if strFunc can be evaluated
517
10
236,638
def bandpass(data, freqmin, freqmax, df, corners=4, zerophase=True):
    """Butterworth-Bandpass Filter.

    Filters data between freqmin and freqmax (Hz) given the sampling rate df.
    Falls back to a high-pass (with a warning) when the upper corner reaches
    Nyquist; raises ValueError when the lower corner exceeds Nyquist.
    With zerophase the filter is applied forwards and backwards.
    """
    nyquist = 0.5 * df
    low = freqmin / nyquist
    high = freqmax / nyquist
    # raise for some bad scenarios
    if high - 1.0 > -1e-6:
        msg = ("Selected high corner frequency ({}) of bandpass is at or "
               "above Nyquist ({}). Applying a high-pass instead.").format(
            freqmax, nyquist)
        warnings.warn(msg)
        return highpass(data, freq=freqmin, df=df, corners=corners,
                        zerophase=zerophase)
    if low > 1:
        raise ValueError("Selected low corner frequency is above Nyquist.")
    z, p, k = iirfilter(corners, [low, high], btype='band',
                        ftype='butter', output='zpk')
    sos = zpk2sos(z, p, k)
    if not zerophase:
        return sosfilt(sos, data)
    forward = sosfilt(sos, data)
    return sosfilt(sos, forward[::-1])[::-1]
Butterworth - Bandpass Filter .
285
8
236,639
def bandstop(data, freqmin, freqmax, df, corners=4, zerophase=False):
    """Butterworth-Bandstop Filter.

    Removes the band between freqmin and freqmax (Hz) given the sampling
    rate df. An upper corner above Nyquist is clipped to Nyquist (with a
    warning); a lower corner above Nyquist raises ValueError. With
    zerophase the filter is applied forwards and backwards.
    """
    nyquist = 0.5 * df
    low = freqmin / nyquist
    high = freqmax / nyquist
    # raise for some bad scenarios
    if high > 1:
        high = 1.0
        msg = "Selected high corner frequency is above Nyquist. " + \
              "Setting Nyquist as high corner."
        warnings.warn(msg)
    if low > 1:
        raise ValueError("Selected low corner frequency is above Nyquist.")
    z, p, k = iirfilter(corners, [low, high], btype='bandstop',
                        ftype='butter', output='zpk')
    sos = zpk2sos(z, p, k)
    if not zerophase:
        return sosfilt(sos, data)
    forward = sosfilt(sos, data)
    return sosfilt(sos, forward[::-1])[::-1]
Butterworth - Bandstop Filter .
232
8
236,640
def lowpass(data, freq, df, corners=4, zerophase=False):
    """Butterworth-Lowpass Filter.

    Filters data below freq (Hz) given the sampling rate df. A corner above
    Nyquist is clipped to Nyquist (with a warning). With zerophase the
    filter is applied forwards and backwards.
    """
    nyquist = 0.5 * df
    corner = freq / nyquist
    # raise for some bad scenarios
    if corner > 1:
        corner = 1.0
        msg = "Selected corner frequency is above Nyquist. " + \
              "Setting Nyquist as high corner."
        warnings.warn(msg)
    z, p, k = iirfilter(corners, corner, btype='lowpass',
                        ftype='butter', output='zpk')
    sos = zpk2sos(z, p, k)
    if not zerophase:
        return sosfilt(sos, data)
    forward = sosfilt(sos, data)
    return sosfilt(sos, forward[::-1])[::-1]
Butterworth - Lowpass Filter .
190
8
236,641
def integer_decimation(data, decimation_factor):
    """Downsample by keeping every decimation_factor-th sample.

    Raises:
        TypeError: If decimation_factor is not an integer.
    """
    if not isinstance(decimation_factor, int):
        raise TypeError("Decimation_factor must be an integer!")
    # reshape and only use every decimation_factor-th sample
    return np.array(data[::decimation_factor])
Downsampling by applying a simple integer decimation .
77
11
236,642
def _distributeCells(numCellsPop):
    """Distribute cells across compute nodes using round-robin."""
    from .. import sim
    hostCells = {host: [] for host in range(sim.nhosts)}
    for cell_index in range(numCellsPop):
        hostCells[sim.nextHost].append(cell_index)
        # advance the round-robin pointer, wrapping at the host count
        sim.nextHost += 1
        if sim.nextHost >= sim.nhosts:
            sim.nextHost = 0
    if sim.cfg.verbose:
        print(("Distributed population of %i cells on %s hosts: %s, next: %s" % (
            numCellsPop, sim.nhosts, hostCells, sim.nextHost)))
    return hostCells
distribute cells across compute nodes using round - robin
155
11
236,643
def getCSD(lfps, sampr, minf=0.05, maxf=300, norm=True, vaknin=False, spacing=1.0):
    """Current source density approximation from equidistantly spaced LFPs.

    lfps is a list or numpy array of LFPs arranged spatially by column;
    spacing is in microns. A bandpass filter (minf..maxf) is applied before
    the second spatial derivative is taken.
    """
    datband = getbandpass(lfps, sampr, minf, maxf)
    # take CSD along the smaller dimension (assumed to be the electrode axis)
    ax = 1 if datband.shape[0] > datband.shape[1] else 0
    # can change default to run Vaknin on bandpass filtered LFPs before calculating
    # CSD, that way would have same number of channels in CSD and LFP (but not
    # critical, and would take more RAM)
    if vaknin:
        datband = Vaknin(datband)
    if norm:
        removemean(datband, ax=ax)
    # NB: when drawing CSD make sure that negative values (depolarizing
    # intracellular current) are drawn in red, and positive values
    # (hyperpolarizing intracellular current) in blue
    # each column (or row) of the result is an electrode -- CSD along electrodes
    return -numpy.diff(datband, n=2, axis=ax) / spacing ** 2
get current source density approximation using set of local field potentials with equidistant spacing first performs a lowpass filter lfps is a list or numpy array of LFPs arranged spatially by column spacing is in microns
253
46
236,644
def createSynapses(self):
    """Add an exponentially decaying synapse to the soma and the dendrite."""
    for section in (self.soma, self.dend):
        syn = h.ExpSyn(section(0.5))
        syn.tau = 2
        syn.e = 0
        self.synlist.append(syn)  # synlist is defined in Cell
Add an exponentially decaying synapse
101
6
236,645
def createNetcon(self, thresh=10):
    """Create a netcon to record spikes from the soma."""
    netcon = h.NetCon(self.soma(0.5)._ref_v, None, sec=self.soma)
    netcon.threshold = thresh
    return netcon
created netcon to record spikes
55
6
236,646
def createSections(self):
    """Create the soma and dendrite sections of the cell."""
    for section_name in ('soma', 'dend'):
        setattr(self, section_name, h.Section(name=section_name, cell=self))
Create the sections of the cell .
49
7
236,647
def defineGeometry(self):
    """Set the 3D geometry of the cell."""
    soma, dend = self.soma, self.dend
    # compact spherical soma
    soma.L = 18.8
    soma.diam = 18.8
    soma.Ra = 123.0
    # long thin dendrite
    dend.L = 200.0
    dend.diam = 1.0
    dend.Ra = 100.0
Set the 3D geometry of the cell .
68
9
236,648
def defineBiophysics(self):
    """Assign the membrane properties across the cell."""
    # Insert active Hodgkin-Huxley current in the soma
    soma = self.soma
    soma.insert('hh')
    soma.gnabar_hh = 0.12   # Sodium conductance in S/cm2
    soma.gkbar_hh = 0.036   # Potassium conductance in S/cm2
    soma.gl_hh = 0.003      # Leak conductance in S/cm2
    soma.el_hh = -70        # Reversal potential in mV
    # Passive membrane in the dendrite
    dend = self.dend
    dend.insert('pas')
    dend.g_pas = 0.001      # Passive conductance in S/cm2
    dend.e_pas = -65        # Leak reversal potential mV
    dend.nseg = 1000
Assign the membrane properties across the cell .
182
9
236,649
def shapeplot(h, ax, sections=None, order='pre', cvals=None, clim=None,
              cmap=cm.YlOrBr_r, legend=True, **kwargs):
    """Plot a 3D shapeplot of the morphology, one line per segment.

    Args:
        h: NEURON HocObject.
        ax: Target axes. NOTE(review): `ax` and `legend` are accepted but not
            used in this body -- plotting goes through plt.plot; confirm intent.
        sections: Sections to draw; defaults to all sections.
        order: 'pre' to traverse sections in pre-order.
        cvals: Optional per-segment values (numbers mapped through cmap) or
            explicit color specs.
        clim: Color limits [min, max]; derived from cvals when omitted.
        cmap: Matplotlib colormap for numeric cvals.

    Returns:
        List of Line objects, one per plotted segment.
    """
    # meanLineWidth=1.0, maxLineWidth=10.0,
    # Default is to plot all sections.
    if sections is None:
        if order == 'pre':
            sections = allsec_preorder(h)  # Get sections in "pre-order"
        else:
            sections = list(h.allsec())
    # Determine color limits
    if cvals is not None and clim is None:
        clim = [np.nanmin(cvals), np.nanmax(cvals)]
    # Plot each segement as a line
    lines = []
    i = 0  # running segment index into cvals, across all sections
    allDiams = []
    for sec in sections:
        allDiams.append(get_section_diams(h, sec))
    # maxDiams = max([max(d) for d in allDiams])
    # meanDiams = np.mean([np.mean(d) for d in allDiams])
    for isec, sec in enumerate(sections):
        xyz = get_section_path(h, sec)
        seg_paths = interpolate_jagged(xyz, sec.nseg)
        diams = allDiams[isec]
        # represent diams as linewidths
        linewidths = diams  # linewidth is in points so can use actual diams to plot
        # linewidths = [min(d/meanDiams*meanLineWidth, maxLineWidth) for d in diams]  # use if want to scale size
        for (j, path) in enumerate(seg_paths):
            line, = plt.plot(path[:, 0], path[:, 1], path[:, 2], '-k', **kwargs)
            try:
                line.set_linewidth(linewidths[j])
            except:
                # best-effort: keep default linewidth if diams don't line up
                pass
            if cvals is not None:
                if isinstance(cvals[i], numbers.Number):
                    # map number to colormap
                    try:
                        col = cmap(int((cvals[i] - clim[0]) * 255 / (clim[1] - clim[0])))
                    except:
                        # degenerate clim (e.g. zero range) -> lowest color
                        col = cmap(0)
                else:
                    # use input directly. E.g. if user specified color with a string.
                    col = cvals[i]
                line.set_color(col)
            lines.append(line)
            i += 1
    return lines
Plots a 3D shapeplot
570
7
236,650
def shapeplot_animate(v, lines, nframes=None, tscale='linear', clim=[-80, 50],
                      cmap=cm.YlOrBr_r):
    """Return an animate(i) function which updates the shapeplot line colors.

    Args:
        v: 2D indexable of values, first axis time, second axis segment
            (indexed as v[i_t, i_seg]; shape attribute required).
        lines: Line objects from shapeplot, one per segment.
        nframes: Number of animation frames; defaults to the number of rows of v.
        tscale: 'linear' or 'log' mapping of frame index to time index.
        clim: Value limits mapped onto the colormap.
        cmap: Matplotlib colormap.

    Raises:
        ValueError: For an unrecognized tscale option.
    """
    if nframes is None:
        nframes = v.shape[0]
    if tscale == 'linear':
        def animate(i):
            # frame i maps linearly onto the time axis of v
            i_t = int((i / nframes) * v.shape[0])
            for i_seg in range(v.shape[1]):
                lines[i_seg].set_color(
                    cmap(int((v[i_t, i_seg] - clim[0]) * 255 / (clim[1] - clim[0]))))
            return []
    elif tscale == 'log':
        def animate(i):
            # frame i maps logarithmically onto the time axis of v
            i_t = int(np.round((v.shape[0] ** (1.0 / (nframes - 1))) ** i - 1))
            for i_seg in range(v.shape[1]):
                lines[i_seg].set_color(
                    cmap(int((v[i_t, i_seg] - clim[0]) * 255 / (clim[1] - clim[0]))))
            return []
    else:
        raise ValueError("Unrecognized option '%s' for tscale" % tscale)
    return animate
Returns animate function which updates color of shapeplot
312
9
236,651
def mark_locations(h, section, locs, markspec='or', **kwargs):
    """Mark one or more locations along a section on a 3D plot.

    Could be used to mark the location of a recording or electrical
    stimulation. locs are fractional (0..1) positions along the section
    path length.
    """
    # cartesian coordinates of the section path and cumulative arc length
    xyz = get_section_path(h, section)
    (r, theta, phi) = sequential_spherical(xyz)
    rcum = np.append(0, np.cumsum(r))
    # convert locs into lengths from the beginning of the path
    if type(locs) is float or type(locs) is np.float64:
        locs = np.array([locs])
    if type(locs) is list:
        locs = np.array(locs)
    lengths = locs * rcum[-1]
    # find the cartesian coordinate of each marker
    xyz_marks = np.array([find_coord(target_length, xyz, rcum, theta, phi)
                          for target_length in lengths])
    # plot markers
    line, = plt.plot(xyz_marks[:, 0], xyz_marks[:, 1], xyz_marks[:, 2],
                     markspec, **kwargs)
    return line
Marks one or more locations on along a section . Could be used to mark the location of a recording or electrical stimulation .
275
25
236,652
def root_sections(h):
    """Return a list of all sections that have no parent."""
    # has_parent returns a float... compare against 0.9 to treat it as a bool
    return [section for section in h.allsec()
            if h.SectionRef(sec=section).has_parent() < 0.9]
Returns a list of all sections that have no parent .
67
11
236,653
def leaf_sections(h):
    """Return a list of all sections that have no children."""
    # nchild returns a float... compare against 0.9 to treat it as a bool
    return [section for section in h.allsec()
            if h.SectionRef(sec=section).nchild() < 0.9]
Returns a list of all sections that have no children .
65
11
236,654
def root_indices(sec_list):
    """Return the indices of all sections without a parent."""
    # has_parent returns a float... compare against 0.9 to treat it as a bool
    return [i for i, section in enumerate(sec_list)
            if h.SectionRef(sec=section).has_parent() < 0.9]
Returns the index of all sections without a parent .
73
10
236,655
def branch_order(h, section, path=None):
    """Return the branch order of a section.

    Walks from section toward the root, incrementing the order at every
    branch point (a parent with more than one child).

    Args:
        h: NEURON HocObject.
        section: Section whose branch order is computed.
        path: Optional list to which every visited section is appended.
            Defaults to a fresh list. (The original used a mutable default
            argument, so the shared list leaked state between calls.)
    """
    if path is None:
        path = []
    path.append(section)
    sref = h.SectionRef(sec=section)
    # has_parent returns a float... cast to bool
    if sref.has_parent() < 0.9:
        return 0  # section is a root
    nchild = len(list(h.SectionRef(sec=sref.parent).child))
    if nchild <= 1.1:
        # unbranched continuation: same order as the parent
        return branch_order(h, sref.parent, path)
    return 1 + branch_order(h, sref.parent, path)
Returns the branch order of a section
130
7
236,656
def createCells(self):
    """Instantiate Cell objects based on this population's tags.

    Dispatches on which tag is present -- 'cellsList', 'numCells',
    'density', or 'gridSpacing' -- and falls back to a single cell when
    none of them is specified.

    Returns:
        list: the created cells.
    """
    # add individual cells
    if 'cellsList' in self.tags:
        cells = self.createCellsList()
    # create cells based on fixed number of cells
    elif 'numCells' in self.tags:
        cells = self.createCellsFixedNum()
    # create cells based on density (optional ynorm-dep)
    elif 'density' in self.tags:
        cells = self.createCellsDensity()
    # create cells on a fixed grid
    elif 'gridSpacing' in self.tags:
        cells = self.createCellsGrid()
    # not enough tags to create cells
    else:
        self.tags['numCells'] = 1
        # fixed typo in user-facing message: 'Warninig' -> 'Warning'
        print('Warning: number or density of cells not specified for population %s; defaulting to numCells = 1' % (self.tags['pop']))
        cells = self.createCellsFixedNum()
    return cells
Function to instantiate Cell objects based on the characteristics of this population
208
13
236,657
def createCellsList(self):
    """Create this population's cells from an explicit list of individual cells.

    Each entry in self.tags['cellsList'] supplies per-cell tags and,
    optionally, absolute or normalized coordinates; cells are distributed
    across MPI ranks via _distributeCells.

    Returns:
        list: the Cell objects created on this rank.
    """
    from .. import sim
    cells = []
    self.tags['numCells'] = len(self.tags['cellsList'])
    for i in self._distributeCells(len(self.tags['cellsList']))[sim.rank]:
        # if 'cellModel' in self.tags['cellsList'][i]:
        #     self.cellModelClass = getattr(f, self.tags['cellsList'][i]['cellModel'])  # select cell class to instantiate cells based on the cellModel tags
        gid = sim.net.lastGid + i
        self.cellGids.append(gid)  # add gid list of cells belonging to this population - not needed?
        cellTags = {k: v for (k, v) in self.tags.items() if k in sim.net.params.popTagsCopiedToCells}  # copy all pop tags to cell tags, except those that are pop-specific
        cellTags['pop'] = self.tags['pop']
        cellTags.update(self.tags['cellsList'][i])  # add tags specific to this cell
        for coord in ['x', 'y', 'z']:
            if coord in cellTags:  # if absolute coord exists
                cellTags[coord + 'norm'] = cellTags[coord] / getattr(sim.net.params, 'size' + coord.upper())  # calculate norm coord
            elif coord + 'norm' in cellTags:  # elif norm coord exists
                cellTags[coord] = cellTags[coord + 'norm'] * getattr(sim.net.params, 'size' + coord.upper())  # calculate abs coord
            else:
                cellTags[coord + 'norm'] = cellTags[coord] = 0
        if 'cellModel' in self.tags.keys() and self.tags['cellModel'] == 'Vecstim':  # if VecStim, copy spike times to params
            cellTags['params']['spkTimes'] = self.tags['cellsList'][i]['spkTimes']
        cells.append(self.cellModelClass(gid, cellTags))  # instantiate Cell object
        if sim.cfg.verbose:
            print(('Cell %d/%d (gid=%d) of pop %d, on node %d, ' % (i, self.tags['numCells'] - 1, gid, i, sim.rank)))
    sim.net.lastGid = sim.net.lastGid + len(self.tags['cellsList'])
    return cells
Create population cells based on list of individual cells
596
9
236,658
def create(netParams=None, simConfig=None, output=False):
    """Sequence of commands to create a network.

    Args:
        netParams: network parameters; defaults to __main__.netParams.
        simConfig: simulation config; defaults to __main__.simConfig.
        output: if True, return the created objects as a tuple.
    """
    from .. import sim
    import __main__ as top
    if not netParams:
        netParams = top.netParams
    if not simConfig:
        simConfig = top.simConfig
    sim.initialize(netParams, simConfig)  # create network object and set cfg and net params
    pops = sim.net.createPops()  # instantiate network populations
    cells = sim.net.createCells()  # instantiate network cells based on defined populations
    conns = sim.net.connectCells()  # create connections between cells based on params
    stims = sim.net.addStims()  # add external stimulation to cells (IClamps etc)
    rxd = sim.net.addRxD()  # add reaction-diffusion (RxD)
    simData = sim.setupRecording()  # setup variables to record for each cell (spikes, V traces, etc)
    if output:
        return (pops, cells, conns, rxd, stims, simData)
Sequence of commands to create network
231
7
236,659
def intervalSimulate(interval):
    """Run the parallel NEURON simulation, saving data every *interval* ms.

    Args:
        interval: time between intermediate saves (ms).
    """
    from .. import sim
    sim.runSimWithIntervalFunc(interval, sim.intervalSave)  # run parallel Neuron simulation
    # this gather is just a merging of files
    sim.fileGather()
Sequence of commands to simulate network
52
7
236,660
def load(filename, simConfig=None, output=False, instantiate=True, createNEURONObj=True):
    """Sequence of commands to load a saved model and set up recording.

    Args:
        filename: path of the saved model to load.
        simConfig: optional config applied after loading (replaces the
            potentially loaded one).
        output: if True, return the instantiated objects when available.
        instantiate: whether to instantiate the network if no cells loaded.
        createNEURONObj: whether to create NEURON objects on load.
    """
    from .. import sim
    sim.initialize()  # create network object and set cfg and net params
    sim.cfg.createNEURONObj = createNEURONObj
    sim.loadAll(filename, instantiate=instantiate, createNEURONObj=createNEURONObj)
    if simConfig:
        sim.setSimCfg(simConfig)  # set after to replace potentially loaded cfg
    if len(sim.net.cells) == 0 and instantiate:
        pops = sim.net.createPops()  # instantiate network populations
        cells = sim.net.createCells()  # instantiate network cells based on defined populations
        conns = sim.net.connectCells()  # create connections between cells based on params
        stims = sim.net.addStims()  # add external stimulation to cells (IClamps etc)
        rxd = sim.net.addRxD()  # add reaction-diffusion (RxD)
        simData = sim.setupRecording()  # setup variables to record for each cell (spikes, V traces, etc)
    if output:
        try:
            return (pops, cells, conns, stims, rxd, simData)
        except NameError:
            # the network was not (re)instantiated above, so the locals are
            # unbound; the original bare `except: pass` hid every error --
            # narrowed to the only expected failure
            pass
Sequence of commands load simulate and analyse network
285
9
236,661
def createExportNeuroML2(netParams=None, simConfig=None, reference=None,
                         connections=True, stimulations=True, output=False,
                         format='xml'):
    """Sequence of commands to create the network and export it to NeuroML2.

    Args:
        netParams/simConfig: default to __main__ attributes when None.
        reference: reference id for the exported model.
        connections: whether to export connectivity.
        stimulations: whether to export stimulations.
        output: if True, return the created objects as a tuple.
        format: NeuroML2 serialization format (e.g. 'xml').
    """
    from .. import sim
    import __main__ as top
    if not netParams:
        netParams = top.netParams
    if not simConfig:
        simConfig = top.simConfig
    sim.initialize(netParams, simConfig)  # create network object and set cfg and net params
    pops = sim.net.createPops()  # instantiate network populations
    cells = sim.net.createCells()  # instantiate network cells based on defined populations
    conns = sim.net.connectCells()  # create connections between cells based on params
    stims = sim.net.addStims()  # add external stimulation to cells (IClamps etc)
    rxd = sim.net.addRxD()  # add reaction-diffusion (RxD)
    simData = sim.setupRecording()  # setup variables to record for each cell (spikes, V traces, etc)
    sim.exportNeuroML2(reference, connections, stimulations, format)  # export cells and connectivity to NeuroML 2 format
    if output:
        return (pops, cells, conns, stims, rxd, simData)
Sequence of commands to create and export network to NeuroML2
282
13
236,662
def exception(function):
    """Decorator that calls *function*, printing any exception and returning -1.

    The wrapped function's metadata is preserved via functools.wraps.
    """
    @functools.wraps(function)
    def inner(*args, **kwargs):
        try:
            result = function(*args, **kwargs)
        except Exception as exc:
            header = "There was an exception in %s():" % (function.__name__)
            print(("%s \n %s \n%s" % (header, exc, sys.exc_info())))
            return -1
        else:
            return result
    return inner
A decorator that wraps the passed in function and prints exception should one occur
104
15
236,663
def getSpktSpkid(cellGids=[], timeRange=None, allCells=False):
    """Return spike ids and times for the given cells and time window.

    With allCells=True only the time slice needs to be identified, so the
    gid filter is skipped entirely (faster).

    Args:
        cellGids: gids to include; empty list (or allCells=True) means all.
        timeRange: [t_start, t_end] in ms, or None/empty for all times.
        allCells: if True, skip the gid filter.

    Returns:
        tuple: (selected DataFrame, list of spike times, list of spike ids).
    """
    from .. import sim
    import pandas as pd
    try:  # Pandas 0.24 and later
        from pandas import _lib as pandaslib
    except ImportError:  # Pandas 0.23 and earlier (bare except narrowed)
        from pandas import lib as pandaslib
    df = pd.DataFrame(
        pandaslib.to_object_array([sim.allSimData['spkt'], sim.allSimData['spkid']]).transpose(),
        columns=['spkt', 'spkid'])
    if timeRange:
        # binary search is faster than a query; renamed from min/max to
        # avoid shadowing the builtins
        lo, hi = [int(df['spkt'].searchsorted(timeRange[i])) for i in range(2)]
    else:  # timeRange None or empty list means all times
        lo, hi = 0, len(df)
    if len(cellGids) == 0 or allCells:  # get all by either using flag or giving empty list
        sel = df[lo:hi]
    else:
        sel = df[lo:hi].query('spkid in @cellGids')
    return sel, sel['spkt'].tolist(), sel['spkid'].tolist()
return spike ids and times ; with allCells = True just need to identify slice of time so can omit cellGids
356
26
236,664
def calcTransferResistance ( self , gid , seg_coords ) : sigma = 0.3 # mS/mm # Value used in NEURON extracellular recording example ("extracellular_stim_and_rec") # rho = 35.4 # ohm cm, squid axon cytoplasm = 2.8249e-2 S/cm = 0.028 S/cm = 0.0028 S/mm = 2.8 mS/mm # rho_um = 35.4 * 0.01 = 35.4 / 1e6 * 1e4 = 0.354 Mohm um ~= 3 uS / um = 3000 uS / mm = 3 mS /mm # equivalent sigma value (~3) is 10x larger than Allen (0.3) # if use same sigma value, results are consistent r05 = ( seg_coords [ 'p0' ] + seg_coords [ 'p1' ] ) / 2 dl = seg_coords [ 'p1' ] - seg_coords [ 'p0' ] nseg = r05 . shape [ 1 ] tr = np . zeros ( ( self . nsites , nseg ) ) # tr_NEURON = np.zeros((self.nsites,nseg)) # used to compare with NEURON extracellular example for j in range ( self . nsites ) : # calculate mapping for each site on the electrode rel = np . expand_dims ( self . pos [ : , j ] , axis = 1 ) # coordinates of a j-th site on the electrode rel_05 = rel - r05 # distance between electrode and segment centers r2 = np . einsum ( 'ij,ij->j' , rel_05 , rel_05 ) # compute dot product column-wise, the resulting array has as many columns as original rlldl = np . einsum ( 'ij,ij->j' , rel_05 , dl ) # compute dot product column-wise, the resulting array has as many columns as original dlmag = np . linalg . norm ( dl , axis = 0 ) # length of each segment rll = abs ( rlldl / dlmag ) # component of r parallel to the segment axis it must be always positive rT2 = r2 - rll ** 2 # square of perpendicular component up = rll + dlmag / 2 low = rll - dlmag / 2 num = up + np . sqrt ( up ** 2 + rT2 ) den = low + np . sqrt ( low ** 2 + rT2 ) tr [ j , : ] = np . log ( num / den ) / dlmag # units of (1/um) use with imemb_ (total seg current) # Consistent with NEURON extracellular recording example # r = np.sqrt(rel_05[0,:]**2 + rel_05[1,:]**2 + rel_05[2,:]**2) # tr_NEURON[j, :] = (rho / 4 / math.pi)*(1/r)*0.01 tr *= 1 / ( 4 * math . pi * sigma ) # units: 1/um / (mS/mm) = mm/um / mS = 1e3 * kOhm = MOhm self . 
transferResistances [ gid ] = tr
Precompute mapping from segment to electrode locations
754
9
236,665
def importConnFromExcel ( fileName , sheetName ) : import openpyxl as xl # set columns colPreTags = 0 # 'A' colPostTags = 1 # 'B' colConnFunc = 2 # 'C' colSyn = 3 # 'D' colProb = 5 # 'F' colWeight = 6 # 'G' colAnnot = 8 # 'I' outFileName = fileName [ : - 5 ] + '_' + sheetName + '.py' # set output file name connText = """## Generated using importConnFromExcel() function in params/utils.py \n\nnetParams['connParams'] = [] \n\n""" # open excel file and sheet wb = xl . load_workbook ( fileName ) sheet = wb . get_sheet_by_name ( sheetName ) numRows = sheet . get_highest_row ( ) with open ( outFileName , 'w' ) as f : f . write ( connText ) # write starting text for row in range ( 1 , numRows + 1 ) : if sheet . cell ( row = row , column = colProb ) . value : # if not empty row print ( 'Creating conn rule for row ' + str ( row ) ) # read row values pre = sheet . cell ( row = row , column = colPreTags ) . value post = sheet . cell ( row = row , column = colPostTags ) . value func = sheet . cell ( row = row , column = colConnFunc ) . value syn = sheet . cell ( row = row , column = colSyn ) . value prob = sheet . cell ( row = row , column = colProb ) . value weight = sheet . cell ( row = row , column = colWeight ) . value # write preTags line = "netParams['connParams'].append({'preConds': {" for i , cond in enumerate ( pre . split ( ';' ) ) : # split into different conditions if i > 0 : line = line + ", " cond2 = cond . split ( '=' ) # split into key and value line = line + "'" + cond2 [ 0 ] . replace ( ' ' , '' ) + "': " + cond2 [ 1 ] . replace ( ' ' , '' ) # generate line line = line + "}" # end of preTags # write postTags line = line + ",\n'postConds': {" for i , cond in enumerate ( post . split ( ';' ) ) : # split into different conditions if i > 0 : line = line + ", " cond2 = cond . split ( '=' ) # split into key and value line = line + "'" + cond2 [ 0 ] . replace ( ' ' , '' ) + "': " + cond2 [ 1 ] . 
replace ( ' ' , '' ) # generate line line = line + "}" # end of postTags line = line + ",\n'connFunc': '" + func + "'" # write connFunc line = line + ",\n'synMech': '" + syn + "'" # write synReceptor line = line + ",\n'probability': " + str ( prob ) # write prob line = line + ",\n'weight': " + str ( weight ) # write prob line = line + "})" # add closing brackets line = line + '\n\n' # new line after each conn rule f . write ( line )
Import connectivity rules from Excel sheet
757
6
236,666
def safe_dump(data, stream=None, **kwds):
    """Implementation of safe dump using the Ordered-Dict YAML Dumper (ODYD).

    Mirrors yaml.safe_dump but preserves mapping key order.
    """
    return yaml.dump(data, stream=stream, Dumper=ODYD, **kwds)
implementation of safe dumper using Ordered Dict Yaml Dumper
42
15
236,667
def dump(data, **kwds):
    """Dump *data* as YAML.

    Chooses yaml.safe_dump when the default loader is selected, otherwise
    the order-preserving odyldo.safe_dump.
    """
    dumper = yaml.safe_dump if _usedefaultyamlloader else odyldo.safe_dump
    return dumper(data, **kwds)
dump the data as YAML
59
7
236,668
def bibtex(self):
    """Return a BibTeX entry for the current article.

    Issues a fresh ExportQuery per call, which quickly exhausts API rate
    limits; a UserWarning recommends using ads.ExportQuery directly.
    """
    warnings.warn("bibtex should be queried with ads.ExportQuery(); You will "
                  "hit API ratelimits very quickly otherwise.", UserWarning)
    return ExportQuery(bibcodes=self.bibcode, format="bibtex").execute()
Return a BiBTeX entry for the current article .
68
11
236,669
def get_pdf(article, debug=False):
    """Download an article PDF, preferring the arXiv version.

    Falls back to asking ADS to redirect to the journal article when no
    arXiv identifier is present.

    Args:
        article: object with .identifier (list of strings) and .bibcode.
        debug: if True, raise on HTTP errors instead of returning None.

    Returns:
        The PDF content, or None on failure / access denial.
    """
    print('Retrieving {0}'.format(article))
    identifier = [_ for _ in article.identifier if 'arXiv' in _]
    if identifier:
        # build the arXiv PDF URL from the yymm and numeric parts of the id
        url = 'http://arXiv.org/pdf/{0}.{1}'.format(identifier[0][9:13], ''.join(_ for _ in identifier[0][14:] if _.isdigit()))
    else:
        # No arXiv version. Ask ADS to redirect us to the journal article.
        params = {'bibcode': article.bibcode, 'link_type': 'ARTICLE', 'db_key': 'AST'}
        url = requests.get('http://adsabs.harvard.edu/cgi-bin/nph-data_query', params=params).url
    q = requests.get(url)
    if not q.ok:
        print('Error retrieving {0}: {1} for {2}'.format(article, q.status_code, url))
        if debug:
            q.raise_for_status()
        else:
            return None
    # Check if the journal has given back forbidden HTML.
    # NOTE(review): q.content is bytes on Python 3, so endswith('</html>')
    # assumes Python 2 str semantics -- confirm the target interpreter.
    if q.content.endswith('</html>'):
        print('Error retrieving {0}: 200 (access denied?) for {1}'.format(article, url))
        return None
    return q.content
Download an article PDF from arXiv .
316
9
236,670
def summarise_pdfs(pdfs):
    """Collate the first page of each PDF into a single PDF.

    Args:
        pdfs: list of PDF content strings; None entries (failed
            downloads) are counted and skipped.

    Returns:
        PyPDF2 PdfFileWriter holding one page per successful input.
    """
    # Ignore None.
    print('Summarising {0} articles ({1} had errors)'.format(len(pdfs), pdfs.count(None)))
    pdfs = [_ for _ in pdfs if _ is not None]
    summary = PdfFileWriter()
    for pdf in pdfs:
        summary.addPage(PdfFileReader(StringIO(pdf)).getPage(0))
    return summary
Collate the first page from each of the PDFs provided into a single PDF .
104
17
236,671
def execute(self):
    """POST the metrics payload to the service and return the parsed metrics.

    Side effect: stores the raw MetricsResponse on self.response.
    """
    self.response = MetricsResponse.load_http_response(
        self.session.post(self.HTTP_ENDPOINT, data=self.json_payload))
    return self.response.metrics
Execute the http request to the metrics service
51
9
236,672
def get_info(cls):
    """Return a newline-separated string describing every instantiated
    singleton tracked in cls._instances."""
    descriptions = [str(instance) for instance in cls._instances.values()]
    return '\n'.join(descriptions)
Print all of the instantiated Singletons
41
9
236,673
def load_http_response(cls, http_response):
    """Instantiate *cls* from a requests.Response and record rate limits.

    Args:
        http_response: requests.Response from the API call.

    Raises:
        APIResponseError: when the response status is not OK.

    Returns:
        An instance of *cls* with .response set to the raw response.
    """
    if not http_response.ok:
        raise APIResponseError(http_response.text)
    c = cls(http_response)
    c.response = http_response
    # update per-class rate-limit bookkeeping from the response headers
    RateLimits.getRateLimits(cls.__name__).set(c.response.headers)
    return c
This method should return an instantiated class and set its response to the requests . Response object .
79
19
236,674
def token(self):
    """Lazily resolve and cache the API token.

    Lookup order, stopping at the first token found:
      1. environment variables named in TOKEN_ENVIRON_VARS
      2. plaintext contents of files listed in TOKEN_FILES
      3. ads.config.token
    Warns (RuntimeWarning) when nothing is found; the cached value is
    returned on subsequent calls.
    """
    if self._token is None:
        for v in map(os.environ.get, TOKEN_ENVIRON_VARS):
            if v is not None:
                self._token = v
                return self._token
        for f in TOKEN_FILES:
            try:
                with open(f) as fp:
                    self._token = fp.read().strip()
                return self._token
            except IOError:
                # file missing/unreadable; try the next candidate
                pass
        if ads.config.token is not None:
            self._token = ads.config.token
            return self._token
        warnings.warn("No token found", RuntimeWarning)
    return self._token
Set the instance attribute token following this logic, stopping as soon as a token is found. Raises NoTokenFound if no token is found: - environment variables TOKEN_ENVIRON_VARS - a file containing the token as plaintext contents, in TOKEN_FILES - ads.config.token
143
59
236,675
def session(self):
    """http session interface; transparent proxy to a lazily-created,
    authorized requests session (cached on the instance)."""
    if self._session is None:
        new_session = requests.session()
        new_session.headers.update({
            "Authorization": "Bearer {}".format(self.token),
            "User-Agent": "ads-api-client/{}".format(__version__),
            "Content-Type": "application/json",
        })
        self._session = new_session
    return self._session
http session interface transparent proxy to requests . session
94
9
236,676
def from_csv(input_csv_pattern, headers=None, schema_file=None):
    """Create a Metrics instance from a csv file pattern.

    Column names come from *headers* when given, otherwise from the JSON
    schema file; at least one of the two must be supplied.

    Raises:
        ValueError: when neither headers nor schema_file is provided.
    """
    if headers is None and schema_file is None:
        raise ValueError('Either headers or schema_file is needed')
    if headers is not None:
        column_names = headers
    else:
        with _util.open_local_or_gcs(schema_file, mode='r') as f:
            column_names = [entry['name'] for entry in json.load(f)]
    return Metrics(input_csv_pattern=input_csv_pattern, headers=column_names)
Create a Metrics instance from csv file pattern .
128
11
236,677
def from_bigquery(sql):
    """Create a Metrics instance from a BigQuery query or table name.

    A bare table name (2-3 dot-separated parts, no spaces) is backquoted;
    anything else is treated as a query and parenthesized.
    """
    if isinstance(sql, bq.Query):
        sql = sql._expanded_sql()
    parts = sql.split('.')
    looks_like_table = 1 < len(parts) <= 3 and not any(' ' in p for p in parts)
    if looks_like_table:
        sql = '`' + sql + '`'  # table name
    else:
        sql = '(' + sql + ')'  # query, not a table name
    return Metrics(bigquery=sql)
Create a Metrics instance from a bigquery query or table .
112
13
236,678
def _get_data_from_csv_files(self):
    """Read every input csv file and concatenate them into one DataFrame."""
    frames = []
    for file_name in self._input_csv_files:
        with _util.open_local_or_gcs(file_name, mode='r') as f:
            frames.append(pd.read_csv(f, names=self._headers))
    return pd.concat(frames, ignore_index=True)
Get data from input csv files .
104
8
236,679
def _get_data_from_bigquery ( self , queries ) : all_df = [ ] for query in queries : all_df . append ( query . execute ( ) . result ( ) . to_dataframe ( ) ) df = pd . concat ( all_df , ignore_index = True ) return df
Get data from bigquery table or query .
70
9
236,680
def _expanded_sql(self):
    """Get the expanded BigQuery SQL string of this UDF.

    Built lazily from the UDF's pieces on first access and cached in
    self._sql.
    """
    if not self._sql:
        self._sql = UDF._build_udf(self._name, self._code, self._return_type,
                                   self._params, self._language, self._imports)
    return self._sql
Get the expanded BigQuery SQL string of this UDF
69
11
236,681
def _build_udf ( name , code , return_type , params , language , imports ) : params = ',' . join ( [ '%s %s' % named_param for named_param in params ] ) imports = ',' . join ( [ 'library="%s"' % i for i in imports ] ) if language . lower ( ) == 'sql' : udf = 'CREATE TEMPORARY FUNCTION {name} ({params})\n' + 'RETURNS {return_type}\n' + 'AS (\n' + '{code}\n' + ');' else : udf = 'CREATE TEMPORARY FUNCTION {name} ({params})\n' + 'RETURNS {return_type}\n' + 'LANGUAGE {language}\n' + 'AS """\n' + '{code}\n' + '"""\n' + 'OPTIONS (\n' + '{imports}\n' + ');' return udf . format ( name = name , params = params , return_type = return_type , language = language , code = code , imports = imports )
Creates the UDF part of a BigQuery query using its pieces
255
14
236,682
def created_on(self):
    """The creation timestamp of the bucket as a datetime.datetime, or
    None when 'timeCreated' is absent or empty."""
    timestamp = self._info.get('timeCreated', None)
    if not timestamp:
        return None
    return dateutil.parser.parse(timestamp)
The created timestamp of the bucket as a datetime . datetime .
38
14
236,683
def metadata(self):
    """Retrieve metadata about the bucket, fetching lazily and caching it.

    Returns:
        BucketMetadata built from the cached info, or None when the info
        is empty. API errors propagate to the caller.
    """
    if self._info is None:
        # the original wrapped this in `except Exception as e: raise e`,
        # a no-op that only truncated tracebacks; let errors propagate
        self._info = self._api.buckets_get(self._name)
    return BucketMetadata(self._info) if self._info else None
Retrieves metadata about the bucket .
59
8
236,684
def object(self, key):
    """Return a Storage Object for *key* in this bucket.

    No API call is made; the object is constructed locally.
    """
    return _object.Object(self._name, key, context=self._context)
Retrieves a Storage Object for the specified key in this bucket .
28
14
236,685
def objects(self, prefix=None, delimiter=None):
    """Return an iterator over the objects within this bucket.

    Args:
        prefix: only include objects whose names start with this prefix.
        delimiter: directory-style grouping delimiter (e.g. '/').
    """
    return _object.Objects(self._name, prefix, delimiter, context=self._context)
Get an iterator for the objects within this bucket .
38
10
236,686
def delete(self):
    """Delete the bucket if it exists.

    API errors propagate to the caller -- the original's
    `except Exception as e: raise e` was a no-op re-raise that only
    truncated the traceback, so it has been removed.
    """
    if self.exists():
        self._api.buckets_delete(self._name)
Deletes the bucket .
36
5
236,687
def contains(self, name):
    """Check whether the named bucket exists.

    Args:
        name: bucket name to look up.

    Returns:
        True if the bucket exists, False when the API reports 404.
        Other API errors propagate (the original's extra
        `except Exception as e: raise e` was a no-op and is removed;
        bare `raise` preserves the traceback).
    """
    try:
        self._api.buckets_get(name)
    except google.datalab.utils.RequestException as e:
        if e.status == 404:
            return False
        raise
    return True
Checks if the specified bucket exists .
56
8
236,688
def item(self, key):
    """Return an Item object for *key* in this bucket.

    No API call is made; the item is constructed locally.
    """
    return _item.Item(self._name, key, context=self._context)
Retrieves an Item object for the specified key in this bucket .
28
14
236,689
def items(self, prefix=None, delimiter=None):
    """Return an iterator over the items within this bucket.

    Args:
        prefix: only include items whose names start with this prefix.
        delimiter: directory-style grouping delimiter (e.g. '/').
    """
    return _item.Items(self._name, prefix, delimiter, context=self._context)
Get an iterator for the items within this bucket .
38
10
236,690
def create(self, project_id=None):
    """Create the bucket if it does not already exist.

    Args:
        project_id: project to create the bucket in; defaults to the
            API's configured project.

    Returns:
        self, allowing call chaining. API errors propagate (the
        original's `except Exception as e: raise e` was a no-op).
    """
    if not self.exists():
        if project_id is None:
            project_id = self._api.project_id
        self._info = self._api.buckets_insert(self._name, project_id=project_id)
    return self
Creates the bucket .
77
5
236,691
def create(self, name):
    """Create a new bucket with the given name in this context's project."""
    return Bucket(name, context=self._context).create(self._project_id)
Creates a new bucket .
30
6
236,692
def train(train_dataset, eval_dataset, analysis_dir, output_dir, features,
          layer_sizes, max_steps=5000, num_epochs=None, train_batch_size=100,
          eval_batch_size=16, min_eval_frequency=100, learning_rate=0.01,
          epsilon=0.0005, job_name=None, cloud=None,
          ):
    """Blocking version of train_async: submit the job, wait, print its state.

    See the documentation of train_async for the meaning of each parameter.
    """
    job = train_async(
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        analysis_dir=analysis_dir,
        output_dir=output_dir,
        features=features,
        layer_sizes=layer_sizes,
        max_steps=max_steps,
        num_epochs=num_epochs,
        train_batch_size=train_batch_size,
        eval_batch_size=eval_batch_size,
        min_eval_frequency=min_eval_frequency,
        learning_rate=learning_rate,
        epsilon=epsilon,
        job_name=job_name,
        cloud=cloud,
    )
    job.wait()
    print('Training: ' + str(job.state))
Blocking version of train_async . See documentation for train_async .
262
17
236,693
def list(self, pattern='*'):
    """Return the resource descriptors whose type matches *pattern* (glob).

    The descriptor list is fetched once from the client and cached on the
    instance for subsequent calls.
    """
    if self._descriptors is None:
        self._descriptors = self._client.list_resource_descriptors(
            filter_string=self._filter_string)
    matches = []
    for descriptor in self._descriptors:
        if fnmatch.fnmatch(descriptor.type, pattern):
            matches.append(descriptor)
    return matches
Returns a list of resource descriptors that match the filters .
79
12
236,694
def _gcs_list_buckets(project, pattern):
    """List Google Cloud Storage buckets in *project* matching *pattern*.

    Returns a rendered dictionary table with 'Bucket' and 'Created' columns.
    """
    data = [{'Bucket': 'gs://' + bucket.name, 'Created': bucket.metadata.created_on}
            for bucket in google.datalab.storage.Buckets(_make_context(project))
            if fnmatch.fnmatch(bucket.name, pattern)]
    return google.datalab.utils.commands.render_dictionary(data, ['Bucket', 'Created'])
List all Google Cloud Storage buckets that match a pattern .
111
11
236,695
def _gcs_list_keys(bucket, pattern):
    """List keys in the specified *bucket* that match *pattern*.

    Returns a rendered dictionary table with Name/Type/Size/Updated columns.
    """
    data = [{'Name': obj.metadata.name,
             'Type': obj.metadata.content_type,
             'Size': obj.metadata.size,
             'Updated': obj.metadata.updated_on}
            for obj in _gcs_get_keys(bucket, pattern)]
    return google.datalab.utils.commands.render_dictionary(data, ['Name', 'Type', 'Size', 'Updated'])
List all Google Cloud Storage keys in a specified bucket that match a pattern .
115
15
236,696
def prepare_image_transforms(element, image_columns):
    """Replace each image URL in *element* with its base64-encoded jpeg bytes.

    Beam counters track missing and unreadable images.

    Args:
        element: dict-like row being transformed in place.
        image_columns: names of the columns holding image URIs.

    Returns:
        The mutated element, or None when any image failed to load.
    """
    import base64
    import cStringIO
    from PIL import Image
    from tensorflow.python.lib.io import file_io as tf_file_io
    from apache_beam.metrics import Metrics
    img_error_count = Metrics.counter('main', 'ImgErrorCount')
    img_missing_count = Metrics.counter('main', 'ImgMissingCount')
    for name in image_columns:
        uri = element[name]
        if not uri:
            img_missing_count.inc()
            continue
        try:
            with tf_file_io.FileIO(uri, 'r') as f:
                img = Image.open(f).convert('RGB')
        # A variety of different calling libraries throw different exceptions here.
        # They all correspond to an unreadable file so we treat them equivalently.
        # pylint: disable broad-except
        except Exception as e:
            logging.exception('Error processing image %s: %s', uri, str(e))
            img_error_count.inc()
            return
        # Convert to desired format and output.
        output = cStringIO.StringIO()
        img.save(output, 'jpeg')
        element[name] = base64.urlsafe_b64encode(output.getvalue())
    return element
Replace an images url with its jpeg bytes .
299
11
236,697
def decode_csv(csv_string, column_names):
    """Parse one csv line into a dict keyed by *column_names*.

    Raises:
        ValueError: when the line's field count differs from the number
            of column names.
    """
    import csv
    fields = next(csv.reader([csv_string]))
    if len(fields) != len(column_names):
        raise ValueError('csv line %s does not have %d columns' % (csv_string, len(column_names)))
    return dict(zip(column_names, fields))
Parse a csv line into a dict .
98
10
236,698
def encode_csv(data_dict, column_names):
    """Build a single csv line from *data_dict*, in *column_names* order."""
    import csv
    import six
    row = [str(data_dict[name]) for name in column_names]
    buff = six.StringIO()
    csv.writer(buff, lineterminator='').writerow(row)
    return buff.getvalue()
Builds a csv string .
81
7
236,699
def serialize_example(transformed_json_data, info_dict):
    """Make a serialized tf.train.Example from transformed column values.

    Args:
        transformed_json_data: dict of column name -> list of values
            (int/float/bytes, matching the column's dtype).
        info_dict: dict of column name -> {'dtype': tf dtype, ...}; its
            keys must match transformed_json_data exactly.

    Raises:
        ValueError: on key mismatch or an unsupported dtype.

    Returns:
        The serialized Example proto string.
    """
    import six
    import tensorflow as tf

    def _make_int64_list(x):
        return tf.train.Feature(int64_list=tf.train.Int64List(value=x))

    def _make_bytes_list(x):
        return tf.train.Feature(bytes_list=tf.train.BytesList(value=x))

    def _make_float_list(x):
        return tf.train.Feature(float_list=tf.train.FloatList(value=x))

    if sorted(six.iterkeys(transformed_json_data)) != sorted(six.iterkeys(info_dict)):
        raise ValueError('Keys do not match %s, %s' % (list(six.iterkeys(transformed_json_data)),
                                                       list(six.iterkeys(info_dict))))
    ex_dict = {}
    for name, info in six.iteritems(info_dict):
        if info['dtype'] == tf.int64:
            ex_dict[name] = _make_int64_list(transformed_json_data[name])
        elif info['dtype'] == tf.float32:
            ex_dict[name] = _make_float_list(transformed_json_data[name])
        elif info['dtype'] == tf.string:
            ex_dict[name] = _make_bytes_list(transformed_json_data[name])
        else:
            raise ValueError('Unsupported data type %s' % info['dtype'])
    ex = tf.train.Example(features=tf.train.Features(feature=ex_dict))
    return ex.SerializeToString()
Makes a serialized tf . example .
395
9