idx
int64
0
251k
question
stringlengths
53
3.53k
target
stringlengths
5
1.23k
len_question
int64
20
893
len_target
int64
3
238
10,100
def import_submodules ( context , root_module , path ) : for _ , module_name , _ in pkgutil . walk_packages ( path , root_module + '.' ) : # this causes a Runtime error with model conflicts # module = loader.find_module(module_name).load_module(module_name) module = __import__ ( module_name , globals ( ) , locals ( ) , [ '__name__' ] ) for k , v in vars ( module ) . items ( ) : if not k . startswith ( '_' ) : context [ k ] = v context [ module_name ] = module
Import all submodules and register them in the context namespace .
141
12
10,101
def command ( func ) : classname = inspect . getouterframes ( inspect . currentframe ( ) ) [ 1 ] [ 3 ] name = func . __name__ help_name = name . replace ( "do_" , "help_" ) doc = textwrap . dedent ( func . __doc__ ) def new ( instance , args ) : # instance.new.__doc__ = doc try : argv = shlex . split ( args ) arguments = docopt ( doc , help = True , argv = argv ) func ( instance , args , arguments ) except SystemExit : if args not in ( '-h' , '--help' ) : Console . error ( "Could not execute the command." ) print ( doc ) new . __doc__ = doc return new
A decorator to create a function with docopt arguments . It also generates a help function
167
18
10,102
def addFile ( self , path , msg = "" ) : item = Item . from_path ( repo = self . repo , path = path ) self . addItem ( item )
Adds a file to the version
38
6
10,103
def addItem ( self , item ) : try : self . tree . addItem ( item ) except AttributeError , e : raise VersionError ( 'Saved versions are immutable' )
Adds an item if the tree is mutable
39
9
10,104
def removeItem ( self , item ) : try : self . tree . removeItem ( item ) except AttributeError , e : raise VersionError ( 'Saved versions are immutable' )
Removes an item if the tree is mutable
39
10
10,105
def iteritems ( self ) : if self . type in [ 'blob' ] : raise StopIteration for path , mode , sha in self . tree . iteritems ( ) : item = Item ( self , sha , path , mode ) yield item for i in item . iteritems ( ) : yield i
Generator that yields Items
67
5
10,106
def items ( self , path = None ) : items = list ( self . iteritems ( ) ) if path is not None : path += '$' regex = re . compile ( path ) items = [ i for i in items if regex . match ( i . path ) ] return items
Returns set of items .
60
5
10,107
def _get_blob ( self ) : if not self . __blob : self . __blob = self . repo . get_object ( self . id ) return self . __blob
read blob on access only because get_object is slow
42
11
10,108
def from_path ( self , repo , path , name = None ) : if name is None : name = os . path . basename ( path ) #FIXME: hack, there has to be a better way return Item . from_string ( repo = repo , name = name , string = open ( path ) . read ( ) )
Create a new Item from a file path .
71
9
10,109
def from_string ( self , repo , name , string ) : try : log . debug ( 'Creating new item: %s' % name ) blob = Blob . from_string ( string ) item = Item ( parent = repo , sha = blob . sha , path = name ) item . blob = blob return item except AssertionError , e : raise ItemError ( e )
Create a new Item from a data stream .
83
9
10,110
def save ( self , msg = None ) : if msg is None : msg = 'Saving %s' % self . name log . debug ( msg ) self . repo . addItem ( self , msg )
Modify item data and commit to repo . Git objects are immutable to save means adding a new item
44
20
10,111
def checkout ( self , path ) : if os . path . isdir ( path ) : path = os . path . join ( path , self . name ) try : log . debug ( 'Checking out %s to %s' % ( self . path , path ) ) f = open ( path , 'w' ) f . write ( self . data ( ) ) f . close ( ) return True except Exception , e : raise ItemError ( e )
Check out file data to path .
96
7
10,112
def save ( self , message ) : self . commit . message = message self . commit . tree = self . tree #TODO: store new blobs only for item in self . tree . items ( ) : self . repo . object_store . add_object ( item . blob ) self . repo . object_store . add_object ( self . tree ) # set HEAD to new commit self . repo . object_store . add_object ( self . commit ) self . repo . refs [ 'refs/heads/master' ] = self . commit . id
Add version to repo object store set repo head to version sha .
121
14
10,113
def new ( self , repo ) : #TODO: subclass Commit, pass parent as init param try : # create new commit instance and set metadata commit = Commit ( ) author = os . environ . get ( 'USER' ) commit . author = commit . committer = author commit . commit_time = commit . author_time = int ( time ( ) ) tz = parse_timezone ( '-0200' ) [ 0 ] commit . commit_timezone = commit . author_timezone = tz commit . encoding = "UTF-8" commit . message = '' # set previous version as parent to this one parent = repo . versions ( - 1 ) if parent : commit . parents = [ parent . id ] # create new tree, add entries from previous version tree = Tree ( ) curr = repo . versions ( - 1 ) if curr : for item in curr . items ( ) : tree . addItem ( item ) commit . tree = tree . id # create new version, and add tree version = Version ( repo = repo , commit = commit , tree = tree ) return version except Exception , e : traceback . print_exc ( ) return VersionError ( e )
Create a new version of a repo . Local object .
251
11
10,114
def confirm ( prompt = None , resp = False ) : if prompt is None : prompt = 'Confirm' if resp : prompt = '%s [%s]|%s: ' % ( prompt , 'y' , 'n' ) else : prompt = '%s [%s]|%s: ' % ( prompt , 'n' , 'y' ) while True : ans = raw_input ( prompt ) if not ans : return resp if ans not in [ 'y' , 'Y' , 'n' , 'N' ] : print 'please enter y or n.' continue if ans == 'y' or ans == 'Y' : return True if ans == 'n' or ans == 'N' : return False
Prompts user for confirmation .
158
7
10,115
def prompt ( name , default ) : value = raw_input ( '%s [%s]: ' % ( name , default ) ) if not value : value = default return value
Prompts user for raw input .
38
8
10,116
def new ( url ) : from grit import Repo return Repo . new ( url = url , bare = True )
Creates a new Repo class instance at url .
25
11
10,117
def checkout ( url , version = None ) : from grit import Repo r = Repo ( url ) def _write ( item ) : log . debug ( 'writing: %s' % item . name ) if item . type != 'blob' : return if r . type in [ 'repo' , 'proxy' , 'local' ] : path = os . path . join ( r . name , item . path ) pdir = os . path . dirname ( path ) if not os . path . isdir ( pdir ) : os . makedirs ( pdir ) else : path = item . name f = open ( path , 'w' ) f . write ( item . data ( ) ) f . close ( ) if r . type == 'blob' : _write ( r ) else : items = r . items ( ) count = 1 total = len ( items ) while count <= total : print '[%s/%s] %0.2f%%' % ( count , total , ( float ( count ) / total ) * 100 ) , '*' * count , '\r' , _write ( items [ count - 1 ] ) count += 1 sys . stdout . flush ( ) print
Checks out latest version of item or repository .
261
10
10,118
def checkin ( url , files , message = None ) : from grit import Repo , Item r = Repo ( url ) if not files : raise GritError ( 'No files' ) def _write ( path ) : item = Item . from_path ( repo = r , path = path ) if r . isLocal ( ) : v . addItem ( item = item ) else : r . upload ( filename = os . path . basename ( path ) , filedata = open ( path , 'r' ) . read ( ) ) if r . isLocal ( ) : v = r . addVersion ( ) count = 1 total = len ( files ) while count <= total : print '[%s/%s] %0.2f%%' % ( count , total , ( float ( count ) / total ) * 100 ) , '*' * count , '\r' , _write ( os . path . abspath ( files [ count - 1 ] ) ) count += 1 sys . stdout . flush ( ) if message is None : message = 'Publishing %s' % ', ' . join ( files ) if r . isLocal ( ) : v . save ( message = message ) print
Check in files to a repository .
256
7
10,119
def get ( cls , * * kwargs ) : fields = { } for field in cls . url_fields : value = kwargs . pop ( field , None ) if value is None : cls . _handle_wrong_field ( field , ATTR_TYPE_URL ) fields [ field ] = value # Create an instance of the model class and make the GET request model = cls ( * * fields ) model . _populate ( * * kwargs ) return model
Retrieve an object by making a GET request to Transifex .
105
14
10,120
def save ( self , * * fields ) : for field in fields : if field in self . writable_fields : setattr ( self , field , fields [ field ] ) else : self . _handle_wrong_field ( field , ATTR_TYPE_WRITE ) if self . _populated_fields : self . _update ( * * self . _modified_fields ) else : self . _create ( * * self . _modified_fields )
Save the instance to the remote Transifex server .
97
11
10,121
def _get ( self , * * kwargs ) : path = self . _construct_path_to_item ( ) return self . _http . get ( path )
Get the resource from a remote Transifex server .
37
11
10,122
def _create ( self , * * kwargs ) : path = self . _construct_path_to_collection ( ) # Use the fields for which we have values for field in self . writable_fields : try : value = getattr ( self , field ) kwargs [ field ] = value except AttributeError : pass return self . _http . post ( path , json . dumps ( kwargs ) )
Create a resource in the remote Transifex server .
90
11
10,123
def _update ( self , * * kwargs ) : path = self . _construct_path_to_item ( ) if not kwargs : return return self . _http . put ( path , json . dumps ( kwargs ) )
Update a resource in a remote Transifex server .
53
11
10,124
def _delete ( self , * * kwargs ) : path = self . _construct_path_to_item ( ) return self . _http . delete ( path )
Delete a resource from a remote Transifex server .
37
11
10,125
def get_url_parameters ( self ) : url_fields = { } for field in self . url_fields : url_fields [ field ] = getattr ( self , field ) return url_fields
Create a dictionary of parameters used in URLs for this model .
44
12
10,126
def _handle_wrong_field ( cls , field_name , field_type ) : if field_type == ATTR_TYPE_READ : field_type = 'readable' elif field_type == ATTR_TYPE_WRITE : field_type = 'writable' elif field_type == ATTR_TYPE_URL : field_type = 'URL' else : raise AttributeError ( 'Invalid attribute type: {}' . format ( field_type ) ) msg = '{} has no {} attribute "{}"' . format ( cls . __name__ , field_type , field_name ) _logger . error ( msg ) raise AttributeError ( msg )
Raise an exception whenever an invalid attribute with the given name was attempted to be set to or retrieved from this model class .
148
25
10,127
def update_http_rules ( rules , content_type = 'text/plain' ) : for kw in deepcopy ( rules ) : kw [ 'url' ] = re . compile ( kw [ 'url' ] ) # ensure headers dict for at least have a default content type if 'Content-Type' not in kw . get ( 'headers' , { } ) : kw [ 'headers' ] = dict ( kw . get ( 'headers' , { } ) , * * { 'Content-Type' : content_type , } ) method = kw . pop ( 'method' ) url = kw . pop ( 'url' ) http_mock . register_uri ( method , url , * * kw )
Adds rules to global http mock .
161
7
10,128
def get_task_history ( last_task ) : if hasattr ( last_task , 'branch' ) and last_task . branch : return elif hasattr ( last_task , 'hide' ) and last_task . hide : return else : return get_func_info ( last_task )
Append last task to task history .
67
8
10,129
def get_func_info ( func ) : name = func . __name__ doc = func . __doc__ or "" try : nicename = func . description except AttributeError : if doc : nicename = doc . split ( '\n' ) [ 0 ] if len ( nicename ) > 80 : nicename = name else : nicename = name parameters = [ ] try : closure = func . func_closure except AttributeError : closure = func . __closure__ try : varnames = func . func_code . co_freevars except AttributeError : varnames = func . __code__ . co_freevars if closure : for index , arg in enumerate ( closure ) : if not callable ( arg . cell_contents ) : parameters . append ( ( varnames [ index ] , text_type ( arg . cell_contents ) ) ) return ( { "nicename" : nicename , "doc" : doc , "parameters" : parameters , "name" : name , "time" : str ( datetime . datetime . now ( ) ) , "hostname" : socket . gethostname ( ) , } )
Retrieve a function s information .
249
7
10,130
def get_workflow_info ( func_list ) : funcs = [ ] for item in func_list : if item is None : continue if isinstance ( item , list ) : funcs . append ( get_workflow_info ( item ) ) else : funcs . append ( get_func_info ( item ) ) return funcs
Return function info go through lists recursively .
74
10
10,131
def _copy_context_into_mutable ( context ) : def make_mutable ( val ) : if isinstance ( val , Mapping ) : return dict ( val ) else : return val if not isinstance ( context , ( str , Mapping ) ) : try : return [ make_mutable ( val ) for val in context ] except TypeError : pass return make_mutable ( context )
Copy a properly formatted context into a mutable data structure .
86
12
10,132
def make_dataset_models ( dataset , schemas_and_tables , metadata_dict = None , version : int = 1 , include_contacts = False ) : if metadata_dict is None : metadata_dict = { } validate_types ( schemas_and_tables ) dataset_dict = { } cell_segment_model = make_cell_segment_model ( dataset , version = version ) dataset_dict [ root_model_name . lower ( ) ] = cell_segment_model for schema_name , table_name in schemas_and_tables : model_key = table_name metadata = metadata_dict . get ( table_name , None ) dataset_dict [ model_key ] = make_annotation_model ( dataset , schema_name , table_name , table_metadata = metadata , version = version ) if include_contacts : contact_model = make_annotation_model_from_schema ( dataset , 'contact' , Contact , version = version ) dataset_dict [ 'contact' ] = contact_model return dataset_dict
make all the models for a dataset
237
7
10,133
def _key_name ( self ) : # type: () -> str if self . _key is not None : return self . _key return self . __class__ . __name__ . lower ( )
Return the key referring to this object
43
7
10,134
def _path ( self ) : # type: () -> str if self . _parent : return '{}.{}' . format ( self . _parent . _path ( ) , self . _key_name ( ) ) return self . _key_name ( )
Return the dotted path representation of this object
57
8
10,135
def _add_error ( self , * args , * * kwargs ) : # type: () -> None if kwargs . get ( 'node' , None ) : # if node specified and not none error = ConfigError . create_from_yaml_node ( * args , * * kwargs ) elif self . _value_node : # default to using the node if we have one error = ConfigError . create_from_yaml_node ( node = self . _value_node , * args , * * kwargs ) else : # no nodes or error_obj to attach error = ConfigError ( * args , * * kwargs ) self . _errors . append ( error )
Convenience function to add an error to this object with line numbers
153
14
10,136
def _get_descendants_errors ( self ) : # type: () -> List(ConfigError) descendants_errors = [ ] if hasattr ( self , '_children' ) : if isinstance ( self . _children , ( list , tuple ) ) : for c in self . _children : descendants_errors += c . _get_all_errors ( ) elif isinstance ( self . _children , dict ) : for c in self . _children . values ( ) : descendants_errors += c . _get_all_errors ( ) return descendants_errors
Recursively get errors from descendants
121
7
10,137
def _validate ( self ) : # type: () -> None # class can specify it's empty obj -- list would have empty of [] self . _errors = [ ] self . _validate_type ( ) if self . is_valid ( ) : self . _validate_value ( )
Run validation save errors to object in self . _errors
63
11
10,138
def _validate_type ( self ) : # type: () -> None if not isinstance ( self . _value , self . _type ) : title = '{} has an invalid type' . format ( self . _key_name ( ) ) description = '{} must be a {}' . format ( self . _key_name ( ) , self . _type . __name__ ) self . _add_error ( title = title , description = description )
Validation to ensure value is the correct type
100
9
10,139
def haveSnapshots ( self ) : return os . path . islink ( self . latestLink ) and os . path . isdir ( self . latestLink )
Check if we have at least one snapshot .
34
9
10,140
def fromScratch ( self ) : assert ( not os . path . lexists ( self . latestLink ) or os . path . islink ( self . latestLink ) ) self . rmR ( self . latestLink ) return self
Start a fresh experiment from scratch . Returns self .
49
10
10,141
def snapshot ( self ) : nextSnapshotNum = self . nextSnapshotNum nextSnapshotPath = self . getFullPathToSnapshot ( nextSnapshotNum ) if os . path . lexists ( nextSnapshotPath ) : self . rmR ( nextSnapshotPath ) self . mkdirp ( os . path . join ( nextSnapshotPath , ".experiment" ) ) return self . dump ( nextSnapshotPath ) . __markLatest ( nextSnapshotNum )
Take a snapshot of the experiment . Returns self .
103
10
10,142
def rollback ( self , n = None ) : if n is None : if self . haveSnapshots : return self . fromSnapshot ( self . latestLink ) else : return self . fromScratch ( ) elif isinstance ( n , int ) : loadSnapshotPath = self . getFullPathToSnapshot ( n ) assert ( os . path . isdir ( loadSnapshotPath ) ) return self . __markLatest ( n ) . fromSnapshot ( loadSnapshotPath ) else : raise ValueError ( "n must be int, or None!" )
Roll back the experiment to the given snapshot number . Returns self .
121
13
10,143
def getFullPathToSnapshot ( self , n ) : return os . path . join ( self . snapDir , str ( n ) )
Get the full path to snapshot n .
30
8
10,144
def strategyLastK ( kls , n , k = 10 ) : return set ( map ( str , filter ( lambda x : x >= 0 , range ( n , n - k , - 1 ) ) ) )
Return the directory names to preserve under the LastK purge strategy .
45
13
10,145
def strategyKLogN ( kls , n , k = 4 ) : assert ( k > 1 ) s = set ( [ n ] ) i = 0 while k ** i <= n : s . update ( range ( n , n - k * k ** i , - k ** i ) ) i += 1 n -= n % k ** i return set ( map ( str , filter ( lambda x : x >= 0 , s ) ) )
Return the directory names to preserve under the KLogN purge strategy .
92
14
10,146
def listSnapshotDir ( kls , path ) : snapshotSet = set ( ) nonsnapshotSet = set ( ) try : entryList = os . listdir ( path ) for e in entryList : if kls . isFilenameInteger ( e ) : snapshotSet . add ( e ) else : nonsnapshotSet . add ( e ) except FileNotFoundError : pass finally : return snapshotSet , nonsnapshotSet
Return the set of snapshot directories and non - snapshot directories under the given path .
93
16
10,147
def rmR ( kls , path ) : if os . path . islink ( path ) or os . path . isfile ( path ) : os . unlink ( path ) elif os . path . isdir ( path ) : walker = os . walk ( path , topdown = False , followlinks = False ) for dirpath , dirnames , filenames in walker : for f in filenames : os . unlink ( os . path . join ( dirpath , f ) ) for d in dirnames : os . rmdir ( os . path . join ( dirpath , d ) ) os . rmdir ( path )
rm - R path . Deletes but does not recurse into symlinks . If the path does not exist silently return .
139
25
10,148
def atomicSymlink ( kls , target , name ) : linkAtomicName = name + ".ATOMIC" linkFinalName = name linkTarget = target if os . path . lexists ( linkAtomicName ) : kls . rmR ( linkAtomicName ) os . symlink ( linkTarget , linkAtomicName ) ################################################ ######## FILESYSTEM LINEARIZATION POINT ######## ######## vvvvvvvvvvvvvvvvvvvvvvvvvvvvvv ######## os . rename ( linkAtomicName , linkFinalName )
Same syntax as os . symlink except that the new link called name will first be created with the name and target name . ATOMIC - > target then be atomically renamed to name - > target thus overwriting any previous symlink there . If a filesystem entity called name . ATOMIC already exists it will be forcibly removed .
126
71
10,149
def _compensate_temperature ( self , adc_t ) : var_1 = ( ( adc_t / 16384.0 - self . _calibration_t [ 0 ] / 1024.0 ) * self . _calibration_t [ 1 ] ) var_2 = ( ( adc_t / 131072.0 - self . _calibration_t [ 0 ] / 8192.0 ) * ( adc_t / 131072.0 - self . _calibration_t [ 0 ] / 8192.0 ) * self . _calibration_t [ 2 ] ) self . _temp_fine = var_1 + var_2 if self . _delta_temp != 0. : # temperature correction for self heating temp = self . _temp_fine / 5120.0 + self . _delta_temp self . _temp_fine = temp * 5120.0 else : temp = self . _temp_fine / 5120.0 return temp
Compensate temperature .
222
5
10,150
def _compensate_pressure ( self , adc_p ) : var_1 = ( self . _temp_fine / 2.0 ) - 64000.0 var_2 = ( ( var_1 / 4.0 ) * ( var_1 / 4.0 ) ) / 2048 var_2 *= self . _calibration_p [ 5 ] var_2 += ( ( var_1 * self . _calibration_p [ 4 ] ) * 2.0 ) var_2 = ( var_2 / 4.0 ) + ( self . _calibration_p [ 3 ] * 65536.0 ) var_1 = ( ( ( self . _calibration_p [ 2 ] * ( ( ( var_1 / 4.0 ) * ( var_1 / 4.0 ) ) / 8192 ) ) / 8 ) + ( ( self . _calibration_p [ 1 ] * var_1 ) / 2.0 ) ) var_1 /= 262144 var_1 = ( ( 32768 + var_1 ) * self . _calibration_p [ 0 ] ) / 32768 if var_1 == 0 : return 0 pressure = ( ( 1048576 - adc_p ) - ( var_2 / 4096 ) ) * 3125 if pressure < 0x80000000 : pressure = ( pressure * 2.0 ) / var_1 else : pressure = ( pressure / var_1 ) * 2 var_1 = ( self . _calibration_p [ 8 ] * ( ( ( pressure / 8.0 ) * ( pressure / 8.0 ) ) / 8192.0 ) ) / 4096 var_2 = ( ( pressure / 4.0 ) * self . _calibration_p [ 7 ] ) / 8192.0 pressure += ( ( var_1 + var_2 + self . _calibration_p [ 6 ] ) / 16.0 ) return pressure / 100
Compensate pressure .
424
5
10,151
def _compensate_humidity ( self , adc_h ) : var_h = self . _temp_fine - 76800.0 if var_h == 0 : return 0 var_h = ( ( adc_h - ( self . _calibration_h [ 3 ] * 64.0 + self . _calibration_h [ 4 ] / 16384.0 * var_h ) ) * ( self . _calibration_h [ 1 ] / 65536.0 * ( 1.0 + self . _calibration_h [ 5 ] / 67108864.0 * var_h * ( 1.0 + self . _calibration_h [ 2 ] / 67108864.0 * var_h ) ) ) ) var_h *= 1.0 - self . _calibration_h [ 0 ] * var_h / 524288.0 if var_h > 100.0 : var_h = 100.0 elif var_h < 0.0 : var_h = 0.0 return var_h
Compensate humidity .
236
5
10,152
def _take_forced_measurement ( self ) : # set to forced mode, i.e. "take next measurement" self . _bus . write_byte_data ( self . _i2c_add , 0xF4 , self . ctrl_meas_reg ) while self . _bus . read_byte_data ( self . _i2c_add , 0xF3 ) & 0x08 : sleep ( 0.005 )
Take a forced measurement .
100
5
10,153
def update ( self , first_reading = False ) : try : if first_reading or not self . _ok : self . _bus . write_byte_data ( self . _i2c_add , 0xF2 , self . ctrl_hum_reg ) self . _bus . write_byte_data ( self . _i2c_add , 0xF5 , self . config_reg ) self . _bus . write_byte_data ( self . _i2c_add , 0xF4 , self . ctrl_meas_reg ) self . _populate_calibration_data ( ) if self . mode == 2 : # MODE_FORCED self . _take_forced_measurement ( ) data = [ ] for i in range ( 0xF7 , 0xF7 + 8 ) : data . append ( self . _bus . read_byte_data ( self . _i2c_add , i ) ) except OSError as exc : self . log_error ( "Bad update: %s" , exc ) self . _ok = False return pres_raw = ( data [ 0 ] << 12 ) | ( data [ 1 ] << 4 ) | ( data [ 2 ] >> 4 ) temp_raw = ( data [ 3 ] << 12 ) | ( data [ 4 ] << 4 ) | ( data [ 5 ] >> 4 ) hum_raw = ( data [ 6 ] << 8 ) | data [ 7 ] self . _ok = False temperature = self . _compensate_temperature ( temp_raw ) if ( temperature >= - 20 ) and ( temperature < 80 ) : self . _temperature = temperature self . _ok = True if self . _with_humidity : humidity = self . _compensate_humidity ( hum_raw ) if ( humidity >= 0 ) and ( humidity <= 100 ) : self . _humidity = humidity else : self . _ok = False if self . _with_pressure : pressure = self . _compensate_pressure ( pres_raw ) if pressure > 100 : self . _pressure = pressure else : self . _ok = False
Read raw data and update compensated variables .
465
8
10,154
def append ( self , element ) : assert element . locus == self . locus , ( "Element locus (%s) != Pileup locus (%s)" % ( element . locus , self . locus ) ) self . elements [ element ] = None
Append a PileupElement to this Pileup . If an identical PileupElement is already part of this Pileup do nothing .
57
31
10,155
def update ( self , other ) : assert self . locus == other . locus self . elements . update ( other . elements )
Add all pileup elements from other into self .
28
10
10,156
def filter ( self , filters ) : new_elements = [ e for e in self . elements if all ( function ( e ) for function in filters ) ] return Pileup ( self . locus , new_elements )
Apply filters to the pileup elements and return a new Pileup with the filtered elements removed .
49
20
10,157
def new_task ( func ) : @ wraps ( func ) async def wrapper ( self , * args , * * kwargs ) : loop = get_event_loop ( ) loop . create_task ( func ( self , * args , * * kwargs ) ) return wrapper
Runs the decorated function in a new task
60
9
10,158
async def providers ( ) : for provider in settings . ANALYTICS_PROVIDERS : cls : BaseAnalytics = import_class ( provider [ 'class' ] ) yield await cls . instance ( * provider [ 'args' ] )
Iterates over all instances of analytics provider found in configuration
55
11
10,159
async def page_view ( self , url : str , title : str , user_id : str , user_lang : str = '' ) -> None : raise NotImplementedError
Track the view of a page
40
6
10,160
def hash_user_id ( self , user_id : str ) -> str : h = sha256 ( ) h . update ( user_id . encode ( ) ) return h . hexdigest ( )
As per the law anonymize user identifier before sending it .
45
12
10,161
def delete ( cls , uuid ) : to_delete = Workflow . query . get ( uuid ) db . session . delete ( to_delete )
Delete a workflow .
34
4
10,162
def run_worker ( wname , data , engine_uuid_hex = None , * * kwargs ) : if 'stop_on_halt' not in kwargs : kwargs [ 'stop_on_halt' ] = False if engine_uuid_hex : engine_uuid = uuid . UUID ( hex = engine_uuid_hex ) engine = WorkflowEngine . from_uuid ( uuid = engine_uuid , * * kwargs ) else : engine = WorkflowEngine . with_name ( wname , * * kwargs ) engine . save ( ) objects = get_workflow_object_instances ( data , engine ) db . session . commit ( ) engine . process ( objects , * * kwargs ) return engine
Run a workflow by name with list of data objects .
172
11
10,163
def restart_worker ( uuid , * * kwargs ) : if 'stop_on_halt' not in kwargs : kwargs [ 'stop_on_halt' ] = False engine = WorkflowEngine . from_uuid ( uuid = uuid , * * kwargs ) if "data" not in kwargs : objects = workflow_object_class . query ( id_workflow = uuid ) else : data = kwargs . pop ( "data" ) if not isinstance ( data , ( list , tuple ) ) : data = [ data ] objects = get_workflow_object_instances ( data , engine ) db . session . commit ( ) engine . process ( objects , * * kwargs ) return engine
Restart workflow from beginning with given engine UUID and any data .
166
14
10,164
def get_workflow_object_instances ( data , engine ) : workflow_objects = [ ] data_type = engine . get_default_data_type ( ) for data_object in data : if isinstance ( data_object , workflow_object_class . _get_current_object ( ) ) : if not data_object . data_type : data_object . data_type = data_type if data_object . id : data_object . log . debug ( "Existing workflow object found for " "this object." ) if data_object . status == data_object . known_statuses . COMPLETED : data_object . status = data_object . known_statuses . INITIAL workflow_objects . append ( data_object ) else : # Data is not already a WorkflowObject, we then # add the running object to run through the workflow. current_obj = create_data_object_from_data ( data_object , engine , data_type ) workflow_objects . append ( current_obj ) return workflow_objects
Analyze data and create corresponding WorkflowObjects .
227
11
10,165
def create_data_object_from_data ( data_object , engine , data_type ) : # Data is not already a WorkflowObject, we first # create an initial object for each data object. return workflow_object_class . create ( data = data_object , id_workflow = engine . uuid , status = workflow_object_class . known_statuses . INITIAL , data_type = data_type , )
Create a new WorkflowObject from given data and return it .
95
13
10,166
def _print_rst ( self , what ) : print print "Command - %s::" % what exec ( "h = self.do_%s.__doc__" % what ) # noinspection PyUnboundLocalVariable h = textwrap . dedent ( h ) . replace ( "::\n\n" , "" ) h = textwrap . dedent ( h ) . replace ( "\n" , "\n " ) print h
prints the rst page of the command what
97
9
10,167
def load_json ( cls , data , default_rule = None , raise_error = False ) : rules = { k : _parser . parse_rule ( v , raise_error ) for k , v in json . loads ( data ) . items ( ) } return cls ( rules , default_rule )
Allow loading of JSON rule data .
67
7
10,168
def from_dict ( cls , rules_dict : dict , default_rule = None , raise_error = False ) : # Parse the rules stored in the dictionary rules = { k : _parser . parse_rule ( v , raise_error ) for k , v in rules_dict . items ( ) } return cls ( rules , default_rule )
Allow loading of rule data from a dictionary .
77
9
10,169
def _set_rules ( self , rules : dict , overwrite = True ) : if not isinstance ( rules , dict ) : raise TypeError ( 'rules must be an instance of dict or Rules,' 'got %r instead' % type ( rules ) ) if overwrite : self . rules = Rules ( rules , self . default_rule ) else : self . rules . update ( rules )
Created a new Rules object based on the provided dict of rules .
81
13
10,170
def load_rules ( self , force_reload = False , overwrite = True ) : # double-checked locking if self . load_once and self . _policy_loaded : return with self . _load_lock : if self . load_once and self . _policy_loaded : return reloaded , data = _cache . read_file ( self . policy_file , force_reload = force_reload ) self . _policy_loaded = True if reloaded or not self . rules : rules = Rules . load_json ( data , self . default_rule , self . raise_error ) self . _set_rules ( rules , overwrite = overwrite ) LOG . debug ( 'Reload policy file: %s' , self . policy_file )
Load rules from policy file or cache .
162
8
10,171
def enforce ( self , rule , target , creds , exc = None , * args , * * kwargs ) : self . load_rules ( ) if isinstance ( rule , checks . BaseCheck ) : result = rule ( target , creds , self , rule ) elif not self . rules : # No rules means we're going to fail closed. result = False else : try : # Evaluate the rule result = self . rules [ rule ] ( target , creds , self , rule ) except KeyError : LOG . debug ( 'Rule [%s] does not exist' , rule ) # If the rule doesn't exist, fail closed result = False if self . raise_error and not result : if exc : raise exc ( * args , * * kwargs ) else : raise PolicyNotAuthorized ( rule , target , creds ) return result
Checks authorization of a rule against the target and credentials .
182
12
10,172
def get_flattened_bsp_keys_from_schema ( schema ) : keys = [ ] for key in schema . declared_fields . keys ( ) : field = schema . declared_fields [ key ] if isinstance ( field , mm . fields . Nested ) and isinstance ( field . schema , BoundSpatialPoint ) : keys . append ( "{}.{}" . format ( key , "position" ) ) return keys
Returns the flattened keys of BoundSpatialPoints in a schema
93
12
10,173
def lock ( self ) -> asyncio . Lock : if self . lock_key not in self . request . custom_content : self . request . custom_content [ self . lock_key ] = asyncio . Lock ( ) return self . request . custom_content [ self . lock_key ]
Return and generate if required the lock for this request .
63
11
10,174
async def get_value ( self ) : cc = self . request . custom_content async with self . lock : if self . content_key not in cc : cc [ self . content_key ] = await self . call_api ( ) return cc [ self . content_key ]
Get the value from the API . Make sure to use a lock in order not to fetch the value twice at the same time .
61
26
10,175
async def rank ( self ) -> Optional [ float ] : if not self . request . has_layer ( l . RawText ) : return tl = self . request . get_layer ( l . RawText ) matcher = Matcher ( [ tuple ( Trigram ( y ) for y in x ) for x in await self . intent . strings ( self . request ) ] ) return matcher % Trigram ( tl . text )
If there is a text layer inside the request try to find a matching text in the specified intent .
95
20
10,176
def _rank_qr ( self , choices ) : from bernard . platforms . facebook import layers as fbl try : qr = self . request . get_layer ( fbl . QuickReply ) self . chosen = choices [ qr . slug ] self . slug = qr . slug if self . when is None or self . when == qr . slug : return 1.0 except KeyError : pass
Look for the QuickReply layer s slug into available choices .
88
12
10,177
async def _rank_text ( self , choices ) : tl = self . request . get_layer ( l . RawText ) best = 0.0 for slug , params in choices . items ( ) : strings = [ ] if params [ 'intent' ] : intent = getattr ( intents , params [ 'intent' ] ) strings += await intent . strings ( self . request ) if params [ 'text' ] : strings . append ( ( params [ 'text' ] , ) ) matcher = Matcher ( [ tuple ( Trigram ( y ) for y in x ) for x in strings ] ) score = matcher % Trigram ( await render ( tl . text , self . request ) ) if score > best : self . chosen = params self . slug = slug best = score if self . when is None or self . slug == self . when : return best
Try to match the TextLayer with choice s intents .
188
12
10,178
def check_recommended_attributes ( self , dataset ) : results = [ ] recommended_ctx = TestCtx ( BaseCheck . MEDIUM , 'Recommended global attributes' ) # Check time_coverage_duration and resolution for attr in [ 'time_coverage_duration' , 'time_coverage_resolution' ] : attr_value = getattr ( dataset , attr , '' ) try : parse_duration ( attr_value ) recommended_ctx . assert_true ( True , '' ) # Score it True! except Exception : recommended_ctx . assert_true ( False , '{} should exist and be ISO-8601 format (example: PT1M30S), currently: {}' . format ( attr , attr_value ) ) results . append ( recommended_ctx . to_result ( ) ) return results
Feature type specific check of global recommended attributes .
181
9
10,179
def check_dimensions ( self , dataset ) : required_ctx = TestCtx ( BaseCheck . HIGH , 'All geophysical variables are time-series incomplete feature types' ) message = '{} must be a valid timeseries feature type. It must have dimensions of (timeSeries, time).' message += ' And all coordinates must have dimensions of (timeSeries)' for variable in util . get_geophysical_variables ( dataset ) : is_valid = util . is_multi_timeseries_incomplete ( dataset , variable ) required_ctx . assert_true ( is_valid , message . format ( variable ) ) return required_ctx . to_result ( )
Checks that the feature types of this dataset are consitent with a time series incomplete dataset
143
18
10,180
def read_file ( filename : str , force_reload = False ) : if force_reload : _delete_cached_file ( filename ) reloaded = False mtime = os . path . getmtime ( filename ) cache_info = CACHE . setdefault ( filename , { } ) if not cache_info or mtime > cache_info . get ( 'mtime' , 0 ) : LOG . debug ( 'Reloading cached file %s' , filename ) with open ( filename ) as fp : cache_info [ 'data' ] = fp . read ( ) cache_info [ 'mtime' ] = mtime reloaded = True return reloaded , cache_info [ 'data' ]
Read a file if it has been modified .
157
9
10,181
def use_model_attr ( attr ) : def use_model_validator ( instance , attribute , value ) : getattr ( instance , attr ) ( instance , attribute , value ) return use_model_validator
Use the validator set on a separate attribute on the class .
48
13
10,182
def is_creation_model ( instance , attribute , value ) : creation_name = value . get ( 'name' ) if not isinstance ( creation_name , str ) : instance_name = instance . __class__ . __name__ err_str = ( "'name' must be given as a string in the '{attr}' " "parameter of a '{cls}'. Given " "'{value}'" ) . format ( attr = attribute . name , cls = instance_name , value = creation_name ) raise ModelDataError ( err_str )
Must include at least a name key .
124
8
10,183
def is_manifestation_model ( instance , attribute , value ) : instance_name = instance . __class__ . __name__ is_creation_model ( instance , attribute , value ) manifestation_of = value . get ( 'manifestationOfWork' ) if not isinstance ( manifestation_of , str ) : err_str = ( "'manifestationOfWork' must be given as a string in the " "'{attr}' parameter of a '{cls}'. Given " "'{value}'" ) . format ( attr = attribute . name , cls = instance_name , value = manifestation_of ) print ( err_str )
Must include a manifestationOfWork key .
141
8
10,184
def add_preprocessor ( preproc ) : def decorator ( func ) : func = ScriptAdaptor . _wrap ( func ) func . _add_preprocessor ( preproc ) return func return decorator
Define a preprocessor to run after the arguments are parsed and before the function is executed when running in console script mode .
44
25
10,185
def add_postprocessor ( postproc ) : def decorator ( func ) : func = ScriptAdaptor . _wrap ( func ) func . _add_postprocessor ( postproc ) return func return decorator
Define a postprocessor to run after the function is executed when running in console script mode .
44
19
10,186
def _setup_logging ( args ) : log_conf = getattr ( args , 'logging' , None ) if log_conf : logging . config . fileConfig ( log_conf ) else : logging . basicConfig ( )
Set up logging for the script based on the configuration specified by the logging attribute of the command line arguments .
50
21
10,187
def setup_limits ( conf_file , limits_file , do_reload = True , dry_run = False , debug = False ) : # If dry_run is set, default debug to True if dry_run : debug = True # Connect to the database... conf = config . Config ( conf_file = conf_file ) db = conf . get_database ( ) limits_key = conf [ 'control' ] . get ( 'limits_key' , 'limits' ) control_channel = conf [ 'control' ] . get ( 'channel' , 'control' ) # Parse the limits file limits_tree = etree . parse ( limits_file ) # Now, we parse the limits XML file lims = [ ] for idx , lim in enumerate ( limits_tree . getroot ( ) ) : # Skip tags we don't recognize if lim . tag != 'limit' : warnings . warn ( "Unrecognized tag %r in limits file at index %d" % ( lim . tag , idx ) ) continue # Construct the limit and add it to the list of limits try : lims . append ( parse_limit_node ( db , idx , lim ) ) except Exception as exc : warnings . warn ( "Couldn't understand limit at index %d: %s" % ( idx , exc ) ) continue # Now that we have the limits, let's install them if debug : print >> sys . stderr , "Installing the following limits:" for lim in lims : print >> sys . stderr , " %r" % lim if not dry_run : database . limit_update ( db , limits_key , lims ) # Were we requested to reload the limits? if do_reload is False : return # OK, figure out what kind of reload to do params = [ ] if do_reload is True : # Nothing to do; use default semantics pass elif ( isinstance ( do_reload , ( int , long , float ) ) or ( isinstance ( do_reload , basestring ) and do_reload . isdigit ( ) ) ) : params = [ 'spread' , do_reload ] else : params = [ str ( do_reload ) ] # Issue the reload command if debug : cmd = [ 'reload' ] cmd . extend ( params ) print >> sys . stderr , ( "Issuing command: %s" % ' ' . join ( str ( c ) for c in cmd ) ) if not dry_run : database . command ( db , control_channel , 'reload' , * params )
Set up or update limits in the Redis database .
558
11
10,188
def make_limit_node ( root , limit ) : # Build the base limit node limit_node = etree . SubElement ( root , 'limit' , { 'class' : limit . _limit_full_name } ) # Walk through all the recognized attributes for attr in sorted ( limit . attrs ) : desc = limit . attrs [ attr ] attr_type = desc . get ( 'type' , str ) value = getattr ( limit , attr ) # Determine the default value, if we have one... if 'default' in desc : default = ( desc [ 'default' ] ( ) if callable ( desc [ 'default' ] ) else desc [ 'default' ] ) # Skip attributes that have their default settings if value == default : continue # Set up the attr node attr_node = etree . SubElement ( limit_node , 'attr' , name = attr ) # Treat lists and dicts specially if attr_type == list : for val in value : val_node = etree . SubElement ( attr_node , 'value' ) val_node . text = str ( val ) elif attr_type == dict : for key , val in sorted ( value . items ( ) , key = lambda x : x [ 0 ] ) : val_node = etree . SubElement ( attr_node , 'value' , key = key ) val_node . text = str ( val ) else : attr_node . text = str ( value )
Given a Limit object generate an XML node .
325
9
10,189
def dump_limits ( conf_file , limits_file , debug = False ) : # Connect to the database... conf = config . Config ( conf_file = conf_file ) db = conf . get_database ( ) limits_key = conf [ 'control' ] . get ( 'limits_key' , 'limits' ) # Now, grab all the limits lims = [ limits . Limit . hydrate ( db , msgpack . loads ( lim ) ) for lim in db . zrange ( limits_key , 0 , - 1 ) ] # Build up the limits tree root = etree . Element ( 'limits' ) limit_tree = etree . ElementTree ( root ) for idx , lim in enumerate ( lims ) : if debug : print >> sys . stderr , "Dumping limit index %d: %r" % ( idx , lim ) make_limit_node ( root , lim ) # Write out the limits file if limits_file == '-' : limits_file = sys . stdout if debug : print >> sys . stderr , "Dumping limits to file %r" % limits_file limit_tree . write ( limits_file , xml_declaration = True , encoding = 'UTF-8' , pretty_print = True )
Dump the current limits from the Redis database .
274
11
10,190
def remote_daemon ( conf_file ) : eventlet . monkey_patch ( ) conf = config . Config ( conf_file = conf_file ) daemon = remote . RemoteControlDaemon ( None , conf ) daemon . serve ( )
Run the external control daemon .
51
6
10,191
def turnstile_command ( conf_file , command , arguments = [ ] , channel = None , debug = False ) : # Connect to the database... conf = config . Config ( conf_file = conf_file ) db = conf . get_database ( ) control_channel = conf [ 'control' ] . get ( 'channel' , 'control' ) # Now, set up the command command = command . lower ( ) ts_conv = False if command == 'ping' : # We handle 'ping' specially; first, figure out the channel if arguments : channel = arguments [ 0 ] else : channel = str ( uuid . uuid4 ( ) ) arguments = [ channel ] # Next, add on a timestamp if len ( arguments ) < 2 : arguments . append ( time . time ( ) ) ts_conv = True # Limit the argument list length arguments = arguments [ : 2 ] # OK, the command is all set up. Let us now send the command... if debug : cmd = [ command ] + arguments print >> sys . stderr , ( "Issuing command: %s" % ' ' . join ( cmd ) ) database . command ( db , control_channel , command , * arguments ) # Were we asked to listen on a channel? if not channel : return # OK, let's subscribe to the channel... pubsub = db . pubsub ( ) pubsub . subscribe ( channel ) # Now we listen... try : count = 0 for msg in pubsub . listen ( ) : # Make sure the message is one we're interested in if debug : formatted = pprint . pformat ( msg ) print >> sys . stderr , "Received message: %s" % formatted if ( msg [ 'type' ] not in ( 'pmessage' , 'message' ) or msg [ 'channel' ] != channel ) : continue count += 1 # Figure out the response response = msg [ 'data' ] . split ( ':' ) # If this is a 'pong' and ts_conv is true, add an RTT to # the response if ts_conv and response [ 0 ] == 'pong' : try : rtt = ( time . time ( ) - float ( response [ 2 ] ) ) * 100 response . append ( '(RTT %.2fms)' % rtt ) except Exception : # IndexError or ValueError, probably; ignore it pass # Print out the response print "Response % 5d: %s" % ( count , ' ' . join ( response ) ) except KeyboardInterrupt : # We want to break out of the loop, but not return any error # to the caller... pass
Issue a command to all running control daemons .
560
11
10,192
def compactor_daemon ( conf_file ) : eventlet . monkey_patch ( ) conf = config . Config ( conf_file = conf_file ) compactor . compactor ( conf )
Run the compactor daemon .
42
6
10,193
def _wrap ( cls , func ) : if isinstance ( func , cls ) : return func return functools . update_wrapper ( cls ( func ) , func )
Ensures that the function is wrapped in a ScriptAdaptor object . If it is not a new ScriptAdaptor will be returned . If it is the ScriptAdaptor is returned .
39
38
10,194
def setup_args ( self , parser ) : # Add all the arguments to the argument parser for args , kwargs in self . _arguments : parser . add_argument ( * args , * * kwargs )
Set up an argparse . ArgumentParser object by adding all the arguments taken by the function .
47
19
10,195
def get_kwargs ( self , args ) : # Now we need to figure out which arguments the final function # actually needs kwargs = { } argspec = inspect . getargspec ( self . _func ) required = set ( argspec . args [ : - len ( argspec . defaults ) ] if argspec . defaults else argspec . args ) for arg_name in argspec . args : try : kwargs [ arg_name ] = getattr ( args , arg_name ) except AttributeError : if arg_name in required : # If this happens, that's a programming failure raise # If the function accepts any keyword argument, add whatever # remains if argspec . keywords : for key , value in args . __dict__ . items ( ) : if key in kwargs : # Already handled continue kwargs [ key ] = value return kwargs
Given a Namespace object drawn from argparse determines the keyword arguments to pass to the underlying function . Note that if the underlying function accepts all keyword arguments the dictionary returned will contain the entire contents of the Namespace object . Also note that an AttributeError will be raised if any argument required by the function is not set in the Namespace object .
186
70
10,196
def console ( self ) : # First, let's parse the arguments parser = argparse . ArgumentParser ( description = self . description ) self . setup_args ( parser ) args = parser . parse_args ( ) # Next, let's run the preprocessors in order for proc in self . _preprocess : try : proc ( args ) except Exception as exc : if getattr ( args , 'debug' , False ) : raise return str ( exc ) # Finally, safely call the underlying function result = self . safe_call ( self . get_kwargs ( args ) , args ) # Now, run the postprocessors in order for proc in self . _postprocess : result = proc ( args , result ) return result
Call the function as a console script . Command line arguments are parsed preprocessors are called then the function is called . If a debug attribute is set by the command line arguments and it is True any exception raised by the underlying function will be reraised ; otherwise the return value will be either the return value of the function or the text contents of the exception .
153
72
10,197
def import_class ( name : Text ) -> Type : parts = name . split ( '.' ) module_name = parts [ : - 1 ] class_name = parts [ - 1 ] module_ = importlib . import_module ( '.' . join ( module_name ) ) return getattr ( module_ , class_name )
Import a class based on its full name .
71
9
10,198
def make_ro ( obj : Any , forgive_type = False ) : if isinstance ( obj , ( str , bytes , int , float , bool , RoDict , RoList ) ) or obj is None : return obj elif isinstance ( obj , Mapping ) : return RoDict ( obj , forgive_type ) elif isinstance ( obj , Sequence ) : return RoList ( obj , forgive_type ) elif forgive_type : return obj else : raise ValueError ( 'Trying to make read-only an object of type "{}"' . format ( obj . __class__ . __name__ ) )
Make a json - serializable type recursively read - only
133
13
10,199
def make_rw ( obj : Any ) : if isinstance ( obj , RoDict ) : return { k : make_rw ( v ) for k , v in obj . items ( ) } elif isinstance ( obj , RoList ) : return [ make_rw ( x ) for x in obj ] else : return obj
Copy a RO object into a RW structure made with standard Python classes .
70
14