idx
int64
0
63k
question
stringlengths
53
5.28k
target
stringlengths
5
805
8,200
def sum_num_dicts(dicts, normalize=False):
    """Sum the given dicts into one dict mapping each key to the total of its
    values across all input dicts.  When ``normalize`` is set, the result is
    passed through the module-level ``norm_int_dict`` helper.
    """
    totals = {}
    for mapping in dicts:
        for key, value in mapping.items():
            totals[key] = totals.get(key, 0) + value
    if normalize:
        return norm_int_dict(totals)
    return totals
Sums the given dicts into a single dict mapping each key to the sum of its mappings in all given dicts .
8,201
def reverse_dict(dict_obj):
    """Reverse a dict so each value maps to a sorted list of the keys that
    mapped to it.

    Uses ``dict.setdefault`` with a set instead of the hand-rolled external
    ``add_to_dict_val_set`` helper; dict keys are unique, so collecting into a
    set then sorting yields the same sorted key lists.
    """
    reversed_map = {}
    for key, value in dict_obj.items():
        reversed_map.setdefault(value, set()).add(key)
    return {value: sorted(keys) for value, keys in reversed_map.items()}
Reverse a dict so each value in it maps to a sorted list of its keys .
8,202
def reverse_dict_partial(dict_obj):
    """Reverse a dict so each value maps to one of its keys.

    When several keys share a value, the last one (in iteration order) wins,
    exactly as in the original loop.
    """
    return {value: key for key, value in dict_obj.items()}
Reverse a dict so each value in it maps to one of its keys .
8,203
def reverse_list_valued_dict(dict_obj):
    """Reverse a list-valued dict so each element of each list maps to its key."""
    return {
        element: key
        for key, elements in dict_obj.items()
        for element in elements
    }
Reverse a list - valued dict so each element in a list maps to its key .
8,204
def flatten_dict(dict_obj, separator='.', flatten_lists=False):
    """Flatten a nested dict into a single-level dict with compound keys.

    Nested mapping keys are joined with ``separator`` via the module's
    ``_get_key_reducer`` helper.  When ``flatten_lists`` is True, non-string
    sequences are also expanded, using the element index as a key component.

    BUG FIX: the original accepted ``flatten_lists`` but ignored it and always
    expanded sequences; the flag is now honored (sequences are treated as leaf
    values by default).
    """
    reducer = _get_key_reducer(separator)
    flat = {}

    def _flatten_key_val(key, val, parent):
        flat_key = reducer(parent, key)
        try:
            _flatten(val, flat_key)
        except TypeError:
            # Leaf value: neither a mapping nor a flattenable sequence.
            flat[flat_key] = val

    def _flatten(d, parent=None):
        try:
            for key, val in d.items():
                _flatten_key_val(key, val, parent)
        except AttributeError:
            # Not a mapping.  Strings/bytes are always leaves; other
            # sequences only flatten when the caller asked for it.
            if isinstance(d, (str, bytes)) or not flatten_lists:
                raise TypeError
            for i, value in enumerate(d):
                _flatten_key_val(str(i), value, parent)

    _flatten(dict_obj)
    return flat
Flattens the given dict into a single-level dict with flattened keys.
8,205
def pprint_int_dict(int_dict, indent=4, descending=False):
    """Print a dict with int values as an indented ``key: value`` listing,
    sorted by value (ascending by default).

    Sorting ascending and then reversing (rather than ``reverse=True``)
    preserves the original's tie ordering for equal values.
    """
    ordered = sorted(int_dict.items(), key=lambda entry: entry[1])
    if descending:
        ordered.reverse()
    print('{')
    pad = ' ' * indent
    for key, value in ordered:
        print('{}{}: {}'.format(pad, key, value))
    print('}')
Prints the given dict with int values in a nice way .
8,206
def key_value_nested_generator(dict_obj):
    """Recursively yield (key, value) pairs of nested dictionaries.

    Dict values are descended into; only non-dict leaves are yielded, keyed
    by their innermost key.
    """
    for key, value in dict_obj.items():
        if isinstance(value, dict):
            yield from key_value_nested_generator(value)
        else:
            yield key, value
Recursively iterate over key - value pairs of nested dictionaries .
8,207
def key_tuple_value_nested_generator(dict_obj):
    """Recursively yield (key_path_tuple, value) pairs of nested dictionaries.

    Each leaf is yielded with the full tuple of keys leading to it.
    """
    for key, value in dict_obj.items():
        if isinstance(value, dict):
            for nested_path, nested_value in key_tuple_value_nested_generator(value):
                yield (key,) + nested_path, nested_value
        else:
            yield (key,), value
Recursively iterate over key - tuple - value pairs of nested dictionaries .
8,208
def register(self):
    """Register this plugin's option group and options on the global
    ConfigOpts object."""
    opt_group = cfg.OptGroup(
        self.group_name,
        title="HNV (Hyper-V Network Virtualization) Options")
    self._config.register_group(opt_group)
    self._config.register_opts(self._options, group=opt_group)
Register the current options to the global ConfigOpts object .
8,209
def _language_exclusions(stem: LanguageStemRange,
                         exclusions: List[ShExDocParser.LanguageExclusionContext]) -> None:
    """Process languageExclusion nodes: ``'-' LANGTAG STEM_MARK?``.

    Each exclusion's language tag (the LANGTAG text minus its leading '@')
    is appended to ``stem.exclusions`` -- wrapped in a LanguageStem when the
    stem mark is present, as a bare LANGTAG otherwise.
    """
    for exclusion in exclusions:
        tag = LANGTAG(exclusion.LANGTAG().getText()[1:])
        if exclusion.STEM_MARK():
            stem.exclusions.append(LanguageStem(tag))
        else:
            stem.exclusions.append(tag)
languageExclusion = - LANGTAG STEM_MARK?
8,210
def create_thumbnail(self, image, geometry, upscale=True, crop=None, colorspace='RGB'):
    """Basic thumbnailing pipeline: convert colorspace, scale, then crop.

    A really simple default implementation; engines may override it with
    their own logic.
    """
    converted = self.colorspace(image, colorspace)
    scaled = self.scale(converted, geometry, upscale, crop)
    return self.crop(scaled, geometry, crop)
This serves as a really basic example of a thumbnailing method . You may want to implement your own logic but this will work for simple cases .
8,211
def get_tokens(self, *, payer_id, credit_card_token_id, start_date, end_date):
    """Query previously stored credit-card tokens within a date range."""
    date_fmt = '%Y-%m-%dT%H:%M:%S'
    payload = {
        "language": self.client.language.value,
        "command": PaymentCommand.GET_TOKENS.value,
        "merchant": {
            "apiLogin": self.client.api_login,
            "apiKey": self.client.api_key,
        },
        "creditCardTokenInformation": {
            "payerId": payer_id,
            "creditCardTokenId": credit_card_token_id,
            "startDate": start_date.strftime(date_fmt),
            "endDate": end_date.strftime(date_fmt),
        },
        "test": self.client.is_test,
    }
    return self.client._post(self.url, json=payload)
With this functionality you can query previously the Credit Cards Token .
8,212
def remove_token(self, *, payer_id, credit_card_token_id):
    """Delete a tokenized credit-card registration."""
    payload = {
        "language": self.client.language.value,
        "command": PaymentCommand.REMOVE_TOKEN.value,
        "merchant": {
            "apiLogin": self.client.api_login,
            "apiKey": self.client.api_key,
        },
        "removeCreditCardToken": {
            "payerId": payer_id,
            "creditCardTokenId": credit_card_token_id,
        },
        "test": self.client.is_test,
    }
    return self.client._post(self.url, json=payload)
This feature allows you to delete a tokenized credit card register .
8,213
def set_file_path(self, filePath):
    """Set the path of the file that needs to be locked.

    *filePath* may be None or a string-like value; non-None values are
    coerced to ``str``.  NOTE: ``basestring`` implies this module targets
    Python 2.
    """
    if filePath is not None:
        assert isinstance(filePath, basestring), "filePath must be None or string"
        filePath = str(filePath)
    self.__filePath = filePath
Set the file path that needs to be locked .
8,214
def set_lock_pass(self, lockPass):
    """Set the locking pass string.

    Must be a (Python 2) string and must not contain a newline, since the
    pass is stored as the first line of the lock file.
    """
    assert isinstance(lockPass, basestring), "lockPass must be string"
    lockPass = str(lockPass)
    assert '\n' not in lockPass, "lockPass must be not contain a new line"
    self.__lockPass = lockPass
Set the locking pass
8,215
def set_lock_path(self, lockPath):
    """Set the managing lock file path.

    When *lockPath* is None, defaults to a '.lock' file next to the locked
    file, or in the current working directory when no file path is set.
    """
    if lockPath is not None:
        assert isinstance(lockPath, basestring), "lockPath must be None or string"
        lockPath = str(lockPath)
    self.__lockPath = lockPath
    if self.__lockPath is None:
        if self.__filePath is None:
            self.__lockPath = os.path.join(os.getcwd(), ".lock")
        else:
            self.__lockPath = os.path.join(os.path.dirname(self.__filePath), '.lock')
Set the managing lock file path .
8,216
def set_timeout(self, timeout):
    """Set the timeout limit (seconds).

    Must be a number >= 0 and >= the configured wait interval; raises
    ``Exception`` with the original message otherwise.

    BUG FIX: the original used a bare ``except:`` around assert-based
    validation, which also swallowed KeyboardInterrupt/SystemExit and
    broke under ``python -O``; validation is now explicit.
    """
    try:
        timeout = float(timeout)
    except (TypeError, ValueError):
        raise Exception('timeout must be a positive number bigger than wait')
    if timeout < 0 or timeout < self.__wait:
        raise Exception('timeout must be a positive number bigger than wait')
    self.__timeout = timeout
set the timeout limit .
8,217
def set_wait(self, wait):
    """Set the waiting time (seconds) between lock attempts; must be a
    number >= 0.

    BUG FIX: replaces the original bare ``except:`` + assert validation
    (which swallowed KeyboardInterrupt and broke under ``python -O``) with
    explicit checks; the raised exception type and message are unchanged.
    """
    try:
        wait = float(wait)
    except (TypeError, ValueError):
        raise Exception('wait must be a positive number')
    if wait < 0:
        raise Exception('wait must be a positive number')
    self.__wait = wait
set the waiting time .
8,218
def set_dead_lock(self, deadLock):
    """Set the dead-lock time (seconds) after which a stale lock is broken;
    must be a number >= 0.

    BUG FIX: replaces the original bare ``except:`` + assert validation
    with explicit checks; exception type and message unchanged.
    """
    try:
        deadLock = float(deadLock)
    except (TypeError, ValueError):
        raise Exception('deadLock must be a positive number')
    if deadLock < 0:
        raise Exception('deadLock must be a positive number')
    self.__deadLock = deadLock
Set the dead lock time .
8,219
def release_lock(self, verbose=VERBOSE, raiseError=RAISE_ERROR):
    """Release the lock if this instance holds it and close the file
    descriptor if one is open.

    Returns (released, code) where code is an int status (0: no lock file,
    1: lock file empty, 2: lock released, 3: fd closed, 4: lock owned by
    someone else) or an Exception instance describing a failure.

    NOTE(review): reconstructed from a collapsed one-line source; the
    nesting of the verbose/raiseError handling inside the except branches
    follows the repeated pattern in the token stream -- confirm against the
    upstream pylocker implementation.
    """
    if not os.path.isfile(self.__lockPath):
        released = True
        code = 0
    else:
        try:
            with open(self.__lockPath, 'rb') as fd:
                lock = fd.readlines()
        except Exception as err:
            code = Exception("Unable to read release lock file '%s' (%s)" % (self.__lockPath, str(err)))
            released = False
            if verbose:
                print(str(code))
            if raiseError:
                raise code
        else:
            if not len(lock):
                code = 1
                released = True
            elif lock[0].rstrip() == self.__lockPass.encode():
                # We own the lock: truncate the lock file to release it.
                try:
                    with open(self.__lockPath, 'wb') as f:
                        f.write(''.encode())
                        f.flush()
                        os.fsync(f.fileno())
                except Exception as err:
                    released = False
                    code = Exception("Unable to write release lock file '%s' (%s)" % (self.__lockPath, str(err)))
                    if verbose:
                        print(str(code))
                    if raiseError:
                        raise code
                else:
                    released = True
                    code = 2
            else:
                # Lock file holds a different pass: not ours to release.
                code = 4
                released = False
    if released and self.__fd is not None:
        try:
            if not self.__fd.closed:
                self.__fd.flush()
                os.fsync(self.__fd.fileno())
                self.__fd.close()
        except Exception as err:
            code = Exception("Unable to close file descriptor of locked file '%s' (%s)" % (self.__filePath, str(err)))
            if verbose:
                print(str(code))
            if raiseError:
                raise code
        else:
            code = 3
    return released, code
Release the lock when set and close file descriptor if opened .
8,220
def import_bert(self, filename, **kwargs):
    """Import a unified-data-format (BERT/pyGIMLi) .ohm file into this
    container.

    An optional ``timestep`` kwarg is removed from kwargs and attached to
    the imported data frame; remaining kwargs are passed to the importer.
    """
    timestep = kwargs.pop('timestep', None)
    self.logger.info('Unified data format (BERT/pyGIMLi) file import')
    with LogDataChanges(self, filter_action='import',
                        filter_query=os.path.basename(filename)):
        data, electrodes, topography = reda_bert_import.import_ohm(filename, **kwargs)
        if timestep is not None:
            data['timestep'] = timestep
        self._add_to_container(data)
        self.electrode_positions = electrodes
    if kwargs.get('verbose', False):
        print('Summary:')
        self._describe_data(data)
BERT . ohm file import
8,221
def to_ip(self):
    """Return a copy of the data inside a TDIP container.

    Raises when the data lacks the required 'chargeability' column.
    """
    if 'chargeability' not in self.data.columns:
        raise Exception('Missing column "chargeability"')
    return reda.TDIP(data=self.data)
Return of copy of the data inside a TDIP container
8,222
def sub_filter(self, subset, filter, inplace=True):
    """Apply *filter* only to the rows selected by *subset*.

    Rows survive when they are outside the subset OR do not match the
    filter; the change is recorded via LogDataChanges.
    """
    full_query = 'not ({}) or not ({})'.format(subset, filter)
    with LogDataChanges(self, filter_action='filter', filter_query=filter):
        result = self.data.query(full_query, inplace=inplace)
    return result
Apply a filter to subset of the data
8,223
def filter(self, query, inplace=True):
    """Remove the rows matched by *query*.

    Note the inverted semantics: the query selects the data to be REMOVED.
    The change is recorded via LogDataChanges.
    """
    with LogDataChanges(self, filter_action='filter', filter_query=query):
        result = self.data.query('not ({0})'.format(query), inplace=inplace)
    return result
Use a query statement to filter data . Note that you specify the data to be removed!
8,224
def compute_K_analytical(self, spacing):
    """Compute geometric factors for a homogeneous half-space with constant
    electrode *spacing*, apply them to the data, and fix signs accordingly."""
    factors = redaK.compute_K_analytical(self.data, spacing=spacing)
    self.data = redaK.apply_K(self.data, factors)
    redafixK.fix_sign_with_K(self.data)
Compute geometrical factors over the homogeneous half - space with a constant electrode spacing
8,225
def pseudosection(self, column='r', filename=None, log10=False, **kwargs):
    """Plot a type-2 pseudosection of *column*, optionally saving to
    *filename* (300 dpi).

    Note: the underlying plotter only supports dipole-dipole data.
    Returns (fig, ax, cb).
    """
    fig, ax, cb = PS.plot_pseudosection_type2(
        self.data, column=column, log10=log10, **kwargs)
    if filename is not None:
        fig.savefig(filename, dpi=300)
    return fig, ax, cb
Plot a pseudosection of the given column . Note that this function only works with dipole - dipole data at the moment .
8,226
# Plot histogram(s) of one data column and return the dict of figures;
# optionally saves the combined figure to ``filename`` (300 dpi).
# NOTE(review): ``log10`` and ``**kwargs`` are accepted but never forwarded to
# HS.plot_histograms -- confirm whether they should be passed through (the
# sibling ``pseudosection`` method does forward its equivalents).
def histogram ( self , column = 'r' , filename = None , log10 = False , ** kwargs ) : return_dict = HS . plot_histograms ( self . data , column ) if filename is not None : return_dict [ 'all' ] . savefig ( filename , dpi = 300 ) return return_dict
Plot a histogram of one data column
8,227
def delete_measurements(self, row_or_rows):
    """Delete one or more measurements by positional index of the DataFrame
    and renumber the index.

    BUG FIX: uses ``reset_index(drop=True)`` -- the original's plain
    ``reset_index()`` inserted the old index as a new 'index' column, which
    polluted the frame and made repeated deletions fail.
    """
    self.data.drop(self.data.index[row_or_rows], inplace=True)
    self.data = self.data.reset_index(drop=True)
Delete one or more measurements by index of the DataFrame .
8,228
def get_image(self, source):
    """Load a file-like object's bytes into a PIL.Image and return it."""
    return Image.open(StringIO(source.read()))
Given a file - like object loads it up into a PIL . Image object and returns it .
8,229
def is_valid_image(self, raw_data):
    """Return True when *raw_data* parses and verifies as image data."""
    try:
        Image.open(StringIO(raw_data)).verify()
    except Exception:
        return False
    return True
Checks if the supplied raw data is valid image data .
8,230
def _colorspace ( self , image , colorspace ) : if colorspace == 'RGB' : if image . mode == 'RGBA' : return image if image . mode == 'P' and 'transparency' in image . info : return image . convert ( 'RGBA' ) return image . convert ( 'RGB' ) if colorspace == 'GRAY' : return image . convert ( 'L' ) return image
Sets the image's colorspace. This is typically RGB or GRAY, but may be something else depending on your choice of Engine.
8,231
def _get_raw_data(self, image, format, quality):
    """Serialize *image* to raw bytes in the given format and quality.

    Tries an optimized save first and falls back without ``optimize`` when
    the encoder raises IOError (its buffer being too small).
    """
    ImageFile.MAXBLOCK = 1024 * 1024
    out_buf = StringIO()
    try:
        image.save(out_buf, format=format, quality=quality, optimize=1)
    except IOError:
        image.save(out_buf, format=format, quality=quality)
    payload = out_buf.getvalue()
    out_buf.close()
    return payload
Returns the raw data from the Image which can be directly written to a something be it a file - like object or a database .
8,232
def enable(self):
    """Start the event-listener thread if one is not already running
    (serialized by the instance lock)."""
    with self._lock:
        if self._event_listener_thread is None:
            listener = WVAEventListenerThread(self, self._http_client)
            self._event_listener_thread = listener
            listener.start()
Enable the stream thread
8,233
def disable(self):
    """Stop and discard the event-listener thread if one is running
    (serialized by the instance lock)."""
    with self._lock:
        listener = self._event_listener_thread
        if listener is not None:
            listener.stop()
            self._event_listener_thread = None
Disconnect from the event stream
8,234
def get_status(self):
    """Return the event-stream state: the DISABLED constant when no listener
    thread exists, otherwise the thread's own state."""
    with self._lock:
        thread = self._event_listener_thread
        if thread is None:
            return EVENT_STREAM_STATE_DISABLED
        return thread.get_state()
Get the current status of the event stream system
8,235
def _parse_one_event(self):
    """Consume one JSON event from the stream buffer.

    Anything before the first '{' is discarded.  Returns the decoded event,
    or None when the buffer holds no '{' at all (buffer is cleared) or no
    complete JSON document yet (buffer kept for more data).
    """
    try:
        open_brace_idx = self._buf.index('{')
    except ValueError:
        # No opening brace: nothing salvageable in the buffer.
        self._buf = six.u('')
        return None
    if open_brace_idx > 0:
        self._buf = self._buf[open_brace_idx:]
    try:
        event, idx = self._decoder.raw_decode(self._buf)
    except ValueError:
        # Incomplete document; wait for more data.
        return None
    self._buf = self._buf[idx:]
    return event
Parse the stream buffer and return either a single event or None
8,236
def guess_codec(file, errors="strict", require_char=False):
    """Inspect a GEDCOM file's header and guess its correct encoding.

    Reads past any BOM (via ``check_bom``), then scans header lines for the
    ``1 CHAR`` record.  Returns (codec_name, bom_size).  Raises IOError on
    premature EOF and CodecError on unknown or conflicting encodings, or --
    when ``require_char`` is set -- on a header without a CHAR record.
    """
    # GEDCOM-specific encoding names mapped to registered codec names.
    gedcom_char_to_codec = {'ansel': 'gedcom'}
    bom_codec = check_bom(file)
    bom_size = file.tell()
    codec = bom_codec or 'gedcom'
    while True:
        line = file.readline()
        if not line:
            raise IOError("Unexpected EOF while reading GEDCOM header")
        words = line.lstrip().rstrip(b"\r\n").split()
        if len(words) >= 2 and words[0] == b"0" and words[1] != b"HEAD":
            # Header ended without a CHAR record.
            if require_char:
                raise CodecError("GEDCOM header does not have CHAR record")
            break
        elif len(words) >= 3 and words[0] == b"1" and words[1] == b"CHAR":
            try:
                encoding = words[2].decode(codec, errors)
                encoding = gedcom_char_to_codec.get(encoding.lower(), encoding.lower())
                new_codec = codecs.lookup(encoding).name
            except LookupError:
                raise CodecError("Unknown codec name {0}".format(encoding))
            if bom_codec is None:
                codec = new_codec
            elif new_codec != bom_codec:
                raise CodecError("CHAR codec {0} is different from BOM codec {1}".format(new_codec, bom_codec))
            break
    return codec, bom_size
Look at file contents and guess its correct encoding .
8,237
def records0(self, tag=None):
    """Iterate over all level-0 records, optionally restricted to *tag*."""
    _log.debug("in records0")
    for offset, xtag in self.index0:
        _log.debug(" records0: offset: %s; xtag: %s", offset, xtag)
        if tag is None or tag == xtag:
            yield self.read_record(offset)
Iterator over all level = 0 records .
8,238
def read_record(self, offset):
    """Read the next complete record from the file starting at *offset*.

    Maintains a stack of in-progress records indexed by GEDCOM level; a
    record is complete (decoded and frozen) once a line at or below its
    level appears.  Returns the level-``reclevel`` record, or None when no
    lines were read.

    NOTE(review): reconstructed from a collapsed one-line source; whether
    ``rec.freeze()`` sits inside or after the value-decoding ``if`` is
    ambiguous in the token stream -- placed at the ``if rec:`` level so
    every completed record is frozen.  Confirm against upstream ged4py.
    """
    _log.debug("in read_record(%s)", offset)
    stack = []
    reclevel = None
    for gline in self.gedcom_lines(offset):
        _log.debug(" read_record, gline: %s", gline)
        level = gline.level
        if reclevel is None:
            # First line fixes the level of the record we are reading.
            reclevel = level
        elif level <= reclevel:
            # Next record starts here; stop.
            break
        # Finalize records deeper than or at this level.
        for rec in reversed(stack[level:]):
            if rec:
                if rec.value is not None:
                    rec.value = rec.value.decode(self._encoding, self._errors)
                rec.freeze()
        del stack[level + 1:]
        stack.extend([None] * (level + 1 - len(stack)))
        parent = stack[level - 1] if level > 0 else None
        rec = self._make_record(parent, gline)
        stack[level] = rec
    # Finalize everything still open at or below the record level.
    for rec in reversed(stack[reclevel:]):
        if rec:
            if rec.value is not None:
                rec.value = rec.value.decode(self._encoding, self._errors)
            rec.freeze()
    _log.debug(" read_record, rec: %s", rec)
    return stack[reclevel] if stack else None
Read next complete record from a file starting at given position .
8,239
def _make_record(self, parent, gline):
    """Turn one parsed GEDCOM line into a record object.

    CONT/CONC continuation lines are folded into *parent*'s value (BLOB
    parents excluded; CONT prefixes a newline) and yield None.  Otherwise a
    new record is built via ``model.make_record`` -- using the detected
    dialect for everything except the HEAD record -- and appended to the
    parent's sub-records when a parent exists.
    """
    if parent and gline.tag in ("CONT", "CONC"):
        if parent.tag != "BLOB":
            value = gline.value
            if gline.tag == "CONT":
                value = b"\n" + (value or b"")
            if value is not None:
                parent.value = (parent.value or b"") + value
        return None
    dialect = model.DIALECT_DEFAULT
    if not (gline.level == 0 and gline.tag == "HEAD") and self._header:
        dialect = self.dialect
    rec = model.make_record(
        level=gline.level, xref_id=gline.xref_id, tag=gline.tag,
        value=gline.value, sub_records=[], offset=gline.offset,
        dialect=dialect, parser=self)
    if parent:
        parent.sub_records.append(rec)
    return rec
Process next record .
8,240
def validate_options(subscription_key, text):
    """Exit (status 3, after printing usage) unless both required options
    are non-empty strings.

    Note: ``print(x)`` with a single argument behaves identically under
    Python 2's print statement and Python 3.
    """
    if not subscription_key or len(subscription_key) == 0:
        print('Error: Warning the option subscription_key should contain a string.')
        print(USAGE)
        sys.exit(3)
    if not text or len(text) == 0:
        print('Error: Warning the option text should contain a string.')
        print(USAGE)
        sys.exit(3)
Perform sanity checks on threshold values
8,241
def main():
    """Parse CLI options and synthesize text via the Microsoft speech API,
    reporting the recorded output file."""
    parser = OptionParser()
    parser.add_option('-n', '--subscription_key', dest='subscription_key',
                      help='subscription_key for authentication')
    parser.add_option('-t', '--text', dest='text', help='text to synthesize')
    parser.add_option('-l', '--language', dest='language', help='language')
    parser.add_option('-g', '--gender', dest='gender', help='gender')
    parser.add_option('-d', '--directory', dest='directory',
                      help='directory to store the file')
    (options, args) = parser.parse_args()
    subscription_key = options.subscription_key
    text = options.text
    language = options.language
    gender = options.gender
    directory = options.directory
    validate_options(subscription_key, text)
    # Fall back to module-level defaults for unspecified options.
    if not directory:
        directory = default_directory
    if not language:
        language = default_language
    if not gender:
        gender = default_gender
    format = 'riff-8khz-8bit-mono-mulaw'
    tts_msspeak = MSSpeak(subscription_key, '/tmp/')
    tts_msspeak.set_cache(False)
    output_filename = tts_msspeak.speak(text, language, gender, format)
    print('Recorded TTS to %s%s' % (directory, output_filename))
Parse options and process text to Microsoft Translate
8,242
def sample(self):
    """Fetch the current value of this vehicle data element as a
    VehicleDataSample (value plus parsed timestamp)."""
    payload = self._http_client.get("vehicle/data/{}".format(self.name))[self.name]
    timestamp = arrow.get(payload["timestamp"]).datetime
    return VehicleDataSample(payload["value"], timestamp)
Get the current value of this vehicle data element
8,243
def _get_or_create_bucket(self, name):
    """Return the named S3 bucket, creating it when missing and
    AUTO_CREATE_BUCKET allows; otherwise raise ImproperlyConfigured.

    Rewritten with Python-2.6+/3-compatible ``except ... as`` / call-style
    raise syntax (semantics unchanged from the old comma forms).
    """
    try:
        return self.connection.get_bucket(name)
    except S3ResponseError:
        if AUTO_CREATE_BUCKET:
            return self.connection.create_bucket(name)
        raise ImproperlyConfigured(
            "Bucket specified by "
            "AWS_STORAGE_BUCKET_NAME does not exist. Buckets can be "
            "automatically created by setting AWS_AUTO_CREATE_BUCKET=True")
Retrieves a bucket if it exists otherwise creates it .
8,244
def _compress_content ( self , content ) : zbuf = StringIO ( ) zfile = GzipFile ( mode = 'wb' , compresslevel = 6 , fileobj = zbuf ) zfile . write ( content . read ( ) ) zfile . close ( ) content . file = zbuf return content
Gzip the contents of a given file-like object.
8,245
def url(self, name):
    """Build a public URL for *name* without querying S3.

    All storage is assumed public with no authorization keys, so the URL is
    derived from the CNAME, custom host, or the default S3 endpoint.
    """
    quoted = urllib.quote_plus(self._clean_name(name), safe='/')
    if self.bucket_cname:
        return "http://%s/%s" % (self.bucket_cname, quoted)
    if self.host:
        return "http://%s/%s/%s" % (self.host, self.bucket_name, quoted)
    return "http://s3.amazonaws.com/%s/%s" % (self.bucket_name, quoted)
Since we assume all public storage with no authorization keys we can just simply dump out a URL rather than having to query S3 for new keys .
8,246
def normalize_weekly(data):
    """Normalize dining menu data in place: every singleton dict in the
    tblMenu/tblDayPart/tblStation/tblItem tree is coerced to a one-element
    list, and a missing tblMenu becomes [].  Returns the (mutated) data.
    """
    document = data["result_data"]["Document"]
    if "tblMenu" not in document:
        document["tblMenu"] = []
    if isinstance(document["tblMenu"], dict):
        document["tblMenu"] = [document["tblMenu"]]
    for day in document["tblMenu"]:
        if "tblDayPart" not in day:
            continue
        if isinstance(day["tblDayPart"], dict):
            day["tblDayPart"] = [day["tblDayPart"]]
        for meal in day["tblDayPart"]:
            if isinstance(meal["tblStation"], dict):
                meal["tblStation"] = [meal["tblStation"]]
            for station in meal["tblStation"]:
                if isinstance(station["tblItem"], dict):
                    station["tblItem"] = [station["tblItem"]]
    return data
Normalization for dining menu data
8,247
def get_meals(v2_response, building_id):
    """Convert a DiningV2 JSON response into the legacy meal/station/item
    structure for *building_id* (first day, first daypart group)."""
    result_data = v2_response["result_data"]
    day_parts = result_data["days"][0]["cafes"][building_id]["dayparts"][0]
    meals = []
    for meal in day_parts:
        stations = []
        for station in meal["stations"]:
            items = []
            for item_id in station["items"]:
                item = result_data["items"][item_id]
                # Legacy attribute block: single dict, list of dicts, or "".
                attrs = [{"description": item["cor_icon"][key]}
                         for key in item["cor_icon"]]
                if len(attrs) == 1:
                    tbl_attributes = {"txtAttribute": attrs[0]}
                elif len(attrs) > 1:
                    tbl_attributes = {"txtAttribute": attrs}
                else:
                    tbl_attributes = ""
                legacy_item = {
                    "txtTitle": item["label"],
                    "txtPrice": "",
                    "txtNutritionInfo": "",
                    "txtDescription": item["description"],
                    "tblSide": "",
                    "tblFarmToFork": "",
                    "tblAttributes": tbl_attributes,
                }
                # API quirk: empty options arrive as a list; normalize to {}.
                if isinstance(item["options"], list):
                    item["options"] = {}
                if "values" in item["options"]:
                    for side in item["options"]["values"]:
                        legacy_item["tblSide"] = {"txtSideName": side["label"]}
                items.append(legacy_item)
            stations.append({"tblItem": items,
                             "txtStationDescription": station["label"]})
        meals.append({"tblStation": stations,
                      "txtDayPartDescription": meal["label"]})
    return meals
Extract meals into old format from a DiningV2 JSON response
8,248
def menu(self, venue_id, date):
    """Fetch the menu for *venue_id* on *date* (YYYY-MM-DD string)."""
    return self._request(V2_ENDPOINTS['MENUS'] + venue_id + "&date=" + date)
Get the menu for the venue corresponding to venue_id on date .
8,249
def venues(self):
    """Return the raw venues response with friendly names substituted and
    singleton dateHours/meal dicts coerced to lists."""
    response = self._request(V2_ENDPOINTS['VENUES'])
    for venue in response["result_data"]["document"]["venue"]:
        venue_id = venue.get("id")
        if venue_id in VENUE_NAMES:
            venue["name"] = VENUE_NAMES[venue_id]
        if isinstance(venue.get("dateHours"), dict):
            venue["dateHours"] = [venue["dateHours"]]
        if "dateHours" in venue:
            for dh in venue["dateHours"]:
                if isinstance(dh.get("meal"), dict):
                    dh["meal"] = [dh["meal"]]
    return response
Get a list of all venue objects .
8,250
def menu_daily(self, building_id):
    """Build a legacy daily-menu document for *building_id* from today's V2
    API response."""
    today = str(datetime.date.today())
    v2_response = DiningV2(self.bearer, self.token).menu(building_id, today)
    document = {}
    document["menudate"] = datetime.datetime.strptime(
        today, '%Y-%m-%d').strftime('%-m/%d/%Y')
    if building_id in VENUE_NAMES:
        document["location"] = VENUE_NAMES[building_id]
    else:
        document["location"] = \
            v2_response["result_data"]["days"][0]["cafes"][building_id]["name"]
    document["tblMenu"] = {"tblDayPart": get_meals(v2_response, building_id)}
    return {'result_data': {'Document': document}}
Get a menu object corresponding to the daily menu for the venue with building_id .
8,251
def menu_weekly(self, building_id):
    """Build a legacy weekly (7-day) menu document for *building_id* from
    the V2 API and normalize it."""
    din = DiningV2(self.bearer, self.token)
    response = {'result_data': {'Document': {}}}
    days = []
    for offset in range(7):
        date = str(datetime.date.today() + datetime.timedelta(days=offset))
        v2_response = din.menu(building_id, date)
        if building_id in VENUE_NAMES:
            response["result_data"]["Document"]["location"] = VENUE_NAMES[building_id]
        else:
            response["result_data"]["Document"]["location"] = \
                v2_response["result_data"]["days"][0]["cafes"][building_id]["name"]
        formatted_date = datetime.datetime.strptime(
            date, '%Y-%m-%d').strftime('%-m/%d/%Y')
        days.append({"tblDayPart": get_meals(v2_response, building_id),
                     "menudate": formatted_date})
    response["result_data"]["Document"]["tblMenu"] = days
    return normalize_weekly(response)
Get an array of menu objects corresponding to the weekly menu for the venue with building_id .
8,252
def to_cr(self):
    """Convert this container to a complex-resistivity (CR) container using
    the CPA conversion (phase [mrad] = -1.5 * chargeability)."""
    converted = self.data.copy()
    converted['rpha'] = -1.5 * converted['chargeability']
    converted['Zt'] = converted['r'] * np.exp(1j * converted['rpha'] / 1000.0)
    return reda.CR(data=converted)
Convert container to a complex resistivity container using the CPA - conversion .
8,253
def apply(filter):
    """Manufacture a decorator that post-processes a function's return value
    with *filter*.

    IMPROVEMENT: the wrapper is a named function with ``functools.wraps``
    applied, so the decorated callable keeps its name/docstring (the
    original lambda discarded that metadata).
    """
    def decorator(func):
        from functools import wraps

        @wraps(func)
        def wrapper(*args, **kwargs):
            return filter(func(*args, **kwargs))
        return wrapper
    return decorator
Manufacture decorator that filters return value with given function .
8,254
def format_outpat(outpat, xn):
    """Format an output-path pattern for the given transaction.

    Supplies ``year``, zero-padded ``month``, ``fy`` (rolls over to the next
    year from July onward) and the raw ``date`` as format fields.
    """
    txn_date = xn.date
    fy = txn_date.year if txn_date.month < 7 else txn_date.year + 1
    return outpat.format(
        year=str(txn_date.year),
        month='{:02}'.format(txn_date.month),
        fy=str(fy),
        date=txn_date,
    )
Format an outpat for the given transaction .
8,255
def get(self, name, acc=None, default=None):
    """Return the named config value: account-specific first, then the
    global setting, else *default*."""
    accounts = self.data['accounts']
    if acc in accounts and name in accounts[acc]:
        return accounts[acc][name]
    return self.data.get(name, default)
Return the named config for the given account .
8,256
def outdir(self, acc=None):
    """Return the output directory for *acc*, creating it when needed.

    Returns None when either rootdir or the 'outdir' setting is unset.

    BUG FIX: the None case is now guarded before the existence check -- the
    original called ``os.path.exists(None)`` and raised TypeError whenever
    the directory was unconfigured.
    """
    rootdir = self.rootdir()
    outdir = self.get('outdir', acc=acc)
    path = os.path.join(rootdir, outdir) if rootdir and outdir else None
    if path is not None and not os.path.exists(path):
        os.makedirs(path)
    return path
Return the outdir for the given account .
8,257
def outpat(self, acc=None):
    """Return the full output-file pattern for *acc*, or None when either
    the outdir or the 'outpat' setting is missing."""
    directory = self.outdir(acc)
    pattern = self.get('outpat', acc=acc)
    if directory and pattern:
        return os.path.join(directory, pattern)
    return None
Determine the full outfile pattern for the given account .
8,258
def rulesdir(self, acc=None):
    """Return the rules directory for *acc*, or None when rootdir or the
    'rulesdir' setting is unset (the default ``[]`` is falsy)."""
    rootdir = self.rootdir()
    rules_subdir = self.get('rulesdir', acc=acc, default=[])
    if rootdir and rules_subdir:
        return os.path.join(rootdir, rules_subdir)
    return None
Determine the rulesdir for the given account .
8,259
def rulefiles(self, acc=None):
    """Return rule-file paths for *acc*; account-specific rules are followed
    by the global (acc=None) rules."""
    rules_dir = self.rulesdir(acc)
    files = [os.path.join(rules_dir, name) for name in self.get('rules', acc, [])]
    if acc is not None:
        files += self.rulefiles(acc=None)
    return files
Return a list of rulefiles for the given account .
8,260
def download_data(identifier, outdir):
    """Download and unpack the test dataset named *identifier* into *outdir*.

    Resolves *identifier* against the repository's inventory file (or a
    local file:// mirror when configured), downloads the referenced zip
    archive, and extracts it into *outdir* (created if missing).
    """
    if use_local_data_repository is not None:
        url_base = 'file:' + request.pathname2url(use_local_data_repository + os.sep)
    else:
        url_base = repository_url
    print('url_base: {}'.format(url_base))
    filename, headers = request.urlretrieve(url_base + inventory_filename)
    df = pd.read_csv(
        filename,
        delim_whitespace=True,
        comment='#',
        header=None,
        names=['identifier', 'rel_path'],
    )
    rel_path_query = df.query('identifier == "{}"'.format(identifier))
    if rel_path_query.shape[0] == 0:
        raise Exception('identifier not found')
    rel_path = rel_path_query['rel_path'].values[0]
    url = url_base + rel_path
    print('data url: {}'.format(url))
    filename, headers = request.urlretrieve(url)
    if not os.path.isdir(outdir):
        os.makedirs(outdir)
    zipfile.ZipFile(filename).extractall(outdir)
Download data from a separate data repository for testing .
8,261
def append(self, item):
    """Append a (key, score) pair to the score set.

    Membership (delegated to this class's ``__contains__``) decides whether
    the key's score list is extended or newly created.
    """
    if item in self:
        key, value = item[0], item[1]
        self.items[key].append(value)
    else:
        self.items[item[0]] = [item[1]]
Append an item to the score set .
8,262
def scores(self):
    """Return the items with their final scores: sum of scores scaled by
    1/sqrt(count).  (Python 2: ``viewitems``/``map`` yield a list.)"""
    def _finalize(entry):
        item, values = entry
        return item, sum(values) * len(values) ** -.5
    return map(_finalize, iter(self.items.viewitems()))
Return a list of the items with their final scores .
8,263
def highest(self):
    """Return all scored items tied for the highest final score, or None
    when there are no scores.  (Python 2: ``filter`` returns a list.)"""
    scores = self.scores()
    if not scores:
        return None
    best = max(map(score, scores))
    return filter(lambda entry: score(entry) == best, scores)
Return the items with the highest score.
8,264
def is_empty_shape(sh: ShExJ.Shape) -> bool:
    """Return True iff *sh* carries no constraints: closed, expression,
    extra and semActs are all None."""
    return all(
        getattr(sh, attr) is None
        for attr in ('closed', 'expression', 'extra', 'semActs')
    )
Determine whether sh has any value
8,265
def fix_text_escapes(self, txt: str, quote_char: str) -> str:
    """Resolve backslash escapes in *txt*: unescape the quote character
    first, then translate the remaining ``\\x`` sequences through the
    instance's translation table."""
    def _translate(match):
        return match.group(0).translate(self.re_trans_table)

    if quote_char:
        txt = re.sub(r'\\' + quote_char, quote_char, txt)
    flags = re.MULTILINE | re.DOTALL | re.UNICODE
    return re.sub(r'\\.', _translate, txt, flags=flags)
Fix the various text escapes
8,266
def fix_re_escapes(self, txt: str) -> str:
    """Strip ShEx's blanket escaping, keeping only regex-legal escapes.

    Escaped control characters become their letter escapes (``\\n`` etc.),
    escapes of regex metacharacters are preserved, and any other escaped
    character collapses to the bare character.
    """
    controls = '\b\f\n\t\r'

    def _translate(match):
        seq = match.group(0).translate(self.re_trans_table)
        ch = seq[1]
        if ch in controls:
            return seq[0] + 'bfntr'[controls.index(ch)]
        if ch in '\\.?*+^$()[]{|}':
            return seq
        return ch

    return re.sub(r'\\.', _translate, txt,
                  flags=re.MULTILINE | re.DOTALL | re.UNICODE)
The ShEx RE engine allows escaping any character . We have to remove that escape for everything except those that CAN be legitimately escaped
8,267
def _iter_response ( self , url , params = None ) : if params is None : params = { } params [ 'page_number' ] = 1 while True : response = self . _request ( url , params ) for item in response [ 'result_data' ] : yield item if response [ 'service_meta' ] [ 'next_page_number' ] == params [ 'page_number' ] : break params [ 'page_number' ] += 1
Return an enumerable that iterates through a multi - page API request
8,268
def search(self, params, validate=False):
    """Return a generator of section objects for the given search params.

    When *validate* is set and the parameters fail validation, returns
    ``{'Errors': [...]}`` instead of issuing the search.
    """
    if self.val_info is None:
        self.val_info = self.search_params()
    errors = self.validate(self.val_info, params) if validate else []
    if errors:
        return {'Errors': errors}
    return self._iter_response(ENDPOINTS['SEARCH'], params)
Return a generator of section objects for the given search params .
8,269
def course(self, dept, course_number):
    """Return semester-independent info for one course; all arguments are
    strings."""
    endpoint = path.join(ENDPOINTS['CATALOG'], dept, course_number)
    return self._request(endpoint)['result_data'][0]
Return an object of semester - independent course info . All arguments should be strings .
8,270
def section(self, dept, course_number, sect_number):
    """Return the single section object for dept+course+section (all
    strings).  Raises ValueError when the section is not found."""
    section_id = '{}{}{}'.format(dept, course_number, sect_number)
    results = self.search({'course_id': section_id})
    try:
        return next(results)
    except StopIteration:
        raise ValueError('Section %s not found' % section_id)
Return a single section object for the given section . All arguments should be strings . Throws a ValueError if the section is not found .
8,271
def parse_to_gvid(v):
    """Parse *v* as a GVid directly, falling back to parsing it as an ACS
    geoid converted to GVid; raises ValueError with both messages when
    neither works."""
    from geoid.civick import GVid
    from geoid.acs import AcsGeoid
    first_error = ''
    try:
        return GVid.parse(v)
    except ValueError as e:
        first_error = str(e)
    try:
        return AcsGeoid.parse(v).convert(GVid)
    except ValueError as e:
        raise ValueError(
            "Failed to parse to either ACS or GVid: {}; {}".format(first_error, str(e)))
Parse an ACS Geoid or a GVID to a GVID
8,272
def base62_decode(string):
    """Decode a base-62 encoded string into its integer value.

    Digit values are ``0-9``, then ``a-z``, then ``A-Z`` (0..61).  An empty
    string decodes to 0.  Raises ``ValueError`` if *string* contains a
    character outside the alphabet.
    """
    alphabet = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
    base = len(alphabet)
    num = 0
    # Horner's rule: equivalent to summing digit * base**position without
    # recomputing the power (and the index) for every character.
    for char in string:
        num = num * base + alphabet.index(char)
    return num
Decode a Base X encoded string into the number
8,273
def make_classes(base_class, module):
    """Create one derived class per summary-level name and attach each to *module*."""
    from functools import partial

    for level_name in names:
        derived = base_class.class_factory(level_name.capitalize())
        derived.augment()
        setattr(module, level_name.capitalize(), derived)

    # Expose a module-level get_class() bound to this module.
    setattr(module, 'get_class', partial(get_class, module))
Create derived classes and put them into the same module as the base class .
8,274
def generate_all(sumlevel, d):
    """Return a dict with the geoid for *sumlevel* in every known format.

    :param sumlevel: summary level, as int or numeric string.
    :param d: dict of geoid components; a copy is taken, and common
        alternate component names are normalized.
    :return: dict with keys ``gvid``, ``geoid`` (ACS) and ``geoidt``
        (Tiger), or an empty dict when the summary level has no GVid class.
    """
    from geoid.civick import GVid
    from geoid.tiger import TigerGeoid
    from geoid.acs import AcsGeoid

    sumlevel = int(sumlevel)
    d = dict(d.items())

    # Normalize alternate spellings of the component names.
    for alias, canonical in (('cousub', 'cosub'),
                             ('blkgrp', 'blockgroup'),
                             ('zcta5', 'zcta')):
        if alias in d:
            d[canonical] = d.pop(alias)

    gvid_class = GVid.resolve_summary_level(sumlevel)
    if not gvid_class:
        return {}

    geoidt_class = TigerGeoid.resolve_summary_level(sumlevel)
    geoid_class = AcsGeoid.resolve_summary_level(sumlevel)

    # The original wrapped this in a no-op ``try: ... except: raise``,
    # which added nothing; exceptions now propagate naturally.
    return dict(
        gvid=str(gvid_class(**d)),
        geoid=str(geoid_class(**d)),
        geoidt=str(geoidt_class(**d)),
    )
Generate a dict that includes all of the available geoid values with keys for the most common names for those values .
8,275
def _generate_names():
    """Print the lookup table of state and county names (one-off codegen helper)."""
    from ambry import get_library

    library = get_library()
    counties = library.partition('census.gov-acs-geofile-2009-geofile50-20095-50')
    states = library.partition('census.gov-acs-geofile-2009-geofile40-20095-40')

    names = {}
    for row in counties.remote_datafile.reader:
        names[(row.state, row.county)] = row.name
    for row in states.remote_datafile.reader:
        # Component '00' is the whole-state record.
        if row.component == '00':
            names[(row.state, 0)] = row.name

    pprint.pprint(names)
Code to generate the state and county names
8,276
def division_name(self):
    """Return the division-type designation (County, Parish, Borough, ...)
    found in this geography's name, or '' when none matches."""
    match = self.type_names_re.search(self.name)
    try:
        groups = match.groups()
    except AttributeError:
        # search() returned None: no designation present in the name.
        return ''
    return next(e for e in groups if e is not None)
The type designation for the county or county equivalent, such as "County", "Parish", or "Borough".
8,277
def augment(cls):
    """Attach computed formats, regexes and lookup-table entries to a
    derived class.  Caching these on the class avoids recomputing them
    for every instance."""
    import re

    level_name = cls.__name__.lower()

    cls.sl = names[level_name]
    cls.class_map[level_name] = cls
    cls.sl_map[cls.sl] = cls

    cls.fmt = cls.make_format_string(level_name)
    cls.regex_str = cls.make_regex(level_name)
    cls.regex = re.compile(cls.regex_str)

    cls.level = level_name
    cls.fields = segments[cls.sl]
Augment the class with computed formats regexes and other things . This caches these values so they don t have to be created for every instance .
8,278
def get_class(cls, name_or_sl):
    """Return the derived class for a summary-level number or a level name.

    Raises ``TypeError`` for values that cannot be interpreted at all and
    ``NotASummaryName`` for strings that match no known level.
    """
    try:
        # Numbers (or numeric strings) look up by summary level.
        return cls.sl_map[int(name_or_sl)]
    except TypeError as e:
        raise TypeError("Bad name or sl: {} : {}".format(name_or_sl, e))
    except ValueError:
        # Not numeric: fall back to a case-insensitive name lookup.
        try:
            return cls.class_map[name_or_sl.lower()]
        except (KeyError, ValueError):
            raise NotASummaryName("Value '{}' is not a valid summary level".format(name_or_sl))
Return a derived class based on the class name or the summary_level
8,279
def geo_name(self):
    """Return a human-readable name for this geography.

    States and counties use their proper names; lower levels are described
    relative to their containing county or state, falling back to just
    the level type.
    """
    if self.level == 'county':
        return str(self.county_name)
    if self.level == 'state':
        return self.state_name
    if hasattr(self, 'county'):
        return "{} in {}".format(self.level, str(self.county_name))
    if hasattr(self, 'state'):
        return "{} in {}".format(self.level, self.state_name)
    return "a {}".format(self.level)
Return the name of the state or county, or, for other lower levels, a description of the level type within its containing county or state.
8,280
def parse(cls, gvid, exception=True):
    """Parse a string value into a geoid instance of this class.

    :param gvid: the encoded geoid string.  The literal 'invalid' yields
        the null geoid; falsy values yield None.
    :param exception: when False, parse failures return the null geoid
        instead of raising ValueError.
    :raises TypeError: when *gvid* is not a string.
    :raises ValueError: on parse failure (only when *exception* is True,
        except for regex-match failures, which always raise).
    """
    if gvid == 'invalid':
        return cls.get_class('null')(0)

    if not bool(gvid):
        return None

    if not isinstance(gvid, six.string_types):
        raise TypeError("Can't parse; not a string. Got a '{}' ".format(type(gvid)))

    try:
        if not cls.sl:
            # Called on the base class: decode the summary level from the
            # gvid's prefix.  (On Python 2, classmethod access needs the
            # underlying function via __func__.)
            if six.PY3:
                fn = cls.decode
            else:
                fn = cls.decode.__func__

            sl = fn(gvid[0:cls.sl_width])
        else:
            # Called on a concrete class: the summary level is fixed.
            sl = cls.sl
    except ValueError as e:
        if exception:
            raise ValueError("Failed to parse gvid '{}': {}".format(gvid, str(e)))
        else:
            return cls.get_class('null')(0)

    try:
        # Re-bind cls to the concrete class for this summary level.
        cls = cls.sl_map[sl]
    except KeyError:
        if exception:
            raise ValueError("Failed to parse gvid '{}': Unknown summary level '{}' ".format(gvid, sl))
        else:
            return cls.get_class('null')(0)

    m = cls.regex.match(gvid)

    if not m:
        raise ValueError("Failed to match '{}' to '{}' ".format(gvid, cls.regex_str))

    d = m.groupdict()

    if not d:
        return None

    if six.PY3:
        fn = cls.decode
    else:
        fn = cls.decode.__func__

    # Decode each captured field from its encoded representation.
    d = {k: fn(v) for k, v in d.items()}

    # The summary level is implied by the class; drop it from the kwargs.
    try:
        del d['sl']
    except KeyError:
        pass

    return cls(**d)
Parse a string value into the geoid of this class .
8,281
def convert(self, root_cls):
    """Convert to the same summary level in another geoid family.

    :param root_cls: base class of the target family (AcsGeoid,
        TigerGeoid, ...), or a concrete class from it.
    :return: a new instance of the target family's class.
    """
    # Copy the instance dict (matching promote()/allval()).  The original
    # wrote 'sl' straight into self.__dict__, mutating the instance as a
    # side effect of a conversion.
    d = dict(self.__dict__.items())
    d['sl'] = self.sl

    try:
        cls = root_cls.get_class(root_cls.sl)
    except (AttributeError, TypeError):
        # root_cls is the family base with no summary level of its own:
        # look up the target class by our summary level instead.
        cls = root_cls.get_class(self.sl)

    return cls(**d)
Convert to another derived class . cls is the base class for the derived type ie AcsGeoid TigerGeoid etc .
8,282
def promote(self, level=None):
    """Return this geoid re-expressed at the next higher summary level.

    With an explicit *level*, promote directly to that level.  Returns
    None when there is no higher level to promote to.
    """
    if level is not None:
        target = self.get_class(level)
    elif len(self.fields) >= 2:
        # The parent level is the second-to-last component field.
        target = self.get_class(self.fields[-2])
    elif self.level in ('region', 'division', 'state', 'ua'):
        # Top-level geographies promote to the whole US.
        target = self.get_class('us')
    else:
        return None

    d = dict(self.__dict__.items())
    d['sl'] = self.sl
    return target(**d)
Convert to the next higher level summary level
8,283
def allval(self):
    """Return a copy of this geoid with its own level's value zeroed.

    The zero value represents the whole parent region at this level's
    granularity — e.g. for a county, "all counties in the state".
    """
    d = dict(self.__dict__.items())
    d['sl'] = self.sl
    # Zero out the field for our own level.
    d[self.level] = 0
    return self.get_class(self.sl)(**d)
Convert the last value to zero . This form represents the entire higher summary level at the granularity of the lower summary level . For example for a county it means All counties in the state
8,284
def nullval(cls):
    """Return an instance of this class with every value zeroed."""
    # Start from the class dict with every entry zeroed, then restore the
    # summary level and (redundantly) zero this level's own field.
    d = {k: 0 for k in cls.__dict__}
    d['sl'] = cls.sl
    d[cls.level] = 0
    return cls(**d)
Create a new instance where all of the values are 0
8,285
def split_name(name):
    """Split a full-name string of the form 'Given1 /Surname/ Given2'.

    Returns a (given1, surname, given2) tuple of stripped strings; missing
    pieces come back as ''.
    """
    before, _, remainder = name.partition('/')
    middle, _, after = remainder.partition('/')
    return before.strip(), middle.strip(), after.strip()
Extracts pieces of name from full name string .
8,286
def parse_name_altree(record):
    """Parse a NAME record assuming the ALTREE dialect.

    Returns (given1, surname, given2), extended with a fourth maiden-name
    element when a SURN sub-tag is present.  '?' surname placeholders
    become empty strings.
    """
    given1, surname, given2 = split_name(record.value)
    if surname == '?':
        surname = ''

    maiden = record.sub_tag_value("SURN")
    if maiden:
        # ALTREE appends the maiden name to the surname as "(Maiden)";
        # strip that suffix and carry the maiden name separately.
        suffix = '(' + maiden + ')'
        if surname.endswith(suffix):
            surname = surname[:-len(suffix)].rstrip()
            if surname == '?':
                surname = ''
        return (given1, surname, given2, maiden)

    return (given1, surname, given2)
Parse NAME structure assuming ALTREE dialect .
8,287
def parse_name_myher(record):
    """Parse a NAME record assuming the MYHERITAGE dialect.

    Returns (given1, surname, given2); when a _MARNM (married name) tag is
    present, returns (given1, married_name, given2, maiden_surname).
    """
    given1, surname, given2 = split_name(record.value)
    married = record.sub_tag_value("_MARNM")
    if married:
        # The NAME surname is the maiden name; the married name replaces it.
        return (given1, married, given2, surname)
    return (given1, surname, given2)
Parse NAME structure assuming MYHERITAGE dialect .
8,288
def number(items):
    """Return *items* decorated with right-aligned index labels.

    ['a', 'b'] -> ['[0] a', '[1] b'], with the index padded to the width
    of the largest index so the labels line up.  An empty input is
    returned unchanged.

    (Cleanup: the original shadowed the builtin ``format`` and converted
    the width int -> str -> int -> str for no reason.)
    """
    count = len(items)
    if count == 0:
        return items
    # Decimal width of the largest index.
    width = int(math.log10(count) // 1 + 1)
    fmt = '[{0:' + str(width) + 'd}] {1}'
    return [fmt.format(i, item) for i, item in enumerate(items)]
Maps numbering onto given values
8,289
def filter_yn(string, default=None):
    """Interpret a yes/no answer: True for y*, False for n*.

    Empty input returns *default* (when given); anything else raises
    InvalidInputError.
    """
    if string.startswith(('Y', 'y')):
        return True
    if string.startswith(('N', 'n')):
        return False
    if not string and default is not None:
        return bool(default)
    raise InvalidInputError
Return True if yes False if no or the default .
8,290
def filter_int(string, default=None, start=None, stop=None):
    """Parse an integer in [start, stop); empty input yields *default*.

    Raises InvalidInputError on non-numeric input or an out-of-range value.
    """
    try:
        value = int(string)
        if start is not None and value < start:
            raise InvalidInputError("value too small")
        if stop is not None and value >= stop:
            raise InvalidInputError("value too large")
        return value
    except ValueError:
        # Non-numeric: only an empty string with a default is acceptable.
        if not string and default is not None:
            return default
        raise InvalidInputError
Return the input integer or the default .
8,291
def filter_decimal(string, default=None, lower=None, upper=None):
    """Parse a decimal number in [lower, upper); empty input yields *default*.

    Raises InvalidInputError on malformed input or an out-of-range value.
    """
    try:
        value = decimal.Decimal(string)
        if lower is not None and value < lower:
            raise InvalidInputError("value too small")
        if upper is not None and value >= upper:
            raise InvalidInputError("value too large")
        return value
    except decimal.InvalidOperation:
        # Malformed number: only an empty string with a default is acceptable.
        if not string and default is not None:
            return default
        raise InvalidInputError("invalid decimal number")
Return the input decimal number or the default .
8,292
def filter_pastdate(string, default=None):
    """Parse a 'DD [MM [YYYY]]' date, filling missing parts with the most
    recent past occurrence; empty input yields *default*.

    Raises InvalidInputError for malformed input or dates in the future.

    (Fixes: removed a stray Python-2 ``print parts`` debug statement from
    the error path, and made the map() result an explicit list so the
    later .append()/.reverse() calls work on Python 3's lazy map.)
    """
    if not string and default is not None:
        return default

    today = datetime.date.today()

    try:
        parts = list(map(int, re.split(r'\D+', string)))
    except ValueError:
        raise InvalidInputError("invalid date; use format: DD [MM [YYYY]]")
    if len(parts) < 1 or len(parts) > 3:
        raise InvalidInputError("invalid date; use format: DD [MM [YYYY]]")

    if len(parts) == 1:
        # Day only: assume this month, or last month if the day is still ahead.
        parts.append(today.month - 1 if parts[0] > today.day else today.month)
        if parts[1] < 1:
            parts[1] = 12
    if len(parts) == 2:
        # Day and month: assume this year, or last year for a future date.
        if parts[1] > today.month or parts[1] == today.month and parts[0] > today.day:
            parts.append(today.year - 1)
        else:
            parts.append(today.year)

    parts.reverse()
    try:
        date = datetime.date(*parts)
        if date > today:
            raise InvalidInputError("cannot choose a date in the future")
        return date
    except ValueError:
        raise InvalidInputError("invalid date; use format: DD [MM [YYYY]]")
Coerce to a date not beyond the current date
8,293
def input(self, filter_fn, prompt):
    """Prompt repeatedly until *filter_fn* accepts the response.

    :param filter_fn: callable that parses/validates the raw string and
        raises InvalidInputError on bad input.
    :param prompt: prompt string shown to the user.
    :raises RejectWarning: when the user aborts with Ctrl-C.

    NOTE(review): Python-2-only — uses ``raw_input`` and the exception's
    ``.message`` attribute, both removed in Python 3.
    """
    while True:
        try:
            return filter_fn(raw_input(prompt))
        except InvalidInputError as e:
            # Show the reason (if any) and re-prompt.
            if e.message:
                self.show('ERROR: ' + e.message)
        except KeyboardInterrupt:
            raise RejectWarning
Prompt user until valid input is received .
8,294
def text(self, prompt, default=None):
    """Prompt the user for free text, with an optional default value."""
    if prompt is None:
        prompt = 'Enter some text'
    prompt += " [{0}]: ".format(default) if default is not None else ': '
    return self.input(curry(filter_text, default=default), prompt)
Prompts the user for some text with optional default
8,295
def decimal(self, prompt, default=None, lower=None, upper=None):
    """Prompt the user for a decimal number, with optional default and bounds."""
    if prompt is None:
        prompt = "Enter a decimal number"
    prompt += " [{0}]: ".format(default) if default is not None else ': '
    return self.input(
        curry(filter_decimal, default=default, lower=lower, upper=upper),
        prompt)
Prompts user to input decimal with optional default and bounds .
8,296
def pastdate(self, prompt, default=None):
    """Prompt the user for a date in the past, with an optional default."""
    if prompt is None:
        prompt = "Enter a past date"
    if default is not None:
        prompt += " [" + default.strftime('%d %m %Y') + "]"
    prompt += ': '
    return self.input(curry(filter_pastdate, default=default), prompt)
Prompts user to input a date in the past .
8,297
def choose(self, prompt, items, default=None):
    """Display *items* numbered and prompt the user to pick one by index.

    Raises IndexError immediately when *default* is outside the list range.
    """
    if default is not None and not 0 <= default < len(items):
        raise IndexError

    if prompt is None:
        prompt = "Choose from following:"
    self.show(prompt + '\n')
    self.show("\n".join(number(items)))

    ask = "Enter number of chosen item"
    ask += " [{0}]: ".format(default) if default is not None else ': '
    index = self.input(
        curry(filter_int, default=default, start=0, stop=len(items)), ask)
    return items[index]
Prompts the user to choose one item from a list .
8,298
def goose_extractor(url):
    """Extract web-page content with the Goose library.

    Returns a (title, meta_description, cleaned_text) tuple for *url*.
    """
    extractor = Goose()
    article = extractor.extract(url=url)
    return article.title, article.meta_description, article.cleaned_text
webpage extraction using Goose Library
8,299
def _tokenize(sentence):
    """Tokenize *sentence* with nltk and stem each resulting token."""
    return [stemmer.stem(token) for token in nltk.word_tokenize(sentence)]
Tokenizer and Stemmer