idx
int64
0
251k
question
stringlengths
53
3.53k
target
stringlengths
5
1.23k
len_question
int64
20
893
len_target
int64
3
238
8,200
def _colorspace ( self , image , colorspace ) : if colorspace == 'RGB' : if image . mode == 'RGBA' : # RGBA is just RGB + Alpha return image if image . mode == 'P' and 'transparency' in image . info : return image . convert ( 'RGBA' ) return image . convert ( 'RGB' ) if colorspace == 'GRAY' : return image . convert ( 'L' ) return image
Sets the image s colorspace . This is typical RGB or GRAY but may be other things depending on your choice of Engine .
99
27
8,201
def _get_raw_data ( self , image , format , quality ) : ImageFile . MAXBLOCK = 1024 * 1024 buf = StringIO ( ) try : # ptimize makes the encoder do a second pass over the image, if # the format supports it. image . save ( buf , format = format , quality = quality , optimize = 1 ) except IOError : # optimize is a no-go, omit it this attempt. image . save ( buf , format = format , quality = quality ) raw_data = buf . getvalue ( ) buf . close ( ) return raw_data
Returns the raw data from the Image which can be directly written to a something be it a file - like object or a database .
126
26
8,202
def enable ( self ) : with self . _lock : if self . _event_listener_thread is None : self . _event_listener_thread = WVAEventListenerThread ( self , self . _http_client ) self . _event_listener_thread . start ( )
Enable the stream thread
63
4
8,203
def disable ( self ) : with self . _lock : if self . _event_listener_thread is not None : self . _event_listener_thread . stop ( ) self . _event_listener_thread = None
Disconnect from the event stream
50
6
8,204
def get_status ( self ) : with self . _lock : if self . _event_listener_thread is None : return EVENT_STREAM_STATE_DISABLED else : return self . _event_listener_thread . get_state ( )
Get the current status of the event stream system
56
9
8,205
def _parse_one_event ( self ) : # WVA includes \r\n between messages which the parser doesn't like, so we # throw away any data before a opening brace try : open_brace_idx = self . _buf . index ( '{' ) except ValueError : self . _buf = six . u ( '' ) # no brace found else : if open_brace_idx > 0 : self . _buf = self . _buf [ open_brace_idx : ] try : event , idx = self . _decoder . raw_decode ( self . _buf ) self . _buf = self . _buf [ idx : ] return event except ValueError : return None
Parse the stream buffer and return either a single event or None
153
13
8,206
def guess_codec ( file , errors = "strict" , require_char = False ) : # mapping of gedcom character set specifiers to Python encoding names gedcom_char_to_codec = { 'ansel' : 'gedcom' , } # check BOM first bom_codec = check_bom ( file ) bom_size = file . tell ( ) codec = bom_codec or 'gedcom' # scan header until CHAR or end of header while True : # this stops at '\n' line = file . readline ( ) if not line : raise IOError ( "Unexpected EOF while reading GEDCOM header" ) # do not decode bytes to strings here, reason is that some # stupid apps split CONC record at byte level (in middle of # of multi-byte characters). This implies that we can only # work with encodings that have ASCII as single-byte subset. line = line . lstrip ( ) . rstrip ( b"\r\n" ) words = line . split ( ) if len ( words ) >= 2 and words [ 0 ] == b"0" and words [ 1 ] != b"HEAD" : # past header but have not seen CHAR if require_char : raise CodecError ( "GEDCOM header does not have CHAR record" ) else : break elif len ( words ) >= 3 and words [ 0 ] == b"1" and words [ 1 ] == b"CHAR" : try : encoding = words [ 2 ] . decode ( codec , errors ) encoding = gedcom_char_to_codec . get ( encoding . lower ( ) , encoding . lower ( ) ) new_codec = codecs . lookup ( encoding ) . name except LookupError : raise CodecError ( "Unknown codec name {0}" . format ( encoding ) ) if bom_codec is None : codec = new_codec elif new_codec != bom_codec : raise CodecError ( "CHAR codec {0} is different from BOM " "codec {1}" . format ( new_codec , bom_codec ) ) break return codec , bom_size
Look at file contents and guess its correct encoding .
462
10
8,207
def records0 ( self , tag = None ) : _log . debug ( "in records0" ) for offset , xtag in self . index0 : _log . debug ( " records0: offset: %s; xtag: %s" , offset , xtag ) if tag is None or tag == xtag : yield self . read_record ( offset )
Iterator over all level = 0 records .
82
8
8,208
def read_record ( self , offset ) : _log . debug ( "in read_record(%s)" , offset ) stack = [ ] # stores per-level current records reclevel = None for gline in self . gedcom_lines ( offset ) : _log . debug ( " read_record, gline: %s" , gline ) level = gline . level if reclevel is None : # this is the first record, remember its level reclevel = level elif level <= reclevel : # stop at the record of the same or higher (smaller) level break # All previously seen records at this level and below can # be finalized now for rec in reversed ( stack [ level : ] ) : # decode bytes value into string if rec : if rec . value is not None : rec . value = rec . value . decode ( self . _encoding , self . _errors ) rec . freeze ( ) # _log.debug(" read_record, rec: %s", rec) del stack [ level + 1 : ] # extend stack to fit this level (and make parent levels if needed) stack . extend ( [ None ] * ( level + 1 - len ( stack ) ) ) # make Record out of it (it can be updated later) parent = stack [ level - 1 ] if level > 0 else None rec = self . _make_record ( parent , gline ) # store as current record at this level stack [ level ] = rec for rec in reversed ( stack [ reclevel : ] ) : if rec : if rec . value is not None : rec . value = rec . value . decode ( self . _encoding , self . _errors ) rec . freeze ( ) _log . debug ( " read_record, rec: %s" , rec ) return stack [ reclevel ] if stack else None
Read next complete record from a file starting at given position .
388
12
8,209
def _make_record ( self , parent , gline ) : if parent and gline . tag in ( "CONT" , "CONC" ) : # concatenate, only for non-BLOBs if parent . tag != "BLOB" : # have to be careful concatenating empty/None values value = gline . value if gline . tag == "CONT" : value = b"\n" + ( value or b"" ) if value is not None : parent . value = ( parent . value or b"" ) + value return None # avoid infinite cycle dialect = model . DIALECT_DEFAULT if not ( gline . level == 0 and gline . tag == "HEAD" ) and self . _header : dialect = self . dialect rec = model . make_record ( level = gline . level , xref_id = gline . xref_id , tag = gline . tag , value = gline . value , sub_records = [ ] , offset = gline . offset , dialect = dialect , parser = self ) # add to parent's sub-records list if parent : parent . sub_records . append ( rec ) return rec
Process next record .
254
4
8,210
def validate_options ( subscription_key , text ) : if not subscription_key or len ( subscription_key ) == 0 : print 'Error: Warning the option subscription_key should contain a string.' print USAGE sys . exit ( 3 ) if not text or len ( text ) == 0 : print 'Error: Warning the option text should contain a string.' print USAGE sys . exit ( 3 )
Perform sanity checks on threshold values
84
7
8,211
def main ( ) : # Parse arguments parser = OptionParser ( ) parser . add_option ( '-n' , '--subscription_key' , dest = 'subscription_key' , help = 'subscription_key for authentication' ) parser . add_option ( '-t' , '--text' , dest = 'text' , help = 'text to synthesize' ) parser . add_option ( '-l' , '--language' , dest = 'language' , help = 'language' ) parser . add_option ( '-g' , '--gender' , dest = 'gender' , help = 'gender' ) parser . add_option ( '-d' , '--directory' , dest = 'directory' , help = 'directory to store the file' ) ( options , args ) = parser . parse_args ( ) subscription_key = options . subscription_key text = options . text language = options . language gender = options . gender directory = options . directory # Perform sanity checks on options validate_options ( subscription_key , text ) if not directory : directory = default_directory if not language : language = default_language if not gender : gender = default_gender # format = 'riff-16khz-16bit-mono-pcm' format = 'riff-8khz-8bit-mono-mulaw' # lang = 'en-AU' # gender = 'Female' tts_msspeak = MSSpeak ( subscription_key , '/tmp/' ) tts_msspeak . set_cache ( False ) output_filename = tts_msspeak . speak ( text , language , gender , format ) print 'Recorded TTS to %s%s' % ( directory , output_filename )
Parse options and process text to Microsoft Translate
386
10
8,212
def sample ( self ) : # Response: {'VehicleSpeed': {'timestamp': '2015-03-20T18:00:49Z', 'value': 223.368515}} data = self . _http_client . get ( "vehicle/data/{}" . format ( self . name ) ) [ self . name ] dt = arrow . get ( data [ "timestamp" ] ) . datetime value = data [ "value" ] return VehicleDataSample ( value , dt )
Get the current value of this vehicle data element
111
9
8,213
def _get_or_create_bucket ( self , name ) : try : return self . connection . get_bucket ( name ) except S3ResponseError , e : if AUTO_CREATE_BUCKET : return self . connection . create_bucket ( name ) raise ImproperlyConfigured , ( "Bucket specified by " "AWS_STORAGE_BUCKET_NAME does not exist. Buckets can be " "automatically created by setting AWS_AUTO_CREATE_BUCKET=True" )
Retrieves a bucket if it exists otherwise creates it .
119
12
8,214
def _compress_content ( self , content ) : zbuf = StringIO ( ) zfile = GzipFile ( mode = 'wb' , compresslevel = 6 , fileobj = zbuf ) zfile . write ( content . read ( ) ) zfile . close ( ) content . file = zbuf return content
Gzip a given string .
68
6
8,215
def url ( self , name ) : name = urllib . quote_plus ( self . _clean_name ( name ) , safe = '/' ) if self . bucket_cname : return "http://%s/%s" % ( self . bucket_cname , name ) elif self . host : return "http://%s/%s/%s" % ( self . host , self . bucket_name , name ) # No host ? Then it's the default region return "http://s3.amazonaws.com/%s/%s" % ( self . bucket_name , name )
Since we assume all public storage with no authorization keys we can just simply dump out a URL rather than having to query S3 for new keys .
133
29
8,216
def normalize_weekly ( data ) : if "tblMenu" not in data [ "result_data" ] [ "Document" ] : data [ "result_data" ] [ "Document" ] [ "tblMenu" ] = [ ] if isinstance ( data [ "result_data" ] [ "Document" ] [ "tblMenu" ] , dict ) : data [ "result_data" ] [ "Document" ] [ "tblMenu" ] = [ data [ "result_data" ] [ "Document" ] [ "tblMenu" ] ] for day in data [ "result_data" ] [ "Document" ] [ "tblMenu" ] : if "tblDayPart" not in day : continue if isinstance ( day [ "tblDayPart" ] , dict ) : day [ "tblDayPart" ] = [ day [ "tblDayPart" ] ] for meal in day [ "tblDayPart" ] : if isinstance ( meal [ "tblStation" ] , dict ) : meal [ "tblStation" ] = [ meal [ "tblStation" ] ] for station in meal [ "tblStation" ] : if isinstance ( station [ "tblItem" ] , dict ) : station [ "tblItem" ] = [ station [ "tblItem" ] ] return data
Normalization for dining menu data
296
6
8,217
def get_meals ( v2_response , building_id ) : result_data = v2_response [ "result_data" ] meals = [ ] day_parts = result_data [ "days" ] [ 0 ] [ "cafes" ] [ building_id ] [ "dayparts" ] [ 0 ] for meal in day_parts : stations = [ ] for station in meal [ "stations" ] : items = [ ] for item_id in station [ "items" ] : item = result_data [ "items" ] [ item_id ] new_item = { } new_item [ "txtTitle" ] = item [ "label" ] new_item [ "txtPrice" ] = "" new_item [ "txtNutritionInfo" ] = "" new_item [ "txtDescription" ] = item [ "description" ] new_item [ "tblSide" ] = "" new_item [ "tblFarmToFork" ] = "" attrs = [ { "description" : item [ "cor_icon" ] [ attr ] } for attr in item [ "cor_icon" ] ] if len ( attrs ) == 1 : new_item [ "tblAttributes" ] = { "txtAttribute" : attrs [ 0 ] } elif len ( attrs ) > 1 : new_item [ "tblAttributes" ] = { "txtAttribute" : attrs } else : new_item [ "tblAttributes" ] = "" if isinstance ( item [ "options" ] , list ) : item [ "options" ] = { } if "values" in item [ "options" ] : for side in item [ "options" ] [ "values" ] : new_item [ "tblSide" ] = { "txtSideName" : side [ "label" ] } items . append ( new_item ) stations . append ( { "tblItem" : items , "txtStationDescription" : station [ "label" ] } ) meals . append ( { "tblStation" : stations , "txtDayPartDescription" : meal [ "label" ] } ) return meals
Extract meals into old format from a DiningV2 JSON response
464
14
8,218
def menu ( self , venue_id , date ) : query = "&date=" + date response = self . _request ( V2_ENDPOINTS [ 'MENUS' ] + venue_id + query ) return response
Get the menu for the venue corresponding to venue_id on date .
50
14
8,219
def venues ( self ) : response = self . _request ( V2_ENDPOINTS [ 'VENUES' ] ) # Normalize `dateHours` to array for venue in response [ "result_data" ] [ "document" ] [ "venue" ] : if venue . get ( "id" ) in VENUE_NAMES : venue [ "name" ] = VENUE_NAMES [ venue . get ( "id" ) ] if isinstance ( venue . get ( "dateHours" ) , dict ) : venue [ "dateHours" ] = [ venue [ "dateHours" ] ] if "dateHours" in venue : for dh in venue [ "dateHours" ] : if isinstance ( dh . get ( "meal" ) , dict ) : dh [ "meal" ] = [ dh [ "meal" ] ] return response
Get a list of all venue objects .
185
8
8,220
def menu_daily ( self , building_id ) : today = str ( datetime . date . today ( ) ) v2_response = DiningV2 ( self . bearer , self . token ) . menu ( building_id , today ) response = { 'result_data' : { 'Document' : { } } } response [ "result_data" ] [ "Document" ] [ "menudate" ] = datetime . datetime . strptime ( today , '%Y-%m-%d' ) . strftime ( '%-m/%d/%Y' ) if building_id in VENUE_NAMES : response [ "result_data" ] [ "Document" ] [ "location" ] = VENUE_NAMES [ building_id ] else : response [ "result_data" ] [ "Document" ] [ "location" ] = v2_response [ "result_data" ] [ "days" ] [ 0 ] [ "cafes" ] [ building_id ] [ "name" ] response [ "result_data" ] [ "Document" ] [ "tblMenu" ] = { "tblDayPart" : get_meals ( v2_response , building_id ) } return response
Get a menu object corresponding to the daily menu for the venue with building_id .
274
17
8,221
def menu_weekly ( self , building_id ) : din = DiningV2 ( self . bearer , self . token ) response = { 'result_data' : { 'Document' : { } } } days = [ ] for i in range ( 7 ) : date = str ( datetime . date . today ( ) + datetime . timedelta ( days = i ) ) v2_response = din . menu ( building_id , date ) if building_id in VENUE_NAMES : response [ "result_data" ] [ "Document" ] [ "location" ] = VENUE_NAMES [ building_id ] else : response [ "result_data" ] [ "Document" ] [ "location" ] = v2_response [ "result_data" ] [ "days" ] [ 0 ] [ "cafes" ] [ building_id ] [ "name" ] formatted_date = datetime . datetime . strptime ( date , '%Y-%m-%d' ) . strftime ( '%-m/%d/%Y' ) days . append ( { "tblDayPart" : get_meals ( v2_response , building_id ) , "menudate" : formatted_date } ) response [ "result_data" ] [ "Document" ] [ "tblMenu" ] = days return normalize_weekly ( response )
Get an array of menu objects corresponding to the weekly menu for the venue with building_id .
305
19
8,222
def to_cr ( self ) : data_new = self . data . copy ( ) data_new [ 'rpha' ] = - 1.5 * data_new [ 'chargeability' ] # now that we have magnitude and phase, compute the impedance Zt data_new [ 'Zt' ] = data_new [ 'r' ] * np . exp ( data_new [ 'rpha' ] * 1j / 1000.0 ) cr = reda . CR ( data = data_new ) return cr
Convert container to a complex resistivity container using the CPA - conversion .
112
16
8,223
def apply ( filter ) : def decorator ( callable ) : return lambda * args , * * kwargs : filter ( callable ( * args , * * kwargs ) ) return decorator
Manufacture decorator that filters return value with given function .
43
12
8,224
def format_outpat ( outpat , xn ) : return outpat . format ( year = str ( xn . date . year ) , month = '{:02}' . format ( xn . date . month ) , fy = str ( xn . date . year if xn . date . month < 7 else xn . date . year + 1 ) , date = xn . date )
Format an outpat for the given transaction .
88
9
8,225
def get ( self , name , acc = None , default = None ) : if acc in self . data [ 'accounts' ] and name in self . data [ 'accounts' ] [ acc ] : return self . data [ 'accounts' ] [ acc ] [ name ] if name in self . data : return self . data [ name ] return default
Return the named config for the given account .
76
9
8,226
def outdir ( self , acc = None ) : rootdir = self . rootdir ( ) outdir = self . get ( 'outdir' , acc = acc ) dir = os . path . join ( rootdir , outdir ) if rootdir and outdir else None if not os . path . exists ( dir ) : os . makedirs ( dir ) return dir
Return the outdir for the given account .
79
9
8,227
def outpat ( self , acc = None ) : outdir = self . outdir ( acc ) outpat = self . get ( 'outpat' , acc = acc ) return os . path . join ( outdir , outpat ) if outdir and outpat else None
Determine the full outfile pattern for the given account .
58
13
8,228
def rulesdir ( self , acc = None ) : rootdir = self . rootdir ( ) rulesdir = self . get ( 'rulesdir' , acc = acc , default = [ ] ) return os . path . join ( rootdir , rulesdir ) if rootdir and rulesdir else None
Determine the rulesdir for the given account .
62
11
8,229
def rulefiles ( self , acc = None ) : rulesdir = self . rulesdir ( acc ) rules = [ os . path . join ( rulesdir , x ) for x in self . get ( 'rules' , acc , [ ] ) ] if acc is not None : rules += self . rulefiles ( acc = None ) return rules
Return a list of rulefiles for the given account .
71
11
8,230
def download_data ( identifier , outdir ) : # determine target if use_local_data_repository is not None : url_base = 'file:' + request . pathname2url ( use_local_data_repository + os . sep ) else : url_base = repository_url print ( 'url_base: {}' . format ( url_base ) ) url = url_base + inventory_filename # download inventory file filename , headers = request . urlretrieve ( url ) df = pd . read_csv ( filename , delim_whitespace = True , comment = '#' , header = None , names = [ 'identifier' , 'rel_path' ] , ) # find relative path to data file rel_path_query = df . query ( 'identifier == "{}"' . format ( identifier ) ) if rel_path_query . shape [ 0 ] == 0 : raise Exception ( 'identifier not found' ) rel_path = rel_path_query [ 'rel_path' ] . values [ 0 ] # download the file url = url_base + rel_path print ( 'data url: {}' . format ( url ) ) filename , headers = request . urlretrieve ( url ) if not os . path . isdir ( outdir ) : os . makedirs ( outdir ) zip_obj = zipfile . ZipFile ( filename ) zip_obj . extractall ( outdir )
Download data from a separate data repository for testing .
311
10
8,231
def append ( self , item ) : if item in self : self . items [ item [ 0 ] ] . append ( item [ 1 ] ) else : self . items [ item [ 0 ] ] = [ item [ 1 ] ]
Append an item to the score set .
48
9
8,232
def scores ( self ) : return map ( lambda x : ( x [ 0 ] , sum ( x [ 1 ] ) * len ( x [ 1 ] ) ** - .5 ) , iter ( self . items . viewitems ( ) ) )
Return a list of the items with their final scores .
51
11
8,233
def highest ( self ) : scores = self . scores ( ) if not scores : return None maxscore = max ( map ( score , scores ) ) return filter ( lambda x : score ( x ) == maxscore , scores )
Return the items with the higest score .
47
9
8,234
def is_empty_shape ( sh : ShExJ . Shape ) -> bool : return sh . closed is None and sh . expression is None and sh . extra is None and sh . semActs is None
Determine whether sh has any value
44
8
8,235
def fix_text_escapes ( self , txt : str , quote_char : str ) -> str : def _subf ( matchobj ) : return matchobj . group ( 0 ) . translate ( self . re_trans_table ) if quote_char : txt = re . sub ( r'\\' + quote_char , quote_char , txt ) return re . sub ( r'\\.' , _subf , txt , flags = re . MULTILINE + re . DOTALL + re . UNICODE )
Fix the various text escapes
117
5
8,236
def fix_re_escapes ( self , txt : str ) -> str : def _subf ( matchobj ) : # o = self.fix_text_escapes(matchobj.group(0)) o = matchobj . group ( 0 ) . translate ( self . re_trans_table ) if o [ 1 ] in '\b\f\n\t\r' : return o [ 0 ] + 'bfntr' [ '\b\f\n\t\r' . index ( o [ 1 ] ) ] else : return o if o [ 1 ] in '\\.?*+^$()[]{|}' else o [ 1 ] return re . sub ( r'\\.' , _subf , txt , flags = re . MULTILINE + re . DOTALL + re . UNICODE )
The ShEx RE engine allows escaping any character . We have to remove that escape for everything except those that CAN be legitimately escaped
183
25
8,237
def _iter_response ( self , url , params = None ) : if params is None : params = { } params [ 'page_number' ] = 1 # Last page lists itself as next page while True : response = self . _request ( url , params ) for item in response [ 'result_data' ] : yield item # Last page lists itself as next page if response [ 'service_meta' ] [ 'next_page_number' ] == params [ 'page_number' ] : break params [ 'page_number' ] += 1
Return an enumerable that iterates through a multi - page API request
117
14
8,238
def search ( self , params , validate = False ) : if self . val_info is None : self . val_info = self . search_params ( ) if validate : errors = self . validate ( self . val_info , params ) if not validate or len ( errors ) == 0 : return self . _iter_response ( ENDPOINTS [ 'SEARCH' ] , params ) else : return { 'Errors' : errors }
Return a generator of section objects for the given search params .
95
12
8,239
def course ( self , dept , course_number ) : response = self . _request ( path . join ( ENDPOINTS [ 'CATALOG' ] , dept , course_number ) ) return response [ 'result_data' ] [ 0 ]
Return an object of semester - independent course info . All arguments should be strings .
58
16
8,240
def section ( self , dept , course_number , sect_number ) : section_id = dept + course_number + sect_number sections = self . search ( { 'course_id' : section_id } ) try : return next ( sections ) except StopIteration : raise ValueError ( 'Section %s not found' % section_id )
Return a single section object for the given section . All arguments should be strings . Throws a ValueError if the section is not found .
77
28
8,241
def parse_to_gvid ( v ) : from geoid . civick import GVid from geoid . acs import AcsGeoid m1 = '' try : return GVid . parse ( v ) except ValueError as e : m1 = str ( e ) try : return AcsGeoid . parse ( v ) . convert ( GVid ) except ValueError as e : raise ValueError ( "Failed to parse to either ACS or GVid: {}; {}" . format ( m1 , str ( e ) ) )
Parse an ACS Geoid or a GVID to a GVID
118
14
8,242
def base62_decode ( string ) : alphabet = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ' base = len ( alphabet ) strlen = len ( string ) num = 0 idx = 0 for char in string : power = ( strlen - ( idx + 1 ) ) num += alphabet . index ( char ) * ( base ** power ) idx += 1 return int ( num )
Decode a Base X encoded string into the number
109
10
8,243
def make_classes ( base_class , module ) : from functools import partial for k in names : cls = base_class . class_factory ( k . capitalize ( ) ) cls . augment ( ) setattr ( module , k . capitalize ( ) , cls ) setattr ( module , 'get_class' , partial ( get_class , module ) )
Create derived classes and put them into the same module as the base class .
81
15
8,244
def generate_all ( sumlevel , d ) : from geoid . civick import GVid from geoid . tiger import TigerGeoid from geoid . acs import AcsGeoid sumlevel = int ( sumlevel ) d = dict ( d . items ( ) ) # Map common name variants if 'cousub' in d : d [ 'cosub' ] = d [ 'cousub' ] del d [ 'cousub' ] if 'blkgrp' in d : d [ 'blockgroup' ] = d [ 'blkgrp' ] del d [ 'blkgrp' ] if 'zcta5' in d : d [ 'zcta' ] = d [ 'zcta5' ] del d [ 'zcta5' ] gvid_class = GVid . resolve_summary_level ( sumlevel ) if not gvid_class : return { } geoidt_class = TigerGeoid . resolve_summary_level ( sumlevel ) geoid_class = AcsGeoid . resolve_summary_level ( sumlevel ) try : return dict ( gvid = str ( gvid_class ( * * d ) ) , geoid = str ( geoid_class ( * * d ) ) , geoidt = str ( geoidt_class ( * * d ) ) ) except : raise
Generate a dict that includes all of the available geoid values with keys for the most common names for those values .
297
24
8,245
def _generate_names ( ) : from ambry import get_library l = get_library ( ) counties = l . partition ( 'census.gov-acs-geofile-2009-geofile50-20095-50' ) states = l . partition ( 'census.gov-acs-geofile-2009-geofile40-20095-40' ) names = { } for row in counties . remote_datafile . reader : names [ ( row . state , row . county ) ] = row . name for row in states . remote_datafile . reader : if row . component == '00' : names [ ( row . state , 0 ) ] = row . name pprint . pprint ( names )
Code to generate the state and county names
160
8
8,246
def division_name ( self ) : try : return next ( e for e in self . type_names_re . search ( self . name ) . groups ( ) if e is not None ) except AttributeError : # The search will fail for 'District of Columbia' return ''
The type designation for the county or county equivalent such as County Parish or Borough
59
15
8,247
def augment ( cls ) : import re level_name = cls . __name__ . lower ( ) cls . sl = names [ level_name ] cls . class_map [ cls . __name__ . lower ( ) ] = cls cls . sl_map [ cls . sl ] = cls cls . fmt = cls . make_format_string ( cls . __name__ . lower ( ) ) cls . regex_str = cls . make_regex ( cls . __name__ . lower ( ) ) cls . regex = re . compile ( cls . regex_str ) # List of field names cls . level = level_name cls . fields = segments [ cls . sl ]
Augment the class with computed formats regexes and other things . This caches these values so they don t have to be created for every instance .
162
29
8,248
def get_class ( cls , name_or_sl ) : try : return cls . sl_map [ int ( name_or_sl ) ] except TypeError as e : raise TypeError ( "Bad name or sl: {} : {}" . format ( name_or_sl , e ) ) except ValueError : try : return cls . class_map [ name_or_sl . lower ( ) ] except ( KeyError , ValueError ) : raise NotASummaryName ( "Value '{}' is not a valid summary level" . format ( name_or_sl ) )
Return a derived class based on the class name or the summary_level
129
14
8,249
def geo_name ( self ) : if self . level == 'county' : return str ( self . county_name ) elif self . level == 'state' : return self . state_name else : if hasattr ( self , 'county' ) : return "{} in {}" . format ( self . level , str ( self . county_name ) ) elif hasattr ( self , 'state' ) : return "{} in {}" . format ( self . level , self . state_name ) else : return "a {}" . format ( self . level )
Return a name of the state or county or for other lowever levels the name of the level type in the county .
124
24
8,250
def parse ( cls , gvid , exception = True ) : if gvid == 'invalid' : return cls . get_class ( 'null' ) ( 0 ) if not bool ( gvid ) : return None if not isinstance ( gvid , six . string_types ) : raise TypeError ( "Can't parse; not a string. Got a '{}' " . format ( type ( gvid ) ) ) try : if not cls . sl : # Civick and ACS include the SL, so can call from base type. if six . PY3 : fn = cls . decode else : fn = cls . decode . __func__ sl = fn ( gvid [ 0 : cls . sl_width ] ) else : sl = cls . sl # Otherwise must use derived class. except ValueError as e : if exception : raise ValueError ( "Failed to parse gvid '{}': {}" . format ( gvid , str ( e ) ) ) else : return cls . get_class ( 'null' ) ( 0 ) try : cls = cls . sl_map [ sl ] except KeyError : if exception : raise ValueError ( "Failed to parse gvid '{}': Unknown summary level '{}' " . format ( gvid , sl ) ) else : return cls . get_class ( 'null' ) ( 0 ) m = cls . regex . match ( gvid ) if not m : raise ValueError ( "Failed to match '{}' to '{}' " . format ( gvid , cls . regex_str ) ) d = m . groupdict ( ) if not d : return None if six . PY3 : fn = cls . decode else : fn = cls . decode . __func__ d = { k : fn ( v ) for k , v in d . items ( ) } try : del d [ 'sl' ] except KeyError : pass return cls ( * * d )
Parse a string value into the geoid of this class .
430
13
8,251
def convert ( self , root_cls ) : d = self . __dict__ d [ 'sl' ] = self . sl try : cls = root_cls . get_class ( root_cls . sl ) except ( AttributeError , TypeError ) : # Hopefully because root_cls is a module cls = root_cls . get_class ( self . sl ) return cls ( * * d )
Convert to another derived class . cls is the base class for the derived type ie AcsGeoid TigerGeoid etc .
93
27
8,252
def promote ( self , level = None ) : if level is None : if len ( self . fields ) < 2 : if self . level in ( 'region' , 'division' , 'state' , 'ua' ) : cls = self . get_class ( 'us' ) else : return None else : cls = self . get_class ( self . fields [ - 2 ] ) else : cls = self . get_class ( level ) d = dict ( self . __dict__ . items ( ) ) d [ 'sl' ] = self . sl return cls ( * * d )
Convert to the next higher level summary level
129
9
8,253
def allval ( self ) : d = dict ( self . __dict__ . items ( ) ) d [ 'sl' ] = self . sl d [ self . level ] = 0 cls = self . get_class ( self . sl ) return cls ( * * d )
Convert the last value to zero . This form represents the entire higher summary level at the granularity of the lower summary level . For example for a county it means All counties in the state
60
38
8,254
def nullval ( cls ) : d = dict ( cls . __dict__ . items ( ) ) for k in d : d [ k ] = 0 d [ 'sl' ] = cls . sl d [ cls . level ] = 0 return cls ( * * d )
Create a new instance where all of the values are 0
62
11
8,255
def split_name ( name ) : given1 , _ , rem = name . partition ( "/" ) surname , _ , given2 = rem . partition ( "/" ) return given1 . strip ( ) , surname . strip ( ) , given2 . strip ( )
Extracts pieces of name from full name string .
56
11
8,256
def parse_name_altree ( record ) : name_tuple = split_name ( record . value ) if name_tuple [ 1 ] == '?' : name_tuple = ( name_tuple [ 0 ] , '' , name_tuple [ 2 ] ) maiden = record . sub_tag_value ( "SURN" ) if maiden : # strip "(maiden)" from family name ending = '(' + maiden + ')' surname = name_tuple [ 1 ] if surname . endswith ( ending ) : surname = surname [ : - len ( ending ) ] . rstrip ( ) if surname == '?' : surname = '' name_tuple = ( name_tuple [ 0 ] , surname , name_tuple [ 2 ] , maiden ) return name_tuple
Parse NAME structure assuming ALTREE dialect .
171
10
8,257
def parse_name_myher ( record ) : name_tuple = split_name ( record . value ) married = record . sub_tag_value ( "_MARNM" ) if married : maiden = name_tuple [ 1 ] name_tuple = ( name_tuple [ 0 ] , married , name_tuple [ 2 ] , maiden ) return name_tuple
Parse NAME structure assuming MYHERITAGE dialect .
82
11
8,258
def number ( items ) : n = len ( items ) if n == 0 : return items places = str ( int ( math . log10 ( n ) // 1 + 1 ) ) format = '[{0[0]:' + str ( int ( places ) ) + 'd}] {0[1]}' return map ( lambda x : format . format ( x ) , enumerate ( items ) )
Maps numbering onto given values
85
5
8,259
def filter_yn ( string , default = None ) : if string . startswith ( ( 'Y' , 'y' ) ) : return True elif string . startswith ( ( 'N' , 'n' ) ) : return False elif not string and default is not None : return True if default else False raise InvalidInputError
Return True if yes False if no or the default .
73
11
8,260
def filter_int ( string , default = None , start = None , stop = None ) : try : i = int ( string ) if start is not None and i < start : raise InvalidInputError ( "value too small" ) if stop is not None and i >= stop : raise InvalidInputError ( "value too large" ) return i except ValueError : if not string and default is not None : # empty string, default was given return default else : raise InvalidInputError
Return the input integer or the default .
100
8
8,261
def filter_decimal ( string , default = None , lower = None , upper = None ) : try : d = decimal . Decimal ( string ) if lower is not None and d < lower : raise InvalidInputError ( "value too small" ) if upper is not None and d >= upper : raise InvalidInputError ( "value too large" ) return d except decimal . InvalidOperation : if not string and default is not None : # empty string, default was given return default else : raise InvalidInputError ( "invalid decimal number" )
Return the input decimal number or the default .
114
9
8,262
def filter_pastdate ( string , default = None ) : if not string and default is not None : return default today = datetime . date . today ( ) # split the string try : parts = map ( int , re . split ( '\D+' , string ) ) # split the string except ValueError : raise InvalidInputError ( "invalid date; use format: DD [MM [YYYY]]" ) if len ( parts ) < 1 or len ( parts ) > 3 : raise InvalidInputError ( "invalid date; use format: DD [MM [YYYY]]" ) if len ( parts ) == 1 : # no month or year given; append month parts . append ( today . month - 1 if parts [ 0 ] > today . day else today . month ) if parts [ 1 ] < 1 : parts [ 1 ] = 12 if len ( parts ) == 2 : # no year given; append year if parts [ 1 ] > today . month or parts [ 1 ] == today . month and parts [ 0 ] > today . day : parts . append ( today . year - 1 ) else : parts . append ( today . year ) parts . reverse ( ) try : date = datetime . date ( * parts ) if date > today : raise InvalidInputError ( "cannot choose a date in the future" ) return date except ValueError : print parts raise InvalidInputError ( "invalid date; use format: DD [MM [YYYY]]" )
Coerce to a date not beyond the current date
311
11
8,263
def input ( self , filter_fn , prompt ) : while True : try : return filter_fn ( raw_input ( prompt ) ) except InvalidInputError as e : if e . message : self . show ( 'ERROR: ' + e . message ) except KeyboardInterrupt : raise RejectWarning
Prompt user until valid input is received .
63
9
8,264
def text ( self , prompt , default = None ) : prompt = prompt if prompt is not None else 'Enter some text' prompt += " [{0}]: " . format ( default ) if default is not None else ': ' return self . input ( curry ( filter_text , default = default ) , prompt )
Prompts the user for some text with optional default
67
11
8,265
def decimal ( self , prompt , default = None , lower = None , upper = None ) : prompt = prompt if prompt is not None else "Enter a decimal number" prompt += " [{0}]: " . format ( default ) if default is not None else ': ' return self . input ( curry ( filter_decimal , default = default , lower = lower , upper = upper ) , prompt )
Prompts user to input decimal with optional default and bounds .
85
13
8,266
def pastdate ( self , prompt , default = None ) : prompt = prompt if prompt is not None else "Enter a past date" if default is not None : prompt += " [" + default . strftime ( '%d %m %Y' ) + "]" prompt += ': ' return self . input ( curry ( filter_pastdate , default = default ) , prompt )
Prompts user to input a date in the past .
80
12
8,267
def choose ( self , prompt , items , default = None ) : if default is not None and ( default >= len ( items ) or default < 0 ) : raise IndexError prompt = prompt if prompt is not None else "Choose from following:" self . show ( prompt + '\n' ) self . show ( "\n" . join ( number ( items ) ) ) # show the items prompt = "Enter number of chosen item" prompt += " [{0}]: " . format ( default ) if default is not None else ': ' return items [ self . input ( curry ( filter_int , default = default , start = 0 , stop = len ( items ) ) , prompt ) ]
Prompts the user to choose one item from a list .
145
13
8,268
def goose_extractor ( url ) : article = Goose ( ) . extract ( url = url ) return article . title , article . meta_description , article . cleaned_text
webpage extraction using Goose Library
37
6
8,269
def _tokenize ( sentence ) : _tokens = nltk . word_tokenize ( sentence ) tokens = [ stemmer . stem ( tk ) for tk in _tokens ] return tokens
Tokenizer and Stemmer
46
6
8,270
def _title_similarity_score ( full_text , title ) : sentences = sentence_tokenizer ( full_text ) norm = _normalize ( [ title ] + sentences ) similarity_matrix = pairwise_kernels ( norm , metric = 'cosine' ) return sorted ( zip ( similarity_matrix [ 0 , 1 : ] , range ( len ( similarity_matrix ) ) , sentences ) , key = lambda tup : tup [ 0 ] , reverse = True )
Similarity scores for sentences with title in descending order
105
10
8,271
def _aggregrate_scores ( its , tss , num_sentences ) : final = [ ] for i , el in enumerate ( its ) : for j , le in enumerate ( tss ) : if el [ 2 ] == le [ 2 ] : assert el [ 1 ] == le [ 1 ] final . append ( ( el [ 1 ] , i + j , el [ 2 ] ) ) _final = sorted ( final , key = lambda tup : tup [ 1 ] ) [ : num_sentences ] return sorted ( _final , key = lambda tup : tup [ 0 ] )
rerank the two vectors by min aggregrate rank reorder
133
14
8,272
def _eval_meta_as_summary ( meta ) : if meta == '' : return False if len ( meta ) > 500 : return False if 'login' in meta . lower ( ) : return False return True
some crude heuristics for now most are implemented on bot - side with domain whitelists
45
19
8,273
def get_subscriptions ( self ) : # Example: {'subscriptions': ['subscriptions/TripDistance~sub', 'subscriptions/FuelRate~sub', ]} subscriptions = [ ] for uri in self . get_http_client ( ) . get ( "subscriptions" ) . get ( 'subscriptions' ) : subscriptions . append ( self . get_subscription ( uri . split ( "/" ) [ - 1 ] ) ) return subscriptions
Return a list of subscriptions currently active for this WVA device
105
12
8,274
def get_event_stream ( self ) : if self . _event_stream is None : self . _event_stream = WVAEventStream ( self . _http_client ) return self . _event_stream
Get the event stream associated with this WVA
46
9
8,275
def _populateHistogram ( self ) : try : buildHistogram . populate1DHist ( self . _data , self . histogram , self . minValue , self . maxValue , self . binWidth ) except : if ( ( self . _data . max ( ) - self . _data . min ( ) ) < self . binWidth ) : raise ValueError ( "In histogram1d class, the binWidth is " "greater than the data range of the array " "object." ) else : raise SystemError ( "An error processing the array object " "information occured in the buildHistogram " "module of histogram1d." )
Call the C - code that actually populates the histogram
141
12
8,276
def getCenters ( self ) : return np . arange ( self . histogram . size ) * self . binWidth + self . minValue
Returns histogram s centers .
31
6
8,277
def book_reservation ( self , sessionid , roomid , start , end ) : duration = int ( ( end - start ) . seconds / 60 ) format = "%Y-%m-%dT%H:%M:%S-{}" . format ( self . get_dst_gmt_timezone ( ) ) booking_url = "{}/reserve/{}/{}/?d={}" . format ( BASE_URL , roomid , start . strftime ( format ) , duration ) resp = requests . get ( booking_url , cookies = { "sessionid" : sessionid } ) if resp . status_code == 403 : return { "success" : False , "error" : "Your account does not have permission to book Wharton GSRs!" } resp . raise_for_status ( ) csrfheader = re . search ( r"csrftoken=(.*?);" , resp . headers [ "Set-Cookie" ] ) . group ( 1 ) csrftoken = re . search ( r"<input name=\"csrfmiddlewaretoken\" type=\"hidden\" value=\"(.*?)\"/>" , resp . content . decode ( "utf8" ) ) . group ( 1 ) start_string = start . strftime ( "%I:%M %p" ) if start_string [ 0 ] == "0" : start_string = start_string [ 1 : ] resp = requests . post ( booking_url , cookies = { "sessionid" : sessionid , "csrftoken" : csrfheader } , headers = { "Referer" : booking_url } , data = { "csrfmiddlewaretoken" : csrftoken , "room" : roomid , "start_time" : start_string , "end_time" : end . strftime ( "%a %b %d %H:%M:%S %Y" ) , "date" : start . strftime ( "%B %d, %Y" ) } ) resp . raise_for_status ( ) content = resp . content . decode ( "utf8" ) if "errorlist" in content : error_msg = re . search ( r"class=\"errorlist\"><li>(.*?)</li>" , content ) . group ( 1 ) return { "success" : False , "error" : error_msg } return { "success" : True }
Book a reservation given the session id the room id as an integer and the start and end time as datetimes .
525
23
8,278
def delete_booking ( self , sessionid , booking_id ) : url = "{}{}{}/" . format ( BASE_URL , "/delete/" , booking_id ) cookies = dict ( sessionid = sessionid ) try : resp = requests . get ( url , cookies = cookies , headers = { 'Referer' : '{}{}' . format ( BASE_URL , "/reservations/" ) } ) except resp . exceptions . HTTPError as error : raise APIError ( "Server Error: {}" . format ( error ) ) if resp . status_code == 404 : raise APIError ( "Booking could not be found on server." ) html = resp . content . decode ( "utf8" ) if "https://weblogin.pennkey.upenn.edu" in html : raise APIError ( "Wharton Auth Failed. Session ID is not valid." ) resp . raise_for_status ( ) soup = BeautifulSoup ( html , "html5lib" ) middleware_token = soup . find ( "input" , { 'name' : "csrfmiddlewaretoken" } ) . get ( 'value' ) csrftoken = resp . cookies [ 'csrftoken' ] cookies2 = { 'sessionid' : sessionid , 'csrftoken' : csrftoken } headers = { 'Referer' : url } payload = { 'csrfmiddlewaretoken' : middleware_token } try : resp2 = requests . post ( url , cookies = cookies2 , data = payload , headers = headers ) except resp2 . exceptions . HTTPError as error : raise APIError ( "Server Error: {}" . format ( error ) ) return { "success" : True }
Deletes a Wharton GSR Booking for a given booking and session id .
372
17
8,279
def get_wharton_gsrs ( self , sessionid , date = None ) : if date : date += " {}" . format ( self . get_dst_gmt_timezone ( ) ) else : date = datetime . datetime . utcnow ( ) . strftime ( "%Y-%m-%d %H:%S" ) resp = requests . get ( 'https://apps.wharton.upenn.edu/gsr/api/app/grid_view/' , params = { 'search_time' : date } , cookies = { 'sessionid' : sessionid } ) if resp . status_code == 200 : return resp . json ( ) else : raise APIError ( 'Remote server returned status code {}.' . format ( resp . status_code ) )
Make a request to retrieve Wharton GSR listings .
174
11
8,280
def switch_format ( self , gsr ) : if "error" in gsr : return gsr categories = { "cid" : 1 , "name" : "Huntsman Hall" , "rooms" : [ ] } for time in gsr [ "times" ] : for entry in time : entry [ "name" ] = entry [ "room_number" ] del entry [ "room_number" ] start_time_str = entry [ "start_time" ] end_time = datetime . datetime . strptime ( start_time_str [ : - 6 ] , '%Y-%m-%dT%H:%M:%S' ) + datetime . timedelta ( minutes = 30 ) end_time_str = end_time . strftime ( "%Y-%m-%dT%H:%M:%S" ) + "-{}" . format ( self . get_dst_gmt_timezone ( ) ) time = { "available" : not entry [ "reserved" ] , "start" : entry [ "start_time" ] , "end" : end_time_str , } exists = False for room in categories [ "rooms" ] : if room [ "name" ] == entry [ "name" ] : room [ "times" ] . append ( time ) exists = True if not exists : del entry [ "booked_by_user" ] del entry [ "building" ] if "reservation_id" in entry : del entry [ "reservation_id" ] entry [ "lid" ] = 1 entry [ "gid" ] = 1 entry [ "capacity" ] = 5 entry [ "room_id" ] = int ( entry [ "id" ] ) del entry [ "id" ] entry [ "times" ] = [ time ] del entry [ "reserved" ] del entry [ "end_time" ] del entry [ "start_time" ] categories [ "rooms" ] . append ( entry ) return { "categories" : [ categories ] , "rooms" : categories [ "rooms" ] }
Convert the Wharton GSR format into the studyspaces API format .
459
16
8,281
def get_wharton_gsrs_formatted ( self , sessionid , date = None ) : gsrs = self . get_wharton_gsrs ( sessionid , date ) return self . switch_format ( gsrs )
Return the wharton GSR listing formatted in studyspaces format .
52
14
8,282
def get_options ( ) : options = collections . defaultdict ( list ) for opt_class in config_factory . get_options ( ) : if not issubclass ( opt_class , config_base . Options ) : continue config_options = opt_class ( None ) options [ config_options . group_name ] . extend ( config_options . list ( ) ) return [ ( key , value ) for key , value in options . items ( ) ]
Collect all the options info from the other modules .
99
10
8,283
def check_is_working ( self ) : try : r = requests . post ( "http://{}/" . format ( LAUNDRY_DOMAIN ) , timeout = 60 , data = { "locationid" : "5faec7e9-a4aa-47c2-a514-950c03fac460" , "email" : "pennappslabs@gmail.com" , "washers" : 0 , "dryers" : 0 , "locationalert" : "OK" } ) r . raise_for_status ( ) return "The transaction log for database 'QuantumCoin' is full due to 'LOG_BACKUP'." not in r . text except requests . exceptions . HTTPError : return False
Returns True if the wash alert web interface seems to be working properly or False otherwise .
161
17
8,284
def machine_usage ( self , hall_no ) : try : num = int ( hall_no ) except ValueError : raise ValueError ( "Room Number must be integer" ) r = requests . get ( USAGE_BASE_URL + str ( num ) , timeout = 60 ) parsed = BeautifulSoup ( r . text , 'html5lib' ) usage_table = parsed . find_all ( 'table' , width = '504px' ) [ 0 ] rows = usage_table . find_all ( 'tr' ) usages = { } for i , row in enumerate ( rows ) : day = [ ] hours = row . find_all ( 'td' ) for hour in hours : day . append ( self . busy_dict [ str ( hour [ 'class' ] [ 0 ] ) ] ) usages [ self . days [ i ] ] = day return usages
Returns the average usage of laundry machines every hour for a given hall .
191
14
8,285
def create_message ( from_addr , to_addr , subject , body , encoding = None ) : if encoding == "None" : encoding = None if not encoding : encoding = 'utf-8' msg = MIMEText ( body . encode ( encoding ) , 'plain' , encoding ) msg [ 'Subject' ] = Header ( subject . encode ( encoding ) , encoding ) msg [ 'From' ] = from_addr msg [ 'To' ] = to_addr msg [ 'Date' ] = formatdate ( ) return msg
Create message object for sending email
114
6
8,286
def _obtain_token ( self ) : # don't renew token if hasn't expired yet if self . expiration and self . expiration > datetime . datetime . now ( ) : return resp = requests . post ( "{}/1.1/oauth/token" . format ( API_URL ) , data = { "client_id" : self . client_id , "client_secret" : self . client_secret , "grant_type" : "client_credentials" } ) . json ( ) if "error" in resp : raise APIError ( "LibCal Auth Failed: {}, {}" . format ( resp [ "error" ] , resp . get ( "error_description" ) ) ) self . expiration = datetime . datetime . now ( ) + datetime . timedelta ( seconds = resp [ "expires_in" ] ) self . token = resp [ "access_token" ] print ( self . token )
Obtain an auth token from client id and client secret .
206
12
8,287
def _request ( self , * args , * * kwargs ) : if not self . token : self . _obtain_token ( ) headers = { "Authorization" : "Bearer {}" . format ( self . token ) } # add authorization headers if "headers" in kwargs : kwargs [ "headers" ] . update ( headers ) else : kwargs [ "headers" ] = headers # add api site to url args = list ( args ) if not args [ 1 ] . startswith ( "http" ) : args [ 1 ] = "{}{}" . format ( API_URL , args [ 1 ] ) has_no_token = kwargs . get ( "no_token" ) if has_no_token : del kwargs [ "no_token" ] resp = requests . request ( * args , * * kwargs ) if resp . status_code == 401 and not has_no_token : self . _obtain_token ( ) kwargs [ "no_token" ] = True self . _request ( * args , * * kwargs ) return resp
Make a signed request to the libcal API .
242
10
8,288
def get_rooms ( self , lid , start = None , end = None ) : range_str = "availability" if start : start_datetime = datetime . datetime . combine ( datetime . datetime . strptime ( start , "%Y-%m-%d" ) . date ( ) , datetime . datetime . min . time ( ) ) range_str += "=" + start if end and not start == end : range_str += "," + end else : start_datetime = None resp = self . _request ( "GET" , "/1.1/space/categories/{}" . format ( lid ) ) . json ( ) if "error" in resp : raise APIError ( resp [ "error" ] ) output = { "id" : lid , "categories" : [ ] } # if there aren't any rooms associated with this location, return if len ( resp ) < 1 : return output if "error" in resp [ 0 ] : raise APIError ( resp [ 0 ] [ "error" ] ) if "categories" not in resp [ 0 ] : return output categories = resp [ 0 ] [ "categories" ] id_to_category = { i [ "cid" ] : i [ "name" ] for i in categories } categories = "," . join ( [ str ( x [ "cid" ] ) for x in categories ] ) resp = self . _request ( "GET" , "/1.1/space/category/{}" . format ( categories ) ) for category in resp . json ( ) : cat_out = { "cid" : category [ "cid" ] , "name" : id_to_category [ category [ "cid" ] ] , "rooms" : [ ] } # ignore equipment categories if cat_out [ "name" ] . endswith ( "Equipment" ) : continue items = category [ "items" ] items = "," . join ( [ str ( x ) for x in items ] ) resp = self . _request ( "GET" , "/1.1/space/item/{}?{}" . format ( items , range_str ) ) for room in resp . json ( ) : if room [ "id" ] in ROOM_BLACKLIST : continue # prepend protocol to urls if "image" in room and room [ "image" ] : if not room [ "image" ] . startswith ( "http" ) : room [ "image" ] = "https:" + room [ "image" ] # convert html descriptions to text if "description" in room : description = room [ "description" ] . replace ( u'\xa0' , u' ' ) room [ "description" ] = BeautifulSoup ( description , "html.parser" ) . text . strip ( ) # remove extra fields if "formid" in room : del room [ "formid" ] # enforce date filter # API returns dates outside of the range, fix this manually if start_datetime : out_times = [ ] for time in room [ "availability" ] : parsed_start = datetime . datetime . strptime ( time [ "from" ] [ : - 6 ] , "%Y-%m-%dT%H:%M:%S" ) if parsed_start >= start_datetime : out_times . append ( time ) room [ "availability" ] = out_times cat_out [ "rooms" ] . append ( room ) if cat_out [ "rooms" ] : output [ "categories" ] . append ( cat_out ) return output
Returns a list of rooms and their availabilities grouped by category .
781
13
8,289
def book_room ( self , item , start , end , fname , lname , email , nickname , custom = { } , test = False ) : data = { "start" : start , "fname" : fname , "lname" : lname , "email" : email , "nickname" : nickname , "bookings" : [ { "id" : item , "to" : end } ] , "test" : test } data . update ( custom ) resp = self . _request ( "POST" , "/1.1/space/reserve" , json = data ) out = resp . json ( ) if "errors" in out and "error" not in out : errors = out [ "errors" ] if isinstance ( errors , list ) : errors = " " . join ( errors ) out [ "error" ] = BeautifulSoup ( errors . replace ( "\n" , " " ) , "html.parser" ) . text . strip ( ) del out [ "errors" ] if "results" not in out : if "error" not in out : out [ "error" ] = None out [ "results" ] = True else : out [ "results" ] = False return out
Books a room given the required information .
263
8
8,290
def cancel_room ( self , booking_id ) : resp = self . _request ( "POST" , "/1.1/space/cancel/{}" . format ( booking_id ) ) return resp . json ( )
Cancel a room given a booking id .
49
9
8,291
def get_reservations ( self , email , date , timeout = None ) : try : resp = self . _request ( "GET" , "/1.1/space/bookings?email={}&date={}&limit=100" . format ( email , date ) , timeout = timeout ) except resp . exceptions . HTTPError as error : raise APIError ( "Server Error: {}" . format ( error ) ) except requests . exceptions . ConnectTimeout : raise APIError ( "Timeout Error" ) return resp . json ( )
Gets reservations for a given email .
114
8
8,292
def get_reservations_for_booking_ids ( self , booking_ids ) : try : resp = self . _request ( "GET" , "/1.1/space/booking/{}" . format ( booking_ids ) ) except resp . exceptions . HTTPError as error : raise APIError ( "Server Error: {}" . format ( error ) ) return resp . json ( )
Gets booking information for a given list of booking ids .
86
13
8,293
def get_room_info ( self , room_ids ) : try : resp = self . _request ( "GET" , "/1.1/space/item/{}" . format ( room_ids ) ) rooms = resp . json ( ) for room in rooms : if not room [ "image" ] . startswith ( "http" ) : room [ "image" ] = "https:" + room [ "image" ] if "description" in room : description = room [ "description" ] . replace ( u'\xa0' , u' ' ) room [ "description" ] = BeautifulSoup ( description , "html.parser" ) . text . strip ( ) except resp . exceptions . HTTPError as error : raise APIError ( "Server Error: {}" . format ( error ) ) return rooms
Gets room information for a given list of ids .
175
12
8,294
def reconstruct_ancestral_states ( tree , character , states , prediction_method = MPPA , model = F81 , params = None , avg_br_len = None , num_nodes = None , num_tips = None , force_joint = True ) : logging . getLogger ( 'pastml' ) . debug ( 'ACR settings for {}:\n\tMethod:\t{}{}.' . format ( character , prediction_method , '\n\tModel:\t{}' . format ( model ) if model and is_ml ( prediction_method ) else '' ) ) if COPY == prediction_method : return { CHARACTER : character , STATES : states , METHOD : prediction_method } if not num_nodes : num_nodes = sum ( 1 for _ in tree . traverse ( ) ) if not num_tips : num_tips = len ( tree ) if is_ml ( prediction_method ) : if avg_br_len is None : avg_br_len = np . mean ( n . dist for n in tree . traverse ( ) if n . dist ) freqs , sf , kappa = None , None , None if params is not None : freqs , sf , kappa = _parse_pastml_parameters ( params , states ) return ml_acr ( tree = tree , character = character , prediction_method = prediction_method , model = model , states = states , avg_br_len = avg_br_len , num_nodes = num_nodes , num_tips = num_tips , freqs = freqs , sf = sf , kappa = kappa , force_joint = force_joint ) if is_parsimonious ( prediction_method ) : return parsimonious_acr ( tree , character , prediction_method , states , num_nodes , num_tips ) raise ValueError ( 'Method {} is unknown, should be one of ML ({}), one of MP ({}) or {}' . format ( prediction_method , ', ' . join ( ML_METHODS ) , ', ' . join ( MP_METHODS ) , COPY ) )
Reconstructs ancestral states for the given character on the given tree .
470
15
8,295
def acr ( tree , df , prediction_method = MPPA , model = F81 , column2parameters = None , force_joint = True ) : for c in df . columns : df [ c ] = df [ c ] . apply ( lambda _ : '' if pd . isna ( _ ) else _ . encode ( 'ASCII' , 'replace' ) . decode ( ) ) columns = preannotate_tree ( df , tree ) name_tree ( tree ) collapse_zero_branches ( tree , features_to_be_merged = df . columns ) avg_br_len , num_nodes , num_tips = get_tree_stats ( tree ) logging . getLogger ( 'pastml' ) . debug ( '\n=============ACR===============================' ) column2parameters = column2parameters if column2parameters else { } def _work ( args ) : return reconstruct_ancestral_states ( * args , avg_br_len = avg_br_len , num_nodes = num_nodes , num_tips = num_tips , force_joint = force_joint ) prediction_methods = value2list ( len ( columns ) , prediction_method , MPPA ) models = value2list ( len ( columns ) , model , F81 ) def get_states ( method , model , column ) : df_states = [ _ for _ in df [ column ] . unique ( ) if pd . notnull ( _ ) and _ != '' ] if not is_ml ( method ) or model not in { HKY , JTT } : return np . sort ( df_states ) states = HKY_STATES if HKY == model else JTT_STATES if not set ( df_states ) & set ( states ) : raise ValueError ( 'The allowed states for model {} are {}, ' 'but your annotation file specifies {} as states in column {}.' . format ( model , ', ' . join ( states ) , ', ' . join ( df_states ) , column ) ) state_set = set ( states ) df [ column ] = df [ column ] . apply ( lambda _ : _ if _ in state_set else '' ) return states with ThreadPool ( ) as pool : acr_results = pool . map ( func = _work , iterable = ( ( tree , column , get_states ( method , model , column ) , method , model , column2parameters [ column ] if column in column2parameters else None ) for ( column , method , model ) in zip ( columns , prediction_methods , models ) ) ) result = [ ] for acr_res in acr_results : if isinstance ( acr_res , list ) : result . extend ( acr_res ) else : result . append ( acr_res ) return result
Reconstructs ancestral states for the given tree and all the characters specified as columns of the given annotation dataframe .
615
24
8,296
def compute_correction_factors ( data , true_conductivity , elem_file , elec_file ) : settings = { 'rho' : 100 , 'pha' : 0 , 'elem' : 'elem.dat' , 'elec' : 'elec.dat' , '2D' : True , 'sink_node' : 100 , } K = geometric_factors . compute_K_numerical ( data , settings = settings ) data = geometric_factors . apply_K ( data , K ) data = fixK . fix_sign_with_K ( data ) frequency = 100 data_onef = data . query ( 'frequency == {}' . format ( frequency ) ) rho_measured = data_onef [ 'r' ] * data_onef [ 'k' ] rho_true = 1 / true_conductivity * 1e4 correction_factors = rho_true / rho_measured collection = np . hstack ( ( data_onef [ [ 'a' , 'b' , 'm' , 'n' ] ] . values , np . abs ( correction_factors ) [ : , np . newaxis ] ) ) return collection
Compute correction factors for 2D rhizotron geometries following Weigand and Kemna 2017 Biogeosciences
268
27
8,297
def rdf_suffix ( fmt : str ) -> str : for k , v in SUFFIX_FORMAT_MAP . items ( ) : if fmt == v : return k return 'rdf'
Map the RDF format to the approproate suffix
44
11
8,298
def export_bert ( data , electrodes , filename ) : # Check for multiple timesteps if has_multiple_timesteps ( data ) : for i , timestep in enumerate ( split_timesteps ( data ) ) : export_bert ( timestep , electrodes , filename . replace ( "." , "_%.3d." % i ) ) # TODO: Make ABMN consistent # index_full = ert.data.groupby(list("abmn")).groups.keys() # g = ert.data.groupby('timestep') # q = ert.data.pivot_table(values='r', index=list("abmn"), columns="timestep", dropna=True) # ert.data.reset_index(list("abmn")) f = open ( filename , 'w' ) f . write ( "%d\n" % len ( electrodes ) ) f . write ( "# " ) # Make temporary copies for renaming electrodes = electrodes . copy ( ) data = data . copy ( ) electrodes . columns = electrodes . columns . str . lower ( ) data . columns = data . columns . str . lower ( ) # Remove unnecessary columns and rename according to bert conventions # https://gitlab.com/resistivity-net/bert#the-unified-data-format cols_to_export = [ "a" , "b" , "m" , "n" , "u" , "i" , "r" , "rho_a" , "error" ] data . drop ( data . columns . difference ( cols_to_export ) , 1 , inplace = True ) data . rename ( columns = { "rho_a" : "rhoa" , "error" : "err" } , inplace = True ) for key in electrodes . keys ( ) : f . write ( "%s " % key ) f . write ( "\n" ) for row in electrodes . itertuples ( index = False ) : for val in row : f . write ( "%5.3f " % val ) f . write ( "\n" ) f . write ( "%d\n" % len ( data ) ) f . write ( "# " ) # Make sure that a, b, m, n are the first 4 columns columns = data . columns . tolist ( ) for c in "abmn" : columns . remove ( c ) columns = list ( "abmn" ) + columns data = data [ columns ] for key in data . keys ( ) : f . write ( "%s " % key ) f . write ( "\n" ) for row in data . itertuples ( index = False ) : for i , val in enumerate ( row ) : if i < 4 : f . write ( "%d " % val ) else : f . write ( "%E " % val ) f . write ( "\n" ) f . close ( )
Export to unified data format used in pyGIMLi & BERT .
632
15
8,299
def reset ( self , index = None ) : points_handler_count = len ( self . registration_view . points ) if index is None : indexes = range ( points_handler_count ) else : indexes = [ index ] indexes = [ i for i in indexes if i < points_handler_count ] for i in indexes : self . registration_view . points [ i ] . reset ( ) if indexes : self . registration_view . update_transform ( )
Reset the points for the specified index position . If no index is specified reset points for all point handlers .
98
22