idx
int64
0
251k
question
stringlengths
53
3.53k
target
stringlengths
5
1.23k
len_question
int64
20
893
len_target
int64
3
238
238,400
def _allowAnotherAt ( cls , parent ) : site = parent . get_site ( ) if site is None : return False return not cls . peers ( ) . descendant_of ( site . root_page ) . exists ( )
You can only create one of these pages per site .
51
11
238,401
def peers ( cls ) : contentType = ContentType . objects . get_for_model ( cls ) return cls . objects . filter ( content_type = contentType )
Return others of the same concrete type .
39
8
238,402
def _getEventsOnDay ( self , request , day ) : home = request . site . root_page return getAllEventsByDay ( request , day , day , home = home ) [ 0 ]
Return all the events in this site for a given day .
43
12
238,403
def _getEventsByDay ( self , request , firstDay , lastDay ) : home = request . site . root_page return getAllEventsByDay ( request , firstDay , lastDay , home = home )
Return the events in this site for the dates given grouped by day .
46
14
238,404
def _getEventsByWeek ( self , request , year , month ) : home = request . site . root_page return getAllEventsByWeek ( request , year , month , home = home )
Return the events in this site for the given month grouped by week .
42
14
238,405
def _getUpcomingEvents ( self , request ) : home = request . site . root_page return getAllUpcomingEvents ( request , home = home )
Return the upcoming events in this site .
34
8
238,406
def _getPastEvents ( self , request ) : home = request . site . root_page return getAllPastEvents ( request , home = home )
Return the past events in this site .
32
8
238,407
def _getEventFromUid ( self , request , uid ) : event = getEventFromUid ( request , uid ) # might raise ObjectDoesNotExist home = request . site . root_page if event . get_ancestors ( ) . filter ( id = home . id ) . exists ( ) : # only return event if it is in the same site return event
Try and find an event with the given UID in this site .
83
13
238,408
def _getAllEvents ( self , request ) : home = request . site . root_page return getAllEvents ( request , home = home )
Return all the events in this site .
31
8
238,409
def _getEventsOnDay ( self , request , day ) : return getAllEventsByDay ( request , day , day , home = self ) [ 0 ]
Return my child events for a given day .
34
9
238,410
def _getEventsByDay ( self , request , firstDay , lastDay ) : return getAllEventsByDay ( request , firstDay , lastDay , home = self )
Return my child events for the dates given grouped by day .
37
12
238,411
def _getEventsByWeek ( self , request , year , month ) : return getAllEventsByWeek ( request , year , month , home = self )
Return my child events for the given month grouped by week .
33
12
238,412
def _getEventFromUid ( self , request , uid ) : event = getEventFromUid ( request , uid ) if event . get_ancestors ( ) . filter ( id = self . id ) . exists ( ) : # only return event if it is a descendant return event
Try and find a child event with the given UID .
64
11
238,413
def events_this_week ( context ) : request = context [ 'request' ] home = request . site . root_page cal = CalendarPage . objects . live ( ) . descendant_of ( home ) . first ( ) calUrl = cal . get_url ( request ) if cal else None calName = cal . title if cal else None today = dt . date . today ( ) beginOrd = today . toordinal ( ) if today . weekday ( ) != 6 : # Start week with Monday, unless today is Sunday beginOrd -= today . weekday ( ) endOrd = beginOrd + 6 dateFrom = dt . date . fromordinal ( beginOrd ) dateTo = dt . date . fromordinal ( endOrd ) if cal : events = cal . _getEventsByDay ( request , dateFrom , dateTo ) else : events = getAllEventsByDay ( request , dateFrom , dateTo ) return { 'request' : request , 'today' : today , 'calendarUrl' : calUrl , 'calendarName' : calName , 'events' : events }
Displays a week s worth of events . Starts week with Monday unless today is Sunday .
234
18
238,414
def minicalendar ( context ) : today = dt . date . today ( ) request = context [ 'request' ] home = request . site . root_page cal = CalendarPage . objects . live ( ) . descendant_of ( home ) . first ( ) calUrl = cal . get_url ( request ) if cal else None if cal : events = cal . _getEventsByWeek ( request , today . year , today . month ) else : events = getAllEventsByWeek ( request , today . year , today . month ) return { 'request' : request , 'today' : today , 'year' : today . year , 'month' : today . month , 'calendarUrl' : calUrl , 'monthName' : calendar . month_name [ today . month ] , 'weekdayInfo' : zip ( weekday_abbr , weekday_name ) , 'events' : events }
Displays a little ajax version of the calendar .
193
12
238,415
def subsite_upcoming_events ( context ) : request = context [ 'request' ] home = request . site . root_page return { 'request' : request , 'events' : getAllUpcomingEvents ( request , home = home ) }
Displays a list of all upcoming events in this site .
54
12
238,416
def group_upcoming_events ( context , group = None ) : request = context . get ( 'request' ) if group is None : group = context . get ( 'page' ) if group : events = getGroupUpcomingEvents ( request , group ) else : events = [ ] return { 'request' : request , 'events' : events }
Displays a list of all upcoming events that are assigned to a specific group . If the group is not specified it is assumed to be the current page .
75
31
238,417
def next_on ( context , rrevent = None ) : request = context [ 'request' ] if rrevent is None : rrevent = context . get ( 'page' ) eventNextOn = getattr ( rrevent , '_nextOn' , lambda _ : None ) return eventNextOn ( request )
Displays when the next occurence of a recurring event will be . If the recurring event is not specified it is assumed to be the current page .
70
31
238,418
def location_gmap ( context , location ) : gmapq = None if getattr ( MapFieldPanel , "UsingWagtailGMaps" , False ) : gmapq = location return { 'gmapq' : gmapq }
Display a link to Google maps iff we are using WagtailGMaps
53
15
238,419
def getGroupUpcomingEvents ( request , group ) : # Get events that are a child of a group page, or a postponement or extra # info a child of the recurring event child of the group rrEvents = RecurringEventPage . events ( request ) . exclude ( group_page = group ) . upcoming ( ) . child_of ( group ) . this ( ) qrys = [ SimpleEventPage . events ( request ) . exclude ( group_page = group ) . upcoming ( ) . child_of ( group ) . this ( ) , MultidayEventPage . events ( request ) . exclude ( group_page = group ) . upcoming ( ) . child_of ( group ) . this ( ) , rrEvents ] for rrEvent in rrEvents : qrys += [ PostponementPage . events ( request ) . child_of ( rrEvent . page ) . upcoming ( ) . this ( ) , ExtraInfoPage . events ( request ) . exclude ( extra_title = "" ) . child_of ( rrEvent . page ) . upcoming ( ) . this ( ) ] # Get events that are linked to a group page, or a postponement or extra # info a child of the recurring event linked to a group rrEvents = group . recurringeventpage_set ( manager = 'events' ) . auth ( request ) . upcoming ( ) . this ( ) qrys += [ group . simpleeventpage_set ( manager = 'events' ) . auth ( request ) . upcoming ( ) . this ( ) , group . multidayeventpage_set ( manager = 'events' ) . auth ( request ) . upcoming ( ) . this ( ) , rrEvents ] for rrEvent in rrEvents : qrys += [ PostponementPage . events ( request ) . child_of ( rrEvent . page ) . upcoming ( ) . this ( ) , ExtraInfoPage . events ( request ) . exclude ( extra_title = "" ) . child_of ( rrEvent . page ) . upcoming ( ) . this ( ) ] events = sorted ( chain . from_iterable ( qrys ) , key = attrgetter ( 'page._upcoming_datetime_from' ) ) return events
Return all the upcoming events that are assigned to the specified group .
474
13
238,420
def group ( self ) : retval = None parent = self . get_parent ( ) Group = get_group_model ( ) if issubclass ( parent . specific_class , Group ) : retval = parent . specific if retval is None : retval = self . group_page return retval
The group this event belongs to . Adding the event as a child of a group automatically assigns the event to that group .
65
24
238,421
def isAuthorized ( self , request ) : restrictions = self . get_view_restrictions ( ) if restrictions and request is None : return False else : return all ( restriction . accept_request ( request ) for restriction in restrictions )
Is the user authorized for the requested action with this event?
50
12
238,422
def _upcoming_datetime_from ( self ) : nextDt = self . __localAfter ( timezone . localtime ( ) , dt . time . max , excludeCancellations = True , excludeExtraInfo = True ) return nextDt
The datetime this event next starts in the local time zone or None if it is finished .
56
19
238,423
def _past_datetime_from ( self ) : prevDt = self . __localBefore ( timezone . localtime ( ) , dt . time . max , excludeCancellations = True , excludeExtraInfo = True ) return prevDt
The datetime this event previously started in the local time zone or None if it never did .
55
19
238,424
def _first_datetime_from ( self ) : myFromDt = self . _getMyFirstDatetimeFrom ( ) localTZ = timezone . get_current_timezone ( ) return myFromDt . astimezone ( localTZ )
The datetime this event first started in the local time zone or None if it never did .
57
19
238,425
def _getFromTime ( self , atDate = None ) : if atDate is None : atDate = timezone . localdate ( timezone = self . tz ) return getLocalTime ( atDate , self . time_from , self . tz )
What was the time of this event? Due to time zones that depends what day we are talking about . If no day is given assume today .
57
29
238,426
def _getFromDt ( self ) : myNow = timezone . localtime ( timezone = self . tz ) return self . __after ( myNow ) or self . __before ( myNow )
Get the datetime of the next event after or before now .
45
13
238,427
def _futureExceptions ( self , request ) : retval = [ ] # We know all future exception dates are in the parent time zone myToday = timezone . localdate ( timezone = self . tz ) for extraInfo in ExtraInfoPage . events ( request ) . child_of ( self ) . filter ( except_date__gte = myToday ) : retval . append ( extraInfo ) for cancellation in CancellationPage . events ( request ) . child_of ( self ) . filter ( except_date__gte = myToday ) : postponement = getattr ( cancellation , "postponementpage" , None ) if postponement : retval . append ( postponement ) else : retval . append ( cancellation ) retval . sort ( key = attrgetter ( 'except_date' ) ) # notice these are events not ThisEvents return retval
Returns all future extra info cancellations and postponements created for this recurring event
189
16
238,428
def _getMyFirstDatetimeFrom ( self ) : myStartDt = getAwareDatetime ( self . repeat . dtstart , None , self . tz , dt . time . min ) return self . __after ( myStartDt , excludeCancellations = False )
The datetime this event first started or None if it never did .
64
14
238,429
def _getMyFirstDatetimeTo ( self ) : myFirstDt = self . _getMyFirstDatetimeFrom ( ) if myFirstDt is not None : daysDelta = dt . timedelta ( days = self . num_days - 1 ) return getAwareDatetime ( myFirstDt . date ( ) + daysDelta , self . time_to , self . tz , dt . time . max )
The datetime this event first finished or None if it never did .
93
14
238,430
def local_title ( self ) : name = self . title . partition ( " for " ) [ 0 ] exceptDate = getLocalDate ( self . except_date , self . time_from , self . tz ) title = _ ( "{exception} for {date}" ) . format ( exception = _ ( name ) , date = dateFormat ( exceptDate ) ) return title
Localised version of the human - readable title of the page .
81
13
238,431
def full_clean ( self , * args , * * kwargs ) : name = getattr ( self , 'name' , self . slugName . title ( ) ) self . title = "{} for {}" . format ( name , dateFormat ( self . except_date ) ) self . slug = "{}-{}" . format ( self . except_date , self . slugName ) super ( ) . full_clean ( * args , * * kwargs )
Apply fixups that need to happen before per - field validation occurs . Sets the page s title .
101
20
238,432
def what ( self ) : originalFromDt = dt . datetime . combine ( self . except_date , timeFrom ( self . overrides . time_from ) ) changedFromDt = dt . datetime . combine ( self . date , timeFrom ( self . time_from ) ) originalDaysDelta = dt . timedelta ( days = self . overrides . num_days - 1 ) originalToDt = getAwareDatetime ( self . except_date + originalDaysDelta , self . overrides . time_to , self . tz ) changedDaysDelta = dt . timedelta ( days = self . num_days - 1 ) changedToDt = getAwareDatetime ( self . except_date + changedDaysDelta , self . time_to , self . tz ) if originalFromDt < changedFromDt : return _ ( "Postponed" ) elif originalFromDt > changedFromDt or originalToDt != changedToDt : return _ ( "Rescheduled" ) else : return None
May return a postponed or rescheduled string depending what the start and finish time of the event has been changed to .
230
24
238,433
def _convertTZ ( self ) : tz = timezone . get_current_timezone ( ) dtstart = self [ 'DTSTART' ] dtend = self [ 'DTEND' ] if dtstart . zone ( ) == "UTC" : dtstart . dt = dtstart . dt . astimezone ( tz ) if dtend . zone ( ) == "UTC" : dtend . dt = dtend . dt . astimezone ( tz )
Will convert UTC datetimes to the current local timezone
115
11
238,434
def to_naive_utc ( dtime ) : if not hasattr ( dtime , 'tzinfo' ) or dtime . tzinfo is None : return dtime dtime_utc = dtime . astimezone ( pytz . UTC ) dtime_naive = dtime_utc . replace ( tzinfo = None ) return dtime_naive
convert a datetime object to UTC and than remove the tzinfo if datetime is naive already return it
83
23
238,435
def create_timezone ( tz , first_date = None , last_date = None ) : if isinstance ( tz , pytz . tzinfo . StaticTzInfo ) : return _create_timezone_static ( tz ) # TODO last_date = None, recurring to infinity first_date = dt . datetime . today ( ) if not first_date else to_naive_utc ( first_date ) last_date = dt . datetime . today ( ) if not last_date else to_naive_utc ( last_date ) timezone = icalendar . Timezone ( ) timezone . add ( 'TZID' , tz ) # This is not a reliable way of determining if a transition is for # daylight savings. # From 1927 to 1941 New Zealand had GMT+11:30 (NZ Mean Time) as standard # and GMT+12:00 (NZ Summer Time) as daylight savings time. # From 1941 GMT+12:00 (NZ Standard Time) became standard time. # So NZST (NZ Summer/Standard Time) can refer to standard or daylight # savings time. And this code depends on the random order the _tzinfos # are returned. # dst = { # one[2]: 'DST' in two.__repr__() # for one, two in iter(tz._tzinfos.items()) # } # bst = { # one[2]: 'BST' in two.__repr__() # for one, two in iter(tz._tzinfos.items()) # } # ... # if dst[name] or bst[name]: # looking for the first and last transition time we need to include first_num , last_num = 0 , len ( tz . _utc_transition_times ) - 1 first_tt = tz . _utc_transition_times [ 0 ] last_tt = tz . _utc_transition_times [ - 1 ] for num , transtime in enumerate ( tz . _utc_transition_times ) : if transtime > first_tt and transtime < first_date : first_num = num first_tt = transtime if transtime < last_tt and transtime > last_date : last_num = num last_tt = transtime timezones = dict ( ) for num in range ( first_num , last_num + 1 ) : name = tz . _transition_info [ num ] [ 2 ] if name in timezones : ttime = tz . fromutc ( tz . _utc_transition_times [ num ] ) . replace ( tzinfo = None ) if 'RDATE' in timezones [ name ] : timezones [ name ] [ 'RDATE' ] . dts . append ( icalendar . prop . vDDDTypes ( ttime ) ) else : timezones [ name ] . add ( 'RDATE' , ttime ) continue if tz . 
_transition_info [ num ] [ 1 ] : subcomp = icalendar . TimezoneDaylight ( ) else : subcomp = icalendar . TimezoneStandard ( ) subcomp . add ( 'TZNAME' , tz . _transition_info [ num ] [ 2 ] ) subcomp . add ( 'DTSTART' , tz . fromutc ( tz . _utc_transition_times [ num ] ) . replace ( tzinfo = None ) ) subcomp . add ( 'TZOFFSETTO' , tz . _transition_info [ num ] [ 0 ] ) subcomp . add ( 'TZOFFSETFROM' , tz . _transition_info [ num - 1 ] [ 0 ] ) timezones [ name ] = subcomp for subcomp in timezones . values ( ) : timezone . add_component ( subcomp ) return timezone
create an icalendar vtimezone from a pytz . tzinfo object
871
17
238,436
def _create_timezone_static ( tz ) : timezone = icalendar . Timezone ( ) timezone . add ( 'TZID' , tz ) subcomp = icalendar . TimezoneStandard ( ) subcomp . add ( 'TZNAME' , tz ) subcomp . add ( 'DTSTART' , dt . datetime ( 1601 , 1 , 1 ) ) subcomp . add ( 'RDATE' , dt . datetime ( 1601 , 1 , 1 ) ) subcomp . add ( 'TZOFFSETTO' , tz . _utcoffset ) subcomp . add ( 'TZOFFSETFROM' , tz . _utcoffset ) timezone . add_component ( subcomp ) return timezone
create an icalendar vtimezone from a pytz . tzinfo . StaticTzInfo
171
21
238,437
def bend ( mapping , source , context = None ) : context = { } if context is None else context transport = Transport ( source , context ) return _bend ( mapping , transport )
The main bending function .
39
5
238,438
def protect ( self , protect_against = None ) : return ProtectedF ( self . _func , * self . _args , protect_against = protect_against , * * self . _kwargs )
Return a ProtectedF with the same parameters and with the given protect_against .
44
17
238,439
def init_logs ( args , tool = "NanoPlot" ) : start_time = dt . fromtimestamp ( time ( ) ) . strftime ( '%Y%m%d_%H%M' ) logname = os . path . join ( args . outdir , args . prefix + tool + "_" + start_time + ".log" ) handlers = [ logging . FileHandler ( logname ) ] if args . verbose : handlers . append ( logging . StreamHandler ( ) ) logging . basicConfig ( format = '%(asctime)s %(message)s' , handlers = handlers , level = logging . INFO ) logging . info ( '{} {} started with arguments {}' . format ( tool , __version__ , args ) ) logging . info ( 'Python version is: {}' . format ( sys . version . replace ( '\n' , ' ' ) ) ) return logname
Initiate log file and log arguments .
203
9
238,440
def flag_length_outliers ( df , columnname ) : return df [ columnname ] > ( np . median ( df [ columnname ] ) + 3 * np . std ( df [ columnname ] ) )
Return index of records with length - outliers above 3 standard deviations from the median .
47
17
238,441
def _raise_for_status ( response ) : http_error_msg = "" if 400 <= response . status_code < 500 : http_error_msg = "{0} Client Error: {1}" . format ( response . status_code , response . reason ) elif 500 <= response . status_code < 600 : http_error_msg = "{0} Server Error: {1}" . format ( response . status_code , response . reason ) if http_error_msg : try : more_info = response . json ( ) . get ( "message" ) except ValueError : more_info = None if more_info and more_info . lower ( ) != response . reason . lower ( ) : http_error_msg += ".\n\t{0}" . format ( more_info ) raise requests . exceptions . HTTPError ( http_error_msg , response = response )
Custom raise_for_status with more appropriate error message .
191
12
238,442
def _clear_empty_values ( args ) : result = { } for param in args : if args [ param ] is not None : result [ param ] = args [ param ] return result
Scrap junk data from a dict .
40
8
238,443
def authentication_validation ( username , password , access_token ) : if bool ( username ) is not bool ( password ) : raise Exception ( "Basic authentication requires a username AND" " password." ) if ( username and access_token ) or ( password and access_token ) : raise Exception ( "Cannot use both Basic Authentication and" " OAuth2.0. Please use only one authentication" " method." )
Only accept one form of authentication .
88
7
238,444
def _download_file ( url , local_filename ) : response = requests . get ( url , stream = True ) with open ( local_filename , 'wb' ) as outfile : for chunk in response . iter_content ( chunk_size = 1024 ) : if chunk : # filter out keep-alive new chunks outfile . write ( chunk )
Utility function that downloads a chunked response from the specified url to a local path . This method is suitable for larger downloads .
75
26
238,445
def set_permission ( self , dataset_identifier , permission = "private" , content_type = "json" ) : resource = _format_old_api_request ( dataid = dataset_identifier , content_type = content_type ) params = { "method" : "setPermission" , "value" : "public.read" if permission == "public" else permission } return self . _perform_request ( "put" , resource , params = params )
Set a dataset s permissions to private or public Options are private public
105
13
238,446
def get_metadata ( self , dataset_identifier , content_type = "json" ) : resource = _format_old_api_request ( dataid = dataset_identifier , content_type = content_type ) return self . _perform_request ( "get" , resource )
Retrieve the metadata for a particular dataset .
63
9
238,447
def download_attachments ( self , dataset_identifier , content_type = "json" , download_dir = "~/sodapy_downloads" ) : metadata = self . get_metadata ( dataset_identifier , content_type = content_type ) files = [ ] attachments = metadata [ 'metadata' ] . get ( "attachments" ) if not attachments : logging . info ( "No attachments were found or downloaded." ) return files download_dir = os . path . join ( os . path . expanduser ( download_dir ) , dataset_identifier ) if not os . path . exists ( download_dir ) : os . makedirs ( download_dir ) for attachment in attachments : file_path = os . path . join ( download_dir , attachment [ "filename" ] ) has_assetid = attachment . get ( "assetId" , False ) if has_assetid : base = _format_old_api_request ( dataid = dataset_identifier ) assetid = attachment [ "assetId" ] resource = "{0}/files/{1}?download=true&filename={2}" . format ( base , assetid , attachment [ "filename" ] ) else : base = "/api/assets" assetid = attachment [ "blobId" ] resource = "{0}/{1}?download=true" . format ( base , assetid ) uri = "{0}{1}{2}" . format ( self . uri_prefix , self . domain , resource ) _download_file ( uri , file_path ) files . append ( file_path ) logging . info ( "The following files were downloaded:\n\t{0}" . format ( "\n\t" . join ( files ) ) ) return files
Download all of the attachments associated with a dataset . Return the paths of downloaded files .
385
17
238,448
def replace_non_data_file ( self , dataset_identifier , params , file_data ) : resource = _format_old_api_request ( dataid = dataset_identifier , content_type = "txt" ) if not params . get ( 'method' , None ) : params [ 'method' ] = 'replaceBlob' params [ 'id' ] = dataset_identifier return self . _perform_request ( "post" , resource , params = params , files = file_data )
Same as create_non_data_file but replaces a file that already exists in a file - based dataset .
111
23
238,449
def _perform_update ( self , method , resource , payload ) : # python2/3 compatibility wizardry try : file_type = file except NameError : file_type = IOBase if isinstance ( payload , ( dict , list ) ) : response = self . _perform_request ( method , resource , data = json . dumps ( payload ) ) elif isinstance ( payload , file_type ) : headers = { "content-type" : "text/csv" , } response = self . _perform_request ( method , resource , data = payload , headers = headers ) else : raise Exception ( "Unrecognized payload {0}. Currently only list-, dictionary-," " and file-types are supported." . format ( type ( payload ) ) ) return response
Execute the update task .
167
6
238,450
def _perform_request ( self , request_type , resource , * * kwargs ) : request_type_methods = set ( [ "get" , "post" , "put" , "delete" ] ) if request_type not in request_type_methods : raise Exception ( "Unknown request type. Supported request types are" ": {0}" . format ( ", " . join ( request_type_methods ) ) ) uri = "{0}{1}{2}" . format ( self . uri_prefix , self . domain , resource ) # set a timeout, just to be safe kwargs [ "timeout" ] = self . timeout response = getattr ( self . session , request_type ) ( uri , * * kwargs ) # handle errors if response . status_code not in ( 200 , 202 ) : _raise_for_status ( response ) # when responses have no content body (ie. delete, set_permission), # simply return the whole response if not response . text : return response # for other request types, return most useful data content_type = response . headers . get ( 'content-type' ) . strip ( ) . lower ( ) if re . match ( r'application\/json' , content_type ) : return response . json ( ) elif re . match ( r'text\/csv' , content_type ) : csv_stream = StringIO ( response . text ) return [ line for line in csv . reader ( csv_stream ) ] elif re . match ( r'application\/rdf\+xml' , content_type ) : return response . content elif re . match ( r'text\/plain' , content_type ) : try : return json . loads ( response . text ) except ValueError : return response . text else : raise Exception ( "Unknown response format: {0}" . format ( content_type ) )
Utility method that performs all requests .
412
8
238,451
def exec_request ( self , URL ) : ## Throttle request, if need be interval = time . time ( ) - self . __ts_last_req if ( interval < self . __min_req_interval ) : time . sleep ( self . __min_req_interval - interval ) ## Construct and execute request headers = { "X-ELS-APIKey" : self . api_key , "User-Agent" : self . __user_agent , "Accept" : 'application/json' } if self . inst_token : headers [ "X-ELS-Insttoken" ] = self . inst_token logger . info ( 'Sending GET request to ' + URL ) r = requests . get ( URL , headers = headers ) self . __ts_last_req = time . time ( ) self . _status_code = r . status_code if r . status_code == 200 : self . _status_msg = 'data retrieved' return json . loads ( r . text ) else : self . _status_msg = "HTTP " + str ( r . status_code ) + " Error from " + URL + " and using headers " + str ( headers ) + ": " + r . text raise requests . HTTPError ( "HTTP " + str ( r . status_code ) + " Error from " + URL + "\nand using headers " + str ( headers ) + ":\n" + r . text )
Sends the actual request ; returns response .
310
9
238,452
def write ( self ) : if ( self . data ) : dataPath = self . client . local_dir / ( urllib . parse . quote_plus ( self . uri ) + '.json' ) with dataPath . open ( mode = 'w' ) as dump_file : json . dump ( self . data , dump_file ) dump_file . close ( ) logger . info ( 'Wrote ' + self . uri + ' to file' ) return True else : logger . warning ( 'No data to write for ' + self . uri ) return False
If data exists for the entity writes it to disk as a . JSON file with the url - encoded URI as the filename and returns True . Else returns False .
123
32
238,453
def write_docs ( self ) : if self . doc_list : dataPath = self . client . local_dir dump_file = open ( 'data/' + urllib . parse . quote_plus ( self . uri + '?view=documents' ) + '.json' , mode = 'w' ) dump_file . write ( '[' + json . dumps ( self . doc_list [ 0 ] ) ) for i in range ( 1 , len ( self . doc_list ) ) : dump_file . write ( ',' + json . dumps ( self . doc_list [ i ] ) ) dump_file . write ( ']' ) dump_file . close ( ) logger . info ( 'Wrote ' + self . uri + '?view=documents to file' ) return True else : logger . warning ( 'No doclist to write for ' + self . uri ) return False
If a doclist exists for the entity writes it to disk as a . JSON file with the url - encoded URI as the filename and returns True . Else returns False .
197
34
238,454
def read ( self , els_client = None ) : if ElsProfile . read ( self , self . __payload_type , els_client ) : return True else : return False
Reads the JSON representation of the author from ELSAPI . Returns True if successful ; else False .
42
21
238,455
def read ( self , els_client = None ) : if super ( ) . read ( self . __payload_type , els_client ) : return True else : return False
Reads the JSON representation of the document from ELSAPI . Returns True if successful ; else False .
40
21
238,456
def _extract_obo_synonyms ( rawterm ) : synonyms = set ( ) # keys in rawterm that define a synonym keys = set ( owl_synonyms ) . intersection ( rawterm . keys ( ) ) for k in keys : for s in rawterm [ k ] : synonyms . add ( Synonym ( s , owl_synonyms [ k ] ) ) return synonyms
Extract the synonyms defined in the rawterm .
85
11
238,457
def _extract_obo_relation ( cls , rawterm ) : relations = { } if 'subClassOf' in rawterm : relations [ Relationship ( 'is_a' ) ] = l = [ ] l . extend ( map ( cls . _get_id_from_url , rawterm . pop ( 'subClassOf' ) ) ) return relations
Extract the relationships defined in the rawterm .
79
10
238,458
def _relabel_to_obo ( d ) : return { owl_to_obo . get ( old_k , old_k ) : old_v for old_k , old_v in six . iteritems ( d ) }
Change the keys of d to use Obo labels .
51
11
238,459
def complement ( self ) : if self . complementary : #if self.complementary in self._instances.keys(): try : return self . _instances [ self . complementary ] except KeyError : raise ValueError ( '{} has a complementary but it was not defined !' ) else : return None
Return the complementary relationship of self .
65
7
238,460
def topdown ( cls ) : return tuple ( unique_everseen ( r for r in cls . _instances . values ( ) if r . direction == 'topdown' ) )
Get all topdown Relationship instances .
41
7
238,461
def bottomup ( cls ) : return tuple ( unique_everseen ( r for r in cls . _instances . values ( ) if r . direction == 'bottomup' ) )
Get all bottomup Relationship instances .
41
7
238,462
def unique_everseen ( iterable ) : # unique_everseen('AAAABBBCCDAABBB') --> A B C D seen = set ( ) seen_add = seen . add for element in six . moves . filterfalse ( seen . __contains__ , iterable ) : seen_add ( element ) yield element
List unique elements preserving order . Remember all elements ever seen .
70
12
238,463
def output_str ( f ) : if six . PY2 : #@functools.wraps(f) def new_f ( * args , * * kwargs ) : return f ( * args , * * kwargs ) . encode ( "utf-8" ) else : new_f = f return new_f
Create a function that always return instances of str .
73
10
238,464
def nowarnings ( func ) : @ functools . wraps ( func ) def new_func ( * args , * * kwargs ) : with warnings . catch_warnings ( ) : warnings . simplefilter ( 'ignore' ) return func ( * args , * * kwargs ) return new_func
Create a function wrapped in a context that ignores warnings .
67
11
238,465
def parse ( self , stream , parser = None ) : force , parsers = self . _get_parsers ( parser ) try : stream . seek ( 0 ) lookup = stream . read ( 1024 ) stream . seek ( 0 ) except ( io . UnsupportedOperation , AttributeError ) : lookup = None for p in parsers : if p . hook ( path = self . path , force = force , lookup = lookup ) : self . meta , self . terms , self . imports , self . typedefs = p . parse ( stream ) self . _parsed_by = p . __name__ break
Parse the given file using available BaseParser instances .
131
11
238,466
def _get_parsers ( self , name ) : parserlist = BaseParser . __subclasses__ ( ) forced = name is None if isinstance ( name , ( six . text_type , six . binary_type ) ) : parserlist = [ p for p in parserlist if p . __name__ == name ] if not parserlist : raise ValueError ( "could not find parser: {}" . format ( name ) ) elif name is not None : raise TypeError ( "parser must be {types} or None, not {actual}" . format ( types = " or " . join ( [ six . text_type . __name__ , six . binary_type . __name__ ] ) , actual = type ( parser ) . __name__ , ) ) return not forced , parserlist
Return the appropriate parser asked by the user .
171
9
238,467
def adopt(self):
    """Make terms aware of their children.

    For every relation that declares a complementary relation, build a
    ``(parent, complemented relation, child id)`` triple and append the
    child id to the parent term's relations under the complemented
    relation (creating the list if needed).  Triples are processed in
    child-id order.
    """
    valid_relationships = set(Relationship._instances.keys())
    relationships = [
        (parent, relation.complement(), term.id)
        for term in six.itervalues(self.terms)
        for relation in term.relations
        for parent in term.relations[relation]
        if relation.complementary
        and relation.complementary in valid_relationships
    ]
    # Sort by child id for deterministic insertion order.
    relationships.sort(key=operator.itemgetter(2))
    for parent, rel, child in relationships:
        if rel is None:
            # complement() returned nothing usable; the sort put these
            # together, but a None relation cannot be applied — stop.
            break
        try:
            parent = parent.id      # parent may be a Term or already an id
        except AttributeError:
            pass
        if parent in self.terms:
            try:
                if child not in self.terms[parent].relations[rel]:
                    self.terms[parent].relations[rel].append(child)
            except KeyError:
                # parent had no entry for this relation yet
                self[parent].relations[rel] = [child]
    del relationships
Make terms aware of their children .
185
7
238,468
def reference(self):
    """Make relations point to ontology terms instead of term ids.

    Every relation value list is replaced by a ``TermList`` of ``Term``
    instances; ids that are unknown to this ontology become empty
    placeholder ``Term(x, '', '')`` objects, and values that are already
    ``Term`` instances are kept as-is.
    """
    for termkey, termval in six.iteritems(self.terms):
        termval.relations.update(
            (relkey,
             TermList(
                 (self.terms.get(x) or Term(x, '', '')
                  if not isinstance(x, Term) else x)
                 for x in relval))
            for relkey, relval in six.iteritems(termval.relations)
        )
Make relations point to ontology terms instead of term ids .
92
13
238,469
def resolve_imports(self, imports, import_depth, parser=None):
    """Import and merge the ontologies referenced by ``self.imports``.

    Arguments:
        imports: truthy to resolve imports at all.
        import_depth: recursion budget, decremented for each nested import.
        parser: optional parser name forwarded to the ``Ontology``
            constructor.

    Import references that are neither existing paths nor http/ftp URLs
    are retried relative to this ontology's own directory.  Any I/O or
    parse failure is downgraded to a ``ProntoWarning`` instead of raising.
    """
    if imports and import_depth:
        for i in list(self.imports):
            try:
                if os.path.exists(i) or i.startswith(('http', 'ftp')):
                    self.merge(Ontology(i, import_depth=import_depth - 1,
                                        parser=parser))
                else:
                    # try to look at neighbouring ontologies
                    self.merge(Ontology(
                        os.path.join(os.path.dirname(self.path), i),
                        import_depth=import_depth - 1, parser=parser))
            except (IOError, OSError, URLError, HTTPError,
                    _etree.ParseError) as e:
                warnings.warn("{} occured during import of "
                              "{}".format(type(e).__name__, i),
                              ProntoWarning)
Import required ontologies .
205
5
238,470
def include(self, *terms):
    """Add new terms to the current ontology.

    Accepts any mix of ``Term`` and ``TermList`` arguments.  After
    insertion the relationship graph is refreshed with ``adopt()`` and
    ``reference()``.

    Raises:
        TypeError: if an argument is neither a Term nor a TermList.
    """
    ref_needed = False
    for term in terms:
        if isinstance(term, TermList):
            # BUG FIX: evaluate the call first.  The previous
            # `ref_needed = ref_needed or self._include_term_list(term)`
            # short-circuited once ref_needed was True, silently skipping
            # every remaining argument.
            ref_needed = self._include_term_list(term) or ref_needed
        elif isinstance(term, Term):
            ref_needed = self._include_term(term) or ref_needed
        else:
            raise TypeError('include only accepts <Term> or <TermList> as arguments')
    self.adopt()
    self.reference()
Add new terms to the current ontology .
107
9
238,471
def merge(self, other):
    """Merge another ontology's terms into the current one.

    Raises:
        TypeError: if *other* is not an ``Ontology``.
    """
    if not isinstance(other, Ontology):
        raise TypeError("'merge' requires an Ontology as argument,"
                        " not {}".format(type(other)))
    self.terms.update(other.terms)
    # Memoized lookups are stale after the update; rebuild the graph.
    self._empty_cache()
    self.adopt()
    self.reference()
Merge another ontology into the current one .
75
10
238,472
def _include_term_list ( self , termlist ) : ref_needed = False for term in termlist : ref_needed = ref_needed or self . _include_term ( term ) return ref_needed
Add terms from a TermList to the ontology .
46
11
238,473
def _include_term(self, term):
    """Add a single term to the current ontology.

    Nested ``Term`` objects found in the term's relations are included
    recursively and replaced in-place by their ids.  Returns ``True``
    when the term had relations, meaning ``reference()`` must re-link
    them afterwards.
    """
    ref_needed = False
    if term.relations:
        for k, v in six.iteritems(term.relations):
            for i, t in enumerate(v):
                try:
                    if t.id not in self:
                        self._include_term(t)
                    v[i] = t.id
                except AttributeError:
                    # t is already a plain id, not a Term object
                    pass
                ref_needed = True
    self.terms[term.id] = term
    return ref_needed
Add a single term to the current ontology .
108
10
238,474
def _empty_cache(self, termlist=None):
    """Empty the memoized cache of each Term instance.

    With no argument every term's cache is cleared; otherwise only the
    terms of *termlist* (given as Term objects or plain ids) are cleared.
    """
    if termlist is None:
        for term in six.itervalues(self.terms):
            term._empty_cache()
        return
    for term in termlist:
        # Accept both Term objects (use .id) and bare id strings.
        key = getattr(term, 'id', term)
        self.terms[key]._empty_cache()
Empty the cache associated with each Term instance .
86
9
238,475
def _obo_meta(self):
    """Generate the obo metadata header and update the metadata.

    Builds the header from three parts, joined by newlines:
    1. official obo tags, emitted in the canonical ``metatags`` order
       (``ontology`` excluded via ``metatags[:-1]`` and handled last);
    2. any other metadata keys, serialized as ``remark: key: value``
       lines, sorted by key;
    3. a final ``ontology:`` line, taken from ``meta['ontology']`` or
       derived from the lowercased namespace when absent.

    Side effects: stamps ``auto-generated-by`` and ``date`` into the
    metadata before serializing.
    """
    metatags = (
        "format-version", "data-version", "date", "saved-by",
        "auto-generated-by", "import", "subsetdef", "synonymtypedef",
        "default-namespace", "namespace-id-rule", "idspace",
        "treat-xrefs-as-equivalent", "treat-xrefs-as-genus-differentia",
        "treat-xrefs-as-is_a", "remark", "ontology"
    )
    meta = self.meta.copy()
    meta['auto-generated-by'] = ['pronto v{}'.format(__version__)]
    meta['date'] = [datetime.datetime.now().strftime('%d:%m:%Y %H:%M')]
    obo_meta = "\n".join(
        [  # official obo tags (values with an .obo form serialize themselves)
            x.obo if hasattr(x, 'obo') else "{}: {}".format(k, x)
            for k in metatags[:-1]
            for x in meta.get(k, ())
        ] + [  # any non-standard metadata is preserved as remark lines
            "remark: {}: {}".format(k, x)
            for k, v in sorted(six.iteritems(meta), key=operator.itemgetter(0))
            for x in v
            if k not in metatags
        ] + (
            ["ontology: {}".format(x) for x in meta["ontology"]]
            if "ontology" in meta
            else ["ontology: {}".format(meta["namespace"][0].lower())]
            if "namespace" in meta
            else []
        )
    )
    return obo_meta
Generate the obo metadata header and updates metadata .
426
11
238,476
def _empty_cache ( self ) : self . _children , self . _parents = None , None self . _rchildren , self . _rparents = { } , { }
Empty the cache of the Term s memoized functions .
39
11
238,477
def _check_section ( line , section ) : if "[Term]" in line : section = OboSection . term elif "[Typedef]" in line : section = OboSection . typedef return section
Update the section being parsed .
45
6
238,478
def _parse_metadata(cls, line, meta, parse_remarks=True):
    """Parse a metadata line into *meta*.

    A ``remark`` line whose value itself looks like an early ``key: value``
    pair (the ``':'`` within the first 20 characters — an arbitrary cutoff
    to avoid treating a sentence containing ``':'`` as a nested statement)
    is re-parsed recursively as an obo statement nested in a remark.
    All other lines are appended under their key.

    NOTE(review): a remark whose value does NOT contain an early ':' is
    silently dropped by this flow — confirm this is intended.
    Also normalizes ``synonymtypedef`` entries into ``SynonymType``
    instances when present.
    """
    key, value = line.split(':', 1)
    key, value = key.strip(), value.strip()
    if parse_remarks and "remark" in key:
        if 0 < value.find(': ') < 20:
            try:
                cls._parse_metadata(value, meta, parse_remarks)
            except ValueError:
                # nested value was not a parsable "key: value" statement
                pass
    else:
        meta[key].append(value)
    try:
        syn_type_def = []
        for m in meta['synonymtypedef']:
            if not isinstance(m, SynonymType):
                x = SynonymType.from_obo(m)
                syn_type_def.append(x)
            else:
                syn_type_def.append(m)
    except KeyError:
        pass
    else:
        meta['synonymtypedef'] = syn_type_def
Parse a metadata line .
250
6
238,479
def _parse_typedef ( line , _rawtypedef ) : if "[Typedef]" in line : _rawtypedef . append ( collections . defaultdict ( list ) ) else : key , value = line . split ( ':' , 1 ) _rawtypedef [ - 1 ] [ key . strip ( ) ] . append ( value . strip ( ) )
Parse a typedef line .
82
7
238,480
def _parse_term ( _rawterms ) : line = yield _rawterms . append ( collections . defaultdict ( list ) ) while True : line = yield if "[Term]" in line : _rawterms . append ( collections . defaultdict ( list ) ) else : key , value = line . split ( ':' , 1 ) _rawterms [ - 1 ] [ key . strip ( ) ] . append ( value . strip ( ) )
Parse a term line .
93
6
238,481
def _classify(_rawtypedef, _rawterms):
    """Create proper objects out of the extracted dictionaries.

    Turns each raw typedef dict into a ``Relationship`` and each raw term
    dict into a ``Term``: ``is_a`` and ``relationship`` lines become
    relation entries (the ``! comment`` suffix is stripped), synonym
    headers are deduplicated through a local cache, and the remaining
    keys are kept as the term's misc data.

    Returns:
        (terms, typedefs): an OrderedDict of Terms keyed by id, and the
        list of Relationship typedefs.
    """
    terms = collections.OrderedDict()
    _cached_synonyms = {}
    typedefs = [
        Relationship._from_obo_dict(  # instantiate a new Relationship
            {k: v for k, lv in six.iteritems(_typedef) for v in lv}
        )
        for _typedef in _rawtypedef
    ]
    for _term in _rawterms:
        synonyms = set()
        _id = _term['id'][0]
        _name = _term.pop('name', ('',))[0]
        _desc = _term.pop('def', ('',))[0]
        _relations = collections.defaultdict(list)
        try:
            for other in _term.get('is_a', ()):
                # drop the trailing "! human-readable name" comment
                _relations[Relationship('is_a')].append(
                    other.split('!')[0].strip())
        except IndexError:
            pass
        try:
            for relname, other in (x.split(' ', 1)
                                   for x in _term.pop('relationship', ())):
                _relations[Relationship(relname)].append(
                    other.split('!')[0].strip())
        except IndexError:
            pass
        for key, scope in six.iteritems(_obo_synonyms_map):
            for obo_header in _term.pop(key, ()):
                # reuse previously-built Synonym objects for identical headers
                try:
                    s = _cached_synonyms[obo_header]
                except KeyError:
                    s = Synonym.from_obo(obo_header, scope)
                    _cached_synonyms[obo_header] = s
                finally:
                    synonyms.add(s)
        desc = Description.from_obo(_desc) if _desc else Description("")
        terms[_id] = Term(_id, _name, desc, dict(_relations), synonyms,
                          dict(_term))
    return terms, typedefs
Create proper objects out of the extracted dictionaries.
445
10
238,482
def calculate_first_digit(number):
    """Calculate the first check digit of a CPF or CNPJ number.

    Arguments:
        number: the digit string — 9 chars selects the CPF weights,
            anything else the CNPJ weights.

    Returns:
        The check digit as a one-character string.
    """
    if len(number) == 9:
        weights = CPF_WEIGHTS[0]
    else:
        weights = CNPJ_WEIGHTS[0]
    # idiom fix: the original accumulated into a variable named `sum`,
    # shadowing the builtin; use the builtin with a generator instead.
    total = sum(int(digit) * weight for digit, weight in zip(number, weights))
    rest_division = total % DIVISOR
    if rest_division < 2:
        return '0'
    return str(11 - rest_division)
This function calculates the first check digit of a cpf or cnpj .
101
16
238,483
def validate(number):
    """Validate a CPF or CNPJ number (facade over the cpf/cnpj modules).

    The number is stripped of punctuation first: 11 digits are validated
    as CPF, 14 as CNPJ; any other length is invalid.
    """
    digits = clear_punctuation(number)
    if len(digits) == 11:
        return cpf.validate(digits)
    if len(digits) == 14:
        return cnpj.validate(digits)
    return False
This function acts as a facade to the cpf and cnpj modules and validates either CPF or CNPJ numbers. Feel free to use this or the other modules directly.
62
40
238,484
def validate(cpf_number):
    """Validate a CPF number.

    Rejects inputs that are not 11 digits long after punctuation removal,
    or that consist of a single repeated digit; otherwise checks both
    verification digits.
    """
    _cpf = compat.clear_punctuation(cpf_number)
    if len(_cpf) != 11 or len(set(_cpf)) == 1:
        return False
    expected_first = calc.calculate_first_digit(_cpf[:9])
    expected_second = calc.calculate_second_digit(_cpf[:10])
    return _cpf[9] == expected_first and _cpf[10] == expected_second
This function validates a CPF number .
135
9
238,485
def validate(cnpj_number):
    """Validate a CNPJ number.

    Rejects inputs that are not 14 digits long after punctuation removal,
    or that consist of a single repeated digit; otherwise checks both
    verification digits.
    """
    _cnpj = compat.clear_punctuation(cnpj_number)
    if len(_cnpj) != 14 or len(set(_cnpj)) == 1:
        return False
    expected_first = calc.calculate_first_digit(_cnpj[:12])
    expected_second = calc.calculate_second_digit(_cnpj[:13])
    return _cnpj[12] == expected_first and _cnpj[13] == expected_second
This function validates a CNPJ number .
144
10
238,486
def xml_open(filename, expected_root=None):
    """Open *filename* as an XML tree.

    Handles both plain XML documents and zip archives (.twbx/.tdsx),
    validates the document version, and optionally checks the root tag.

    Raises:
        TableauVersionNotSupportedException: version below the minimum.
        TableauInvalidFileException: root tag differs from *expected_root*.
    """
    # Is the file a zip (.twbx or .tdsx)
    if zipfile.is_zipfile(filename):
        tree = get_xml_from_archive(filename)
    else:
        tree = ET.parse(filename)
    # Is the file a supported version
    tree_root = tree.getroot()
    file_version = Version(tree_root.attrib.get('version', '0.0'))
    if file_version < MIN_SUPPORTED_VERSION:
        raise TableauVersionNotSupportedException(file_version)
    # Does the root tag match the object type (workbook or data source)
    if expected_root and (expected_root != tree_root.tag):
        raise TableauInvalidFileException(
            "'{}'' is not a valid '{}' file".format(filename, expected_root))
    return tree
Opens the provided filename. Handles detecting whether the file is an archive, detecting the document version, and validating the root tag.
198
26
238,487
def build_archive_file(archive_contents, zip_file):
    """Build a Tableau-compatible archive file.

    Walks the *archive_contents* directory and writes every file into
    *zip_file* under its path relative to that directory.

    This is tested against Desktop and Server, and reverse engineered by
    lots of trial and error. Do not change this logic.
    """
    for root_dir, _, files in os.walk(archive_contents):
        relative_dir = os.path.relpath(root_dir, archive_contents)
        for f in files:
            temp_file_full_path = os.path.join(
                archive_contents, relative_dir, f)
            zipname = os.path.join(relative_dir, f)
            zip_file.write(temp_file_full_path, arcname=zipname)
Build a Tableau - compatible archive file .
147
9
238,488
def from_attributes(cls, server, dbname, username, dbclass, port=None,
                    query_band=None, initial_sql=None, authentication=''):
    """Create a new connection that can be added into a Data Source.

    *authentication* defaults to the empty string, which Tableau treats
    as "prompt".
    """
    root = ET.Element('connection', authentication=authentication)
    conn = cls(root)
    # Assignment order matters: dbclass validates and may raise before
    # the optional attributes are touched.
    conn.server = server
    conn.dbname = dbname
    conn.username = username
    conn.dbclass = dbclass
    conn.port = port
    conn.query_band = query_band
    conn.initial_sql = initial_sql
    return conn
Creates a new connection that can be added into a Data Source. `authentication` defaults to the empty string, which will be treated as "prompt" by Tableau.
110
26
238,489
def dbname(self, value):
    """Set the connection's database name property (mirrored in the XML)."""
    self._connectionXML.set('dbname', value)
    self._dbname = value
Set the connection s database name property .
32
8
238,490
def server(self, value):
    """Set the connection's server property (mirrored in the XML)."""
    self._connectionXML.set('server', value)
    self._server = value
Set the connection s server property .
29
7
238,491
def username(self, value):
    """Set the connection's username property (mirrored in the XML)."""
    self._connectionXML.set('username', value)
    self._username = value
Set the connection s username property .
29
7
238,492
def dbclass(self, value):
    """Set the connection's dbclass property.

    Raises:
        AttributeError: if *value* is not a recognized database type.
    """
    if not is_valid_dbclass(value):
        raise AttributeError("'{}' is not a valid database type".format(value))
    self._connectionXML.set('class', value)
    self._class = value
Set the connection s dbclass property .
64
8
238,493
def port(self, value):
    """Set the connection's port; None removes the attribute from the XML."""
    self._port = value
    if value is not None:
        self._connectionXML.set('port', value)
        return
    # A None port must not be serialized, so drop any existing attribute.
    try:
        del self._connectionXML.attrib['port']
    except KeyError:
        pass
Set the connection s port property .
74
7
238,494
def query_band(self, value):
    """Set the connection's query_band; None removes it from the XML."""
    self._query_band = value
    if value is not None:
        self._connectionXML.set('query-band-spec', value)
        return
    # A None query band must not be serialized, so drop the attribute.
    try:
        del self._connectionXML.attrib['query-band-spec']
    except KeyError:
        pass
Set the connection s query_band property .
87
9
238,495
def initial_sql(self, value):
    """Set the connection's initial_sql; None removes it from the XML."""
    self._initial_sql = value
    if value is not None:
        self._connectionXML.set('one-time-sql', value)
        return
    # A None initial_sql must not be serialized, so drop the attribute.
    try:
        del self._connectionXML.attrib['one-time-sql']
    except KeyError:
        pass
Set the connection s initial_sql property .
88
9
238,496
def base36encode(number):
    """Convert an integer into its base-36 string (lowercase digits)."""
    ALPHABET = "0123456789abcdefghijklmnopqrstuvwxyz"
    sign = ''
    if number < 0:
        sign, number = '-', -number
    # Single-digit fast path (also covers zero).
    if number < len(ALPHABET):
        return sign + ALPHABET[number]
    digits = []
    while number:
        number, rem = divmod(number, len(ALPHABET))
        digits.append(ALPHABET[rem])
    return sign + ''.join(reversed(digits))
Converts an integer into a base36 string .
118
10
238,497
def get_connections(self):
    """Find and return all connections based on the file format version."""
    if float(self._dsversion) < 10:
        return self._extract_legacy_connection()
    return self._extract_federated_connections()
Find and return all connections based on file format version .
55
11
238,498
def from_connections(cls, caption, connections):
    """Create a new Data Source given a list of Connections.

    Builds a version 10.0 inline federated datasource: each connection is
    wrapped in a ``named-connection`` element (named uniquely from its
    dbclass, captioned with its server) under ``named-connections``.
    """
    root = ET.Element('datasource', caption=caption, version='10.0',
                      inline='true')
    outer_connection = ET.SubElement(root, 'connection')
    outer_connection.set('class', 'federated')
    named_conns = ET.SubElement(outer_connection, 'named-connections')
    for conn in connections:
        nc = ET.SubElement(named_conns,
                           'named-connection',
                           name=_make_unique_name(conn.dbclass),
                           caption=conn.server)
        nc.append(conn._connectionXML)
    return cls(root)
Create a new Data Source give a list of Connections .
159
12
238,499
def name(self):
    """Return a nice name for the field: alias, else caption, else id."""
    for attr in ('alias', 'caption'):
        value = getattr(self, attr, None)
        if value:
            return value
    return self.id
Provides a nice name for the field which is derived from the alias caption or the id .
47
19