idx
int64
0
63k
question
stringlengths
61
4.03k
target
stringlengths
6
1.23k
37,600
def get_subregions(self, iso_code):
    """Return subregion calendar classes for the given region ISO code.

    A registry entry counts as a subregion of ``iso_code`` when its key
    splits into code elements whose first element equals ``iso_code``.
    """
    subregions = OrderedDict()
    for code, calendar_class in self.region_registry.items():
        elements, is_subregion = self._code_elements(code)
        if is_subregion and elements[0] == iso_code:
            subregions[code] = calendar_class
    return subregions
Returns subregion calendar classes for given region iso_code .
37,601
def items(self, region_codes, include_subregions=False):
    """Return calendar classes for the given region codes.

    Unknown codes are silently skipped.  When ``include_subregions`` is
    true, each found region's subregion calendars are merged in too.
    """
    result = OrderedDict()
    for code in region_codes:
        if code not in self.region_registry:
            continue
        result[code] = self.region_registry[code]
        if include_subregions:
            result.update(self.get_subregions(code))
    return result
Returns calendar classes for regions
37,602
def cleaned_date(day, keep_datetime=False):
    """Return *day* as a clean date.

    Raises UnsupportedDateType when *day* is neither a date nor a
    datetime.  Unless ``keep_datetime`` is set, a value exposing a
    callable ``.date()`` is reduced to its date component.
    """
    if not isinstance(day, (date, datetime)):
        raise UnsupportedDateType(
            "`{}` is of unsupported type ({})".format(day, type(day)))
    if keep_datetime:
        return day
    if callable(getattr(day, 'date', None)):
        day = day.date()
    return day
Return a clean date type .
37,603
def get_fixed_holidays(self, year):
    """Return (date, label) pairs for *year* built from the
    FIXED_HOLIDAYS class property (month, day, label triples)."""
    return [
        (date(year, month, day), label)
        for month, day, label in self.FIXED_HOLIDAYS
    ]
Return the fixed days according to the FIXED_HOLIDAYS class property
37,604
def get_holiday_label(self, day):
    """Return the label of the holiday falling on *day*, or None when
    *day* is not a holiday."""
    day = cleaned_date(day)
    labels_by_date = dict(self.holidays(day.year))
    return labels_by_date.get(day)
Return the label of the holiday if the date is a holiday
37,605
def is_working_day(self, day, extra_working_days=None, extra_holidays=None):
    """Return True when *day* is a working day.

    ``extra_working_days`` forces specific days to count as working;
    ``extra_holidays`` adds exceptional holidays on top of the regular
    ones.
    """
    day = cleaned_date(day)
    if extra_working_days:
        extra_working_days = tuple(map(cleaned_date, extra_working_days))
    if extra_holidays:
        extra_holidays = tuple(map(cleaned_date, extra_holidays))
    # Forced working days win over everything else.
    if extra_working_days and day in extra_working_days:
        return True
    if day.weekday() in self.get_weekend_days():
        return False
    return not self.is_holiday(day, extra_holidays=extra_holidays)
Return True if it's a working day. In addition to the regular holidays, you can add exceptions.
37,606
def is_holiday(self, day, extra_holidays=None):
    """Return True when *day* is a holiday — either a regular one or a
    member of the optional ``extra_holidays`` exceptions."""
    day = cleaned_date(day)
    if extra_holidays:
        extra_holidays = tuple(map(cleaned_date, extra_holidays))
        if day in extra_holidays:
            return True
    return day in self.holidays_set(day.year)
Return True if it's a holiday. In addition to the regular holidays, you can add exceptions.
37,607
def add_working_days(self, day, delta, extra_working_days=None,
                     extra_holidays=None, keep_datetime=False):
    """Return *day* shifted by *delta* working days (negative *delta*
    walks backwards).

    ``extra_working_days`` / ``extra_holidays`` behave as in
    ``is_working_day``.  Unless ``keep_datetime`` is set, a datetime
    input is reduced to a date before stepping.
    """
    day = cleaned_date(day, keep_datetime)
    if extra_working_days:
        extra_working_days = tuple(map(cleaned_date, extra_working_days))
    if extra_holidays:
        extra_holidays = tuple(map(cleaned_date, extra_holidays))
    current = day
    if type(current) is datetime and not keep_datetime:
        current = current.date()
    step = 1 if delta >= 0 else -1
    remaining = abs(delta)
    # Walk one calendar day at a time, consuming a unit of *delta* only
    # when landing on a working day.
    while remaining > 0:
        current = current + timedelta(days=step)
        if self.is_working_day(current,
                               extra_working_days=extra_working_days,
                               extra_holidays=extra_holidays):
            remaining -= 1
    return current
Add delta working days to the date .
37,608
def sub_working_days(self, day, delta, extra_working_days=None,
                     extra_holidays=None, keep_datetime=False):
    """Subtract *delta* working days from *day*.

    The sign of *delta* is ignored: this always walks backwards, by
    delegating to ``add_working_days`` with a negative delta.
    """
    return self.add_working_days(
        day, -abs(delta), extra_working_days, extra_holidays,
        keep_datetime=keep_datetime)
Subtract delta working days from the date.
37,609
def find_following_working_day(self, day):
    """Return *day* itself when it is not a weekend day, otherwise the
    first following non-weekend day.

    NOTE(review): only weekend days are skipped here, not holidays.
    """
    day = cleaned_date(day)
    while day.weekday() in self.get_weekend_days():
        day = day + timedelta(days=1)
    return day
Looks for the following working day if not already a working day .
37,610
def get_first_weekday_after(day, weekday):
    """Return the first date on or after *day* whose ``weekday()`` is
    *weekday* (0 = Monday).

    When *day* already falls on that weekday, *day* is returned
    unchanged.
    """
    offset = (weekday - day.weekday()) % 7
    return day + timedelta(days=offset)
Get the first weekday after a given day . If the day is the same weekday the same day will be returned .
37,611
def get_working_days_delta(self, start, end):
    """Return the number of working days between two dates.

    Counts the working days in the interval (start, end]; argument
    order does not matter.
    """
    start, end = cleaned_date(start), cleaned_date(end)
    if start == end:
        return 0
    if start > end:
        start, end = end, start
    total = 0
    current = start
    while current < end:
        current += timedelta(days=1)
        if self.is_working_day(current):
            total += 1
    return total
Return the number of working days between two given dates. The order of the dates provided doesn't matter.
37,612
def get_holy_thursday(self, year):
    """Return Holy Thursday: three days before Easter Sunday."""
    return self.get_easter_sunday(year) - timedelta(days=3)
Return the date of the last thursday before easter
37,613
def get_good_friday(self, year):
    """Return Good Friday: two days before Easter Sunday."""
    return self.get_easter_sunday(year) - timedelta(days=2)
Return the date of the last friday before easter
37,614
def get_clean_monday(self, year):
    """Return Clean Monday: 48 days before Easter Sunday."""
    return self.get_easter_sunday(year) - timedelta(days=48)
Return the clean monday date
37,615
def get_easter_saturday(self, year):
    """Return Easter Saturday: the day before Easter Sunday."""
    return self.get_easter_sunday(year) - timedelta(days=1)
Return the Easter Saturday date
37,616
def get_easter_monday(self, year):
    """Return Easter Monday: the day after Easter Sunday."""
    return self.get_easter_sunday(year) + timedelta(days=1)
Return the date of the monday after easter
37,617
def get_variable_days(self, year):
    """Return the Christian variable holidays for *year*.

    Starts from the parent class list and appends each feast whose
    ``include_*`` flag is enabled on the mixin.
    """
    days = super(ChristianMixin, self).get_variable_days(year)
    add = days.append
    if self.include_epiphany:
        add((date(year, 1, 6), "Epiphany"))
    if self.include_clean_monday:
        add((self.get_clean_monday(year), "Clean Monday"))
    if self.include_annunciation:
        add((date(year, 3, 25), "Annunciation"))
    if self.include_ash_wednesday:
        add((self.get_ash_wednesday(year), self.ash_wednesday_label))
    if self.include_palm_sunday:
        add((self.get_palm_sunday(year), "Palm Sunday"))
    if self.include_holy_thursday:
        add((self.get_holy_thursday(year), "Holy Thursday"))
    if self.include_good_friday:
        add((self.get_good_friday(year), self.good_friday_label))
    if self.include_easter_saturday:
        add((self.get_easter_saturday(year), "Easter Saturday"))
    if self.include_easter_sunday:
        add((self.get_easter_sunday(year), "Easter Sunday"))
    if self.include_easter_monday:
        add((self.get_easter_monday(year), "Easter Monday"))
    if self.include_assumption:
        add((date(year, 8, 15), "Assumption of Mary to Heaven"))
    if self.include_all_saints:
        add((date(year, 11, 1), "All Saints Day"))
    if self.include_all_souls:
        add((date(year, 11, 2), "All Souls Day"))
    if self.include_immaculate_conception:
        add((date(year, 12, 8), self.immaculate_conception_label))
    if self.include_christmas:
        add((date(year, 12, 25), "Christmas Day"))
    if self.include_christmas_eve:
        add((date(year, 12, 24), "Christmas Eve"))
    if self.include_boxing_day:
        add((date(year, 12, 26), self.boxing_day_label))
    if self.include_ascension:
        add((self.get_ascension_thursday(year), "Ascension Thursday"))
    if self.include_whit_monday:
        add((self.get_whit_monday(year), self.whit_monday_label))
    if self.include_whit_sunday:
        add((self.get_whit_sunday(year), self.whit_sunday_label))
    if self.include_corpus_christi:
        add((self.get_corpus_christi(year), "Corpus Christi"))
    return days
Return the christian holidays list according to the mixin
37,618
def get_chinese_new_year(self, year):
    """Compute the Chinese New Year holiday list for *year*.

    Returns (date, label) tuples for the eve / first / second / third
    day, driven by the ``include_*`` flags, plus "shift" days when
    ``shift_sunday_holidays`` is set and one of those days falls on a
    Sunday.
    """
    days = []
    lunar_first_day = ChineseNewYearCalendar.lunar(year, 1, 1)
    if self.include_chinese_new_year_eve:
        days.append((lunar_first_day - timedelta(days=1), self.chinese_new_year_eve_label))
    if self.include_chinese_new_year:
        days.append((lunar_first_day, self.chinese_new_year_label))
    if self.include_chinese_second_day:
        lunar_second_day = lunar_first_day + timedelta(days=1)
        days.append((lunar_second_day, self.chinese_second_day_label))
    if self.include_chinese_third_day:
        lunar_third_day = lunar_first_day + timedelta(days=2)
        days.append((lunar_third_day, self.chinese_third_day_label))
    if self.shift_sunday_holidays:
        # Shift rules only apply when the New Year period hits a Sunday.
        if lunar_first_day.weekday() == SUN:
            if self.shift_start_cny_sunday:
                # Shift backwards: add the preceding Saturday.
                days.append((lunar_first_day - timedelta(days=1), "Chinese Lunar New Year shift"),)
            else:
                # Shift forwards, to the day after the last included day.
                # NOTE(review): relies on lunar_second_day/lunar_third_day
                # having been bound above — assumes the include flags for
                # those days are set whenever this branch runs; confirm.
                if self.include_chinese_third_day:
                    shift_day = lunar_third_day
                else:
                    shift_day = lunar_second_day
                days.append((shift_day + timedelta(days=1), "Chinese Lunar New Year shift"),)
        if (lunar_second_day.weekday() == SUN and self.include_chinese_third_day):
            days.append((lunar_third_day + timedelta(days=1), "Chinese Lunar New Year shift"),)
    return days
Compute Chinese New Year days . To return a list of holidays .
37,619
def get_shifted_holidays(self, dates):
    """Yield a (shifted date, label) pair for each holiday in *dates*
    that falls on a Sunday, moving it to the following Monday and
    appending ' shift' to its label."""
    for day, label in dates:
        if day.weekday() == SUN:
            yield day + timedelta(days=1), label + ' shift'
Taking a list of existing holidays yield a list of shifted days if the holiday falls on SUN .
37,620
def get_calendar_holidays(self, year):
    """Return the calendar holidays for *year*, adding a next-Monday
    shift for any holiday falling on a Sunday when
    ``shift_sunday_holidays`` is enabled."""
    days = super(ChineseNewYearCalendar, self).get_calendar_holidays(year)
    if self.shift_sunday_holidays:
        # Inspect a snapshot so the appended shift days are not
        # themselves re-examined.
        days.extend(self.get_shifted_holidays(copy(days)))
    return days
Take into account the eventual shift to the next MON if any holiday falls on SUN .
37,621
def calculate_equinoxes(self, year, timezone='UTC'):
    """Return the two equinox dates of *year*, adjusted from UTC to the
    given *timezone* via its UTC offset."""
    tz = pytz.timezone(timezone)

    def to_local_date(raw):
        # Convert an ephem event into a timezone-adjusted date.
        moment = ephem.Date(str(raw)).datetime()
        return (moment + tz.utcoffset(moment)).date()

    first = ephem.next_equinox(str(year))
    second = ephem.next_equinox(first)
    return (to_local_date(first), to_local_date(second))
calculate equinox with time zone
37,622
def solar_term(self, year, degrees, timezone='UTC'):
    """Return the date of the solar term at solar longitude *degrees*
    for *year*, adjusted to *timezone*.

    Finds the instant the sun reaches the target longitude using
    ephem's Newton root-finder, starting from a proportional guess
    along the year.
    """
    twopi = 2 * pi
    tz = pytz.timezone(timezone)
    sun = ephem.Sun(ephem.Date(str(year)))
    # hlong shifted by pi — presumably converting heliocentric to the
    # sun's apparent longitude; confirm against the ephem docs.
    current_longitude = sun.hlong - pi
    target_longitude = degrees * ephem.degree
    difference = (target_longitude - current_longitude) % twopi
    # Initial guess: advance proportionally along a 365.25-day year.
    t0 = ephem.Date(str(year)) + 365.25 * difference / twopi

    def f(t):
        # Normalized angular distance to the target; zero at the term.
        sun.compute(t)
        longitude = sun.hlong - pi
        return ephem.degrees(target_longitude - longitude).znorm

    d = ephem.Date(ephem.newton(f, t0, t0 + ephem.minute))
    solar_term = d.datetime() + tz.utcoffset(d.datetime())
    return solar_term.date()
Returns the date of the solar term for the given longitude and the given year .
37,623
def get_spring_holiday(self, year):
    """Return Edinburgh's Spring Holiday: the 3rd Monday of April, or
    the 2nd Monday when the 3rd coincides with Easter Monday."""
    easter_monday = self.get_easter_monday(year)
    holiday = self.get_nth_weekday_in_month(year, 4, MON, 3)
    if holiday == easter_monday:
        holiday = self.get_nth_weekday_in_month(year, 4, MON, 2)
    return (holiday, self.spring_holiday_label)
Return Spring Holiday for Edinburgh .
37,624
def get_victoria_day(self, year):
    """Return Victoria Day (Edinburgh): the Monday strictly preceding
    May 24th."""
    anchor = date(year, 5, 24)
    # weekday() is 0 on Monday; when the 24th is itself a Monday we
    # still step a full week back ("strictly preceding").
    days_back = anchor.weekday() or 7
    return (anchor - timedelta(days=days_back), "Victoria Day")
Return Victoria Day for Edinburgh .
37,625
def read_relative_file(filename):
    """Return the UTF-8 text content of *filename*, resolved relative
    to this module's directory."""
    path = join(dirname(abspath(__file__)), filename)
    with io.open(path, encoding='utf-8') as stream:
        return stream.read()
Return the contents of the given file .
37,626
def lonely_buckets(self):
    """Return the buckets whose ``last_updated`` timestamp (monotonic
    clock) is more than one hour old."""
    cutoff = time.monotonic() - 3600
    return [bucket for bucket in self.buckets
            if bucket.last_updated < cutoff]
Get all of the buckets that haven t been updated in over an hour .
37,627
def get_bucket_for(self, node):
    """Return the index of the first bucket whose upper range bound
    exceeds the node's long_id, or None when no bucket matches."""
    return next(
        (index for index, bucket in enumerate(self.buckets)
         if node.long_id < bucket.range[1]),
        None,
    )
Get the index of the bucket that the given node would fall into .
37,628
async def _find(self, rpcmethod):
    """Run one crawl round toward the target using *rpcmethod*.

    Contacts up to ``alpha`` of the nearest uncontacted peers in
    parallel — or all of them when the nearest set did not change since
    the previous round — then recurses via ``_nodes_found``.  Returns
    either a value or a list of nodes.
    """
    log.info("crawling network with nearest: %s", str(tuple(self.nearest)))
    count = self.alpha
    if self.nearest.get_ids() == self.last_ids_crawled:
        # No progress since last round: widen the probe to every peer.
        count = len(self.nearest)
    self.last_ids_crawled = self.nearest.get_ids()
    dicts = {}
    for peer in self.nearest.get_uncontacted()[:count]:
        # Fire the RPCs without awaiting; gather_dict collects them.
        dicts[peer.id] = rpcmethod(peer, self.node)
        self.nearest.mark_contacted(peer)
    found = await gather_dict(dicts)
    return await self._nodes_found(found)
Get either a value or list of nodes .
37,629
def check_dht_value_type(value):
    """Check that *value* has one of the exact types storable in the
    DHT: int, float, bool, str or bytes.

    Compares ``type(value)`` directly, so subclasses of these types are
    rejected.
    """
    return type(value) in (int, float, bool, str, bytes)
Checks to see if the type of the value is a valid type for placing in the dht .
37,630
async def listen(self, port, interface='0.0.0.0'):
    """Start listening for datagrams on *interface*:*port*, then kick
    off the routing-table refresh loop."""
    loop = asyncio.get_event_loop()
    endpoint = loop.create_datagram_endpoint(
        self._create_protocol, local_addr=(interface, port))
    log.info("Node %i listening on %s:%i",
             self.node.long_id, interface, port)
    self.transport, self.protocol = await endpoint
    # Schedule the recurring refresh now that we are reachable.
    self.refresh_table()
Start listening on the given port .
37,631
async def bootstrap(self, addrs):
    """Bootstrap this node from the given (host, port) contacts, then
    crawl outwards from whichever peers answered."""
    log.debug("Attempting to bootstrap node with %i initial contacts",
              len(addrs))
    results = await asyncio.gather(
        *(self.bootstrap_node(addr) for addr in addrs))
    nodes = [node for node in results if node is not None]
    spider = NodeSpiderCrawl(self.protocol, self.node, nodes,
                             self.ksize, self.alpha)
    return await spider.find()
Bootstrap the server by connecting to other known nodes in the network .
37,632
async def get(self, key):
    """Get the value for *key*, from local storage when possible,
    otherwise by crawling the network.

    Returns None when no neighbors are known.
    """
    log.info("Looking up key %s", key)
    dkey = digest(key)
    # Single storage lookup instead of the original double .get().
    value = self.storage.get(dkey)
    if value is not None:
        return value
    node = Node(dkey)
    nearest = self.protocol.router.find_neighbors(node)
    if not nearest:
        log.warning("There are no known neighbors to get key %s", key)
        return None
    spider = ValueSpiderCrawl(self.protocol, node, nearest,
                              self.ksize, self.alpha)
    return await spider.find()
Get a key if the network has it .
37,633
async def set(self, key, value):
    """Store *value* under the string *key* across the network.

    Raises TypeError when *value* is not a DHT-storable type.
    """
    if not check_dht_value_type(value):
        raise TypeError(
            "Value must be of type int, float, bool, str, or bytes")
    log.info("setting '%s' = '%s' on network", key, value)
    dkey = digest(key)
    return await self.set_digest(dkey, value)
Set the given string key to the given value in the network .
37,634
def save_state_regularly(self, fname, frequency=600):
    """Save the node state to *fname* immediately and re-schedule this
    method every *frequency* seconds on the event loop."""
    self.save_state(fname)
    loop = asyncio.get_event_loop()
    self.save_state_loop = loop.call_later(
        frequency, self.save_state_regularly, fname, frequency)
Save the state of node with a given regularity to the given filename .
37,635
def push(self, nodes):
    """Push one node or a list of nodes onto the heap, keyed by XOR
    distance from our node; nodes already present are skipped."""
    if not isinstance(nodes, list):
        nodes = [nodes]
    for node in nodes:
        if node in self:
            continue
        heapq.heappush(self.heap, (self.node.distance_to(node), node))
Push nodes onto heap .
37,636
def shared_prefix(args):
    """Return the longest prefix shared by every string in *args*."""
    length = 0
    limit = min(map(len, args))
    while length < limit:
        if len({text[length] for text in args}) != 1:
            break
        length += 1
    return args[0][:length]
Find the shared prefix between the strings .
37,637
def get_refresh_ids(self):
    """Return one random ID (20 big-endian bytes) inside each stale
    bucket, to seed refresh lookups."""
    return [
        random.randint(*bucket.range).to_bytes(20, byteorder='big')
        for bucket in self.router.lonely_buckets()
    ]
Get ids to search for to keep old buckets up to date .
37,638
def handle_call_response(self, result, node):
    """Record the outcome of an RPC call and return *result* as-is.

    Responsive nodes are welcomed into the routing table; unresponsive
    ones are removed from it.
    """
    responded = result[0]
    if not responded:
        log.warning("no response from %s, removing from router", node)
        self.router.remove_contact(node)
        return result
    log.info("got successful response from %s", node)
    self.welcome_if_new(node)
    return result
If we get a response add the node to the routing table . If we get no response make sure it s removed from the routing table .
37,639
def deinit(self):
    """Blank every pixel, push the dark frame to the strip, and release
    the underlying pin."""
    for index in range(len(self.buf)):
        self.buf[index] = 0
    neopixel_write(self.pin, self.buf)
    self.pin.deinit()
Blank out the NeoPixels and release the pin .
37,640
def show(self):
    """Write the pixel buffer out to the strip, scaling each byte by
    the brightness factor when it is below full."""
    if self.brightness > 0.99:
        # Effectively full brightness: send the raw buffer unscaled.
        neopixel_write(self.pin, self.buf)
        return
    scaled = bytearray(int(level * self.brightness) for level in self.buf)
    neopixel_write(self.pin, scaled)
Shows the new colors on the pixels themselves if they haven t already been autowritten .
37,641
def _set_k8s_attribute(obj, attribute, value):
    """Set *value* on a kubernetes object attribute given its JSON
    (camelCase) name *attribute*.

    Dict values are merged into and list values appended to any current
    value; anything else overwrites.  Raises ValueError for unknown
    attribute names.
    """
    # Map the JSON attribute name back to the python attribute name.
    for python_attribute, json_attribute in obj.attribute_map.items():
        if json_attribute == attribute:
            attribute_name = python_attribute
            break
    else:
        raise ValueError(
            'Attribute must be one of {}'.format(obj.attribute_map.values()))
    current_value = None
    if hasattr(obj, attribute_name):
        current_value = getattr(obj, attribute_name)
    if current_value is not None:
        # Normalize to plain python structures before merging.
        current_value = SERIALIZATION_API_CLIENT.sanitize_for_serialization(
            current_value)
    if isinstance(current_value, dict):
        setattr(obj, attribute_name, merge_dictionaries(current_value, value))
    elif isinstance(current_value, list):
        setattr(obj, attribute_name, current_value + value)
    else:
        setattr(obj, attribute_name, value)
Set a specific value on a kubernetes object s attribute
37,642
def merge_dictionaries(a, b, path=None, update=True):
    """Recursively merge *b* into *a*, mutating and returning *a*.

    Nested dicts are merged, equal leaves are kept, lists are merged
    element-wise, and conflicting leaves are overwritten when *update*
    is true (otherwise an Exception is raised).
    """
    path = path or []
    for key, b_value in b.items():
        if key not in a:
            a[key] = b_value
            continue
        a_value = a[key]
        if isinstance(a_value, dict) and isinstance(b_value, dict):
            # NOTE(review): the update flag is not propagated into the
            # nested-dict merge — behavior preserved from the original.
            merge_dictionaries(a_value, b_value, path + [str(key)])
        elif a_value == b_value:
            pass  # identical leaves: nothing to do
        elif isinstance(a_value, list) and isinstance(b_value, list):
            for idx, _ in enumerate(b_value):
                a_value[idx] = merge_dictionaries(
                    a_value[idx], b_value[idx],
                    path + [str(key), str(idx)], update=update)
        elif update:
            a[key] = b_value
        else:
            raise Exception('Conflict at %s' % '.'.join(path + [str(key)]))
    return a
Merge two dictionaries recursively .
37,643
def make_pod_spec(
    image,
    labels=None,
    threads_per_worker=1,
    env=None,
    extra_container_config=None,
    extra_pod_config=None,
    memory_limit=None,
    memory_request=None,
    cpu_limit=None,
    cpu_request=None,
):
    """Create a generic dask-worker pod template.

    Args:
        image: container image for the worker.
        labels: pod metadata labels (dict).
        threads_per_worker: passed to dask-worker via --nthreads.
        env: environment variables for the worker container (dict).
        extra_container_config / extra_pod_config: extra kubernetes
            attributes (JSON names) applied through _set_k8s_attribute.
        memory_limit / memory_request / cpu_limit / cpu_request:
            optional resource constraints.

    Returns:
        A kubernetes client.V1Pod object.
    """
    # Fix: the original used mutable default arguments ({}), which are
    # shared across calls; default to fresh dicts instead.
    labels = {} if labels is None else labels
    env = {} if env is None else env
    extra_container_config = extra_container_config or {}
    extra_pod_config = extra_pod_config or {}
    args = [
        'dask-worker',
        '$(DASK_SCHEDULER_ADDRESS)',
        '--nthreads', str(threads_per_worker),
        '--death-timeout', '60',
    ]
    if memory_limit:
        args.extend(['--memory-limit', str(memory_limit)])
    pod = client.V1Pod(
        metadata=client.V1ObjectMeta(labels=labels),
        spec=client.V1PodSpec(
            restart_policy='Never',
            containers=[
                client.V1Container(
                    name='dask-worker',
                    image=image,
                    args=args,
                    env=[client.V1EnvVar(name=k, value=v)
                         for k, v in env.items()],
                )
            ],
            tolerations=[
                client.V1Toleration(
                    key='k8s.dask.org/dedicated',
                    operator='Equal',
                    value='worker',
                    effect='NoSchedule',
                ),
                client.V1Toleration(
                    key='k8s.dask.org_dedicated',
                    operator='Equal',
                    value='worker',
                    effect='NoSchedule',
                ),
            ],
        ),
    )
    resources = client.V1ResourceRequirements(limits={}, requests={})
    if cpu_request:
        resources.requests['cpu'] = cpu_request
    if memory_request:
        resources.requests['memory'] = memory_request
    if cpu_limit:
        resources.limits['cpu'] = cpu_limit
    if memory_limit:
        resources.limits['memory'] = memory_limit
    pod.spec.containers[0].resources = resources
    for key, value in extra_container_config.items():
        _set_k8s_attribute(pod.spec.containers[0], key, value)
    for key, value in extra_pod_config.items():
        _set_k8s_attribute(pod.spec, key, value)
    return pod
Create generic pod template from input parameters
37,644
def clean_pod_template(pod_template):
    """Normalize a V1Pod template and reject common misuse.

    Raises TypeError (pointing at the correct constructor) when given a
    YAML path string or a plain dict instead of a V1Pod.  Returns a
    deep copy whose metadata, labels and first container env are
    guaranteed non-None.
    """
    if isinstance(pod_template, str):
        # Fix: the original message had no separator after %s, producing
        # "got <x>If trying..." — add ". " between the sentences.
        msg = ('Expected a kubernetes.client.V1Pod object, got %s. '
               'If trying to pass a yaml filename then use '
               'KubeCluster.from_yaml')
        raise TypeError(msg % pod_template)
    if isinstance(pod_template, dict):
        msg = ('Expected a kubernetes.client.V1Pod object, got %s. '
               'If trying to pass a dictionary specification then use '
               'KubeCluster.from_dict')
        raise TypeError(msg % str(pod_template))
    # Never mutate the caller's template.
    pod_template = copy.deepcopy(pod_template)
    if pod_template.metadata is None:
        pod_template.metadata = client.V1ObjectMeta()
    if pod_template.metadata.labels is None:
        pod_template.metadata.labels = {}
    if pod_template.spec.containers[0].env is None:
        pod_template.spec.containers[0].env = []
    return pod_template
Normalize pod template and check for type errors
37,645
def _cleanup_pods(namespace, labels):
    """Delete every pod matching *labels* in *namespace*.

    Pods that are already gone (404) are ignored; any other API error
    propagates.
    """
    api = kubernetes.client.CoreV1Api()
    selector = format_labels(labels)
    pod_list = api.list_namespaced_pod(namespace, label_selector=selector)
    for pod in pod_list.items:
        try:
            api.delete_namespaced_pod(pod.metadata.name, namespace)
            logger.info('Deleted pod: %s', pod.metadata.name)
        except kubernetes.client.rest.ApiException as e:
            # Already deleted is fine; anything else is a real failure.
            if e.status != 404:
                raise
Remove all pods with these labels in this namespace
37,646
def format_labels(labels):
    """Convert a dict of labels into the comma-separated ``k=v`` string
    kubernetes label selectors expect; falsy input yields ''."""
    if not labels:
        return ''
    return ','.join('{}={}'.format(k, v) for k, v in labels.items())
Convert a dictionary of labels into a comma separated string
37,647
def _namespace_default ( ) : ns_path = '/var/run/secrets/kubernetes.io/serviceaccount/namespace' if os . path . exists ( ns_path ) : with open ( ns_path ) as f : return f . read ( ) . strip ( ) return 'default'
Get current namespace if running in a k8s cluster
37,648
def select_workers_to_close ( scheduler , n_to_close ) : workers = list ( scheduler . workers . values ( ) ) assert n_to_close <= len ( workers ) key = lambda ws : ws . metrics [ 'memory' ] to_close = set ( sorted ( scheduler . idle , key = key ) [ : n_to_close ] ) if len ( to_close ) < n_to_close : rest = sorted ( workers , key = key , reverse = True ) while len ( to_close ) < n_to_close : to_close . add ( rest . pop ( ) ) return [ ws . address for ws in to_close ]
Select n workers to close from scheduler
37,649
def from_yaml(cls, yaml_path, **kwargs):
    """Create a cluster whose worker pod spec is defined by a YAML
    file.

    Requires PyYAML; environment variables inside the file are
    expanded before the spec is handed to ``from_dict``.
    """
    if not yaml:
        raise ImportError(
            "PyYaml is required to use yaml functionality, please install it!")
    with open(yaml_path) as f:
        spec = yaml.safe_load(f)
    spec = dask.config.expand_environment_variables(spec)
    return cls.from_dict(spec, **kwargs)
Create cluster with worker pod spec defined by a YAML file
37,650
def pods(self):
    """Return the kubernetes pods matching this cluster's template
    labels, i.e. the current worker pods."""
    selector = format_labels(self.pod_template.metadata.labels)
    return self.core_api.list_namespaced_pod(
        self.namespace, label_selector=selector).items
A list of kubernetes pods corresponding to current workers
37,651
def logs(self, pod=None):
    """Return the log text of *pod*, or, when *pod* is None, a dict
    mapping each worker pod's IP to its logs."""
    if pod is not None:
        return self.core_api.read_namespaced_pod_log(
            pod.metadata.name, pod.metadata.namespace)
    return {p.status.pod_ip: self.logs(p) for p in self.pods()}
Logs from a worker pod
37,652
def scale(self, n):
    """Scale the cluster to *n* workers.

    Scaling up delegates to ``scale_up``.  Scaling down deletes
    pending (not-yet-connected) pods first, then gracefully retires
    running workers chosen by ``select_workers_to_close``.
    """
    pods = self._cleanup_terminated_pods(self.pods())
    if n >= len(pods):
        return self.scale_up(n, pods=pods)
    else:
        n_to_delete = len(pods) - n
        # Pods whose IP is not among the connected workers are still
        # pending — cheapest to delete outright.
        running_workers = list(self.scheduler.workers.keys())
        running_ips = set(urlparse(worker).hostname
                          for worker in running_workers)
        pending_pods = [p for p in pods if p.status.pod_ip not in running_ips]
        if pending_pods:
            pending_to_delete = pending_pods[:n_to_delete]
            logger.debug("Deleting pending pods: %s", pending_to_delete)
            self._delete_pods(pending_to_delete)
            n_to_delete = n_to_delete - len(pending_to_delete)
            if n_to_delete <= 0:
                return
        to_close = select_workers_to_close(self.scheduler, n_to_delete)
        logger.debug("Closing workers: %s", to_close)
        if len(to_close) < len(self.scheduler.workers):
            # Retire gracefully on the scheduler loop, then delete the
            # pods off-thread.
            @gen.coroutine
            def f(to_close):
                yield self.scheduler.retire_workers(
                    workers=to_close, remove=True, close_workers=True)
                yield offload(self.scale_down, to_close)
            self.scheduler.loop.add_callback(f, to_close)
            return
        # Closing every worker: skip graceful retirement.
        self.scale_down(to_close)
Scale cluster to n workers
37,653
def scale_up(self, n, pods=None, **kwargs):
    """Ensure *n* dask-worker pods exist for this cluster.

    Caps *n* at the configured ``kubernetes.count.max``, then creates
    the missing pods, retrying up to 3 times on transient
    500/ServerTimeout errors from the API server.  Returns the newly
    created pod objects.
    """
    maximum = dask.config.get('kubernetes.count.max')
    if maximum is not None and maximum < n:
        logger.info("Tried to scale beyond maximum number of workers %d > %d",
                    n, maximum)
        n = maximum
    pods = pods or self._cleanup_terminated_pods(self.pods())
    to_create = n - len(pods)
    new_pods = []
    for i in range(3):
        try:
            for _ in range(to_create):
                new_pods.append(
                    self.core_api.create_namespaced_pod(
                        self.namespace, self.pod_template))
                # Decrement as we go so a retry only creates the rest.
                to_create -= 1
            break
        except kubernetes.client.rest.ApiException as e:
            if e.status == 500 and 'ServerTimeout' in e.body:
                # Transient API-server overload: wait and retry.
                logger.info("Server timeout, retry #%d", i + 1)
                time.sleep(1)
                last_exception = e
                continue
            else:
                raise
    else:
        # All retries timed out.
        raise last_exception
    return new_pods
Make sure we have n dask - workers available for this cluster
37,654
def scale_down(self, workers, pods=None):
    """Delete the pods backing the requested worker addresses, matched
    by pod IP."""
    pods = pods or self._cleanup_terminated_pods(self.pods())
    worker_ips = set(urlparse(address).hostname for address in workers)
    to_delete = [p for p in pods if p.status.pod_ip in worker_ips]
    if to_delete:
        self._delete_pods(to_delete)
Remove the pods for the requested list of workers
37,655
def close(self, **kwargs):
    """Close this cluster: scale every worker away, then close the
    underlying cluster object."""
    workers = self.cluster.scheduler.workers
    self.scale_down(workers)
    return self.cluster.close(**kwargs)
Close this cluster
37,656
def job_to_dict(job):
    """Convert an APScheduler job into an OrderedDict suitable for
    serialization."""
    data = OrderedDict()
    data['id'] = job.id
    data['name'] = job.name
    data['func'] = job.func_ref
    data['args'] = job.args
    data['kwargs'] = job.kwargs
    data.update(trigger_to_dict(job.trigger))
    if not job.pending:
        # Runtime fields only exist once the job has been scheduled.
        data['misfire_grace_time'] = job.misfire_grace_time
        data['max_instances'] = job.max_instances
        data['next_run_time'] = (None if job.next_run_time is None
                                 else job.next_run_time)
    return data
Converts a job to an OrderedDict .
37,657
def pop_trigger(data):
    """Pop the trigger name and its trigger-specific arguments out of
    *data*, returning ``(trigger_name, trigger_args)``.

    Raises Exception for unsupported trigger names.
    """
    trigger_name = data.pop('trigger')
    arg_names_by_trigger = {
        'date': ('run_date', 'timezone'),
        'interval': ('weeks', 'days', 'hours', 'minutes', 'seconds',
                     'start_date', 'end_date', 'timezone'),
        'cron': ('year', 'month', 'day', 'week', 'day_of_week', 'hour',
                 'minute', 'second', 'start_date', 'end_date', 'timezone'),
    }
    if trigger_name not in arg_names_by_trigger:
        raise Exception('Trigger %s is not supported.' % trigger_name)
    trigger_args = {
        name: data.pop(name)
        for name in arg_names_by_trigger[trigger_name]
        if name in data
    }
    return trigger_name, trigger_args
Pops trigger and trigger args from a given dict .
37,658
def trigger_to_dict(trigger):
    """Convert an APScheduler trigger into an OrderedDict describing
    its type and parameters."""
    data = OrderedDict()
    if isinstance(trigger, DateTrigger):
        data['trigger'] = 'date'
        data['run_date'] = trigger.run_date
    elif isinstance(trigger, IntervalTrigger):
        data['trigger'] = 'interval'
        data['start_date'] = trigger.start_date
        if trigger.end_date:
            data['end_date'] = trigger.end_date
        w, d, hh, mm, ss = extract_timedelta(trigger.interval)
        # Only emit the non-zero components of the interval.
        for name, value in (('weeks', w), ('days', d), ('hours', hh),
                            ('minutes', mm), ('seconds', ss)):
            if value > 0:
                data[name] = value
    elif isinstance(trigger, CronTrigger):
        data['trigger'] = 'cron'
        if trigger.start_date:
            data['start_date'] = trigger.start_date
        if trigger.end_date:
            data['end_date'] = trigger.end_date
        for field in trigger.fields:
            if not field.is_default:
                data[field.name] = str(field)
    else:
        # Unknown trigger type: fall back to its string representation.
        data['trigger'] = str(trigger)
    return data
Converts a trigger to an OrderedDict .
37,659
def fix_job_def(job_def):
    """Normalize a job definition in place.

    Date strings are parsed into datetime objects and a nested trigger
    dict is flattened into the job definition.
    """
    if six.PY2 and isinstance(job_def.get('func'), six.text_type):
        # Python 2: func references must be plain (byte) strings.
        job_def['func'] = str(job_def.get('func'))
    for field in ('start_date', 'end_date', 'run_date'):
        if isinstance(job_def.get(field), six.string_types):
            job_def[field] = dateutil.parser.parse(job_def.get(field))
    if isinstance(job_def.get('trigger'), dict):
        # Flatten {'trigger': {'type': T, **args}} into the job def.
        trigger = job_def.pop('trigger')
        job_def['trigger'] = trigger.pop('type', 'date')
        job_def.update(trigger)
Replaces the datetime in string by datetime object .
37,660
def init_app(self, app):
    """Bind this APScheduler to a Flask application instance.

    Registers itself on the app, then loads configuration, jobs and —
    when enabled — the REST API.
    """
    self.app = app
    self.app.apscheduler = self
    self._load_config()
    self._load_jobs()
    if self.api_enabled:
        self._load_api()
Initialize the APScheduler with a Flask application instance .
37,661
def add_listener(self, callback, mask=EVENT_ALL):
    """Register *callback* for scheduler events matching *mask*
    (all events by default)."""
    self._scheduler.add_listener(callback, mask)
Add a listener for scheduler events .
37,662
def add_job(self, id, func, **kwargs):
    """Add a job to the job list, waking the scheduler if it is
    already running.

    *id* and *func* are mandatory; remaining keyword arguments form
    the job definition, with the name defaulting to the id.
    """
    job_def = dict(kwargs, id=id, func=func)
    job_def['name'] = job_def.get('name') or id
    fix_job_def(job_def)
    return self._scheduler.add_job(**job_def)
Add the given job to the job list and wakes up the scheduler if it s already running .
37,663
def delete_job(self, id, jobstore=None):
    """DEPRECATED alias for ``remove_job``; emits a
    DeprecationWarning and delegates."""
    warnings.warn('delete_job has been deprecated, use remove_job instead.',
                  DeprecationWarning)
    self.remove_job(id, jobstore)
DEPRECATED use remove_job instead .
37,664
def modify_job(self, id, jobstore=None, **changes):
    """Modify the properties of a single job, passed as extra keyword
    arguments; a new trigger is applied via ``reschedule_job``."""
    fix_job_def(changes)
    if 'trigger' in changes:
        trigger, trigger_args = pop_trigger(changes)
        self._scheduler.reschedule_job(id, jobstore, trigger, **trigger_args)
    return self._scheduler.modify_job(id, jobstore, **changes)
Modify the properties of a single job . Modifications are passed to this method as extra keyword arguments .
37,665
def _load_config ( self ) : options = dict ( ) job_stores = self . app . config . get ( 'SCHEDULER_JOBSTORES' ) if job_stores : options [ 'jobstores' ] = job_stores executors = self . app . config . get ( 'SCHEDULER_EXECUTORS' ) if executors : options [ 'executors' ] = executors job_defaults = self . app . config . get ( 'SCHEDULER_JOB_DEFAULTS' ) if job_defaults : options [ 'job_defaults' ] = job_defaults timezone = self . app . config . get ( 'SCHEDULER_TIMEZONE' ) if timezone : options [ 'timezone' ] = timezone self . _scheduler . configure ( ** options ) self . auth = self . app . config . get ( 'SCHEDULER_AUTH' , self . auth ) self . api_enabled = self . app . config . get ( 'SCHEDULER_VIEWS_ENABLED' , self . api_enabled ) self . api_enabled = self . app . config . get ( 'SCHEDULER_API_ENABLED' , self . api_enabled ) self . api_prefix = self . app . config . get ( 'SCHEDULER_API_PREFIX' , self . api_prefix ) self . endpoint_prefix = self . app . config . get ( 'SCHEDULER_ENDPOINT_PREFIX' , self . endpoint_prefix ) self . allowed_hosts = self . app . config . get ( 'SCHEDULER_ALLOWED_HOSTS' , self . allowed_hosts )
Load the configuration from the Flask configuration .
37,666
def _load_jobs ( self ) : jobs = self . app . config . get ( 'SCHEDULER_JOBS' ) if not jobs : jobs = self . app . config . get ( 'JOBS' ) if jobs : for job in jobs : self . add_job ( ** job )
Load the job definitions from the Flask configuration .
37,667
def _load_api(self):
    """Register the scheduler REST API routes on the Flask application."""
    routes = (
        ('get_scheduler_info', '', api.get_scheduler_info, 'GET'),
        ('add_job', '/jobs', api.add_job, 'POST'),
        ('get_job', '/jobs/<job_id>', api.get_job, 'GET'),
        ('get_jobs', '/jobs', api.get_jobs, 'GET'),
        ('delete_job', '/jobs/<job_id>', api.delete_job, 'DELETE'),
        ('update_job', '/jobs/<job_id>', api.update_job, 'PATCH'),
        ('pause_job', '/jobs/<job_id>/pause', api.pause_job, 'POST'),
        ('resume_job', '/jobs/<job_id>/resume', api.resume_job, 'POST'),
        ('run_job', '/jobs/<job_id>/run', api.run_job, 'POST'),
    )
    for endpoint, rule, view_func, method in routes:
        self._add_url_route(endpoint, rule, view_func, method)
Add the routes for the scheduler API .
37,668
def _handle_authentication_error(self):
    """Build a 401 response carrying the auth scheme's WWW-Authenticate challenge."""
    denied = make_response('Access Denied')
    denied.headers['WWW-Authenticate'] = self.auth.get_authenticate_header()
    denied.status_code = 401
    return denied
Return an authentication error .
37,669
def get_scheduler_info():
    """Return the current host, allowed hosts and running state as JSON."""
    scheduler = current_app.apscheduler
    info = OrderedDict()
    info['current_host'] = scheduler.host_name
    info['allowed_hosts'] = scheduler.allowed_hosts
    info['running'] = scheduler.running
    return jsonify(info)
Gets the scheduler info .
37,670
def add_job():
    """Create a job from the JSON request body.

    Returns the created job, a 409 payload when the id already exists, or a
    500 payload on any other failure.
    """
    data = request.get_json(force=True)
    try:
        return jsonify(current_app.apscheduler.add_job(**data))
    except ConflictingIdError:
        message = 'Job %s already exists.' % data.get('id')
        return jsonify(dict(error_message=message), status=409)
    except Exception as e:
        return jsonify(dict(error_message=str(e)), status=500)
Adds a new job .
37,671
def get_job(job_id):
    """Return the job identified by *job_id* as JSON, or a 404 error payload."""
    job = current_app.apscheduler.get_job(job_id)
    if job:
        return jsonify(job)
    return jsonify(dict(error_message='Job %s not found' % job_id), status=404)
Gets a job .
37,672
def get_jobs():
    """Return every scheduled job as a JSON list."""
    return jsonify(list(current_app.apscheduler.get_jobs()))
Gets all scheduled jobs .
37,673
def update_job(job_id):
    """Apply the JSON request body as modifications to the job.

    Returns the updated job, a 404 payload when the job does not exist, or a
    500 payload on any other failure.
    """
    data = request.get_json(force=True)
    try:
        current_app.apscheduler.modify_job(job_id, **data)
        return jsonify(current_app.apscheduler.get_job(job_id))
    except JobLookupError:
        return jsonify(dict(error_message='Job %s not found' % job_id), status=404)
    except Exception as e:
        return jsonify(dict(error_message=str(e)), status=500)
Updates a job .
37,674
def resume_job(job_id):
    """Resume a paused job and return its state.

    Returns a 404 payload when the job does not exist, or a 500 payload on
    any other failure.
    """
    try:
        current_app.apscheduler.resume_job(job_id)
        return jsonify(current_app.apscheduler.get_job(job_id))
    except JobLookupError:
        return jsonify(dict(error_message='Job %s not found' % job_id), status=404)
    except Exception as e:
        return jsonify(dict(error_message=str(e)), status=500)
Resumes a job .
37,675
def get_command(self, ctx: click.Context, name: str) -> click.Command:
    """Resolve *name*, preferring the loaded app's CLI commands.

    Falls back to this group's own commands when the app cannot be loaded or
    does not define the command.
    """
    info = ctx.ensure_object(ScriptInfo)
    try:
        command = info.load_app().cli.get_command(ctx, name)
    except NoAppException:
        # No app available — only the base group's commands can match.
        command = None
    return command if command is not None else super().get_command(ctx, name)
Return the relevant command given the context and name .
37,676
async def render_template(template_name_or_list: Union[str, List[str]], **context: Any) -> str:
    """Render the named template (or the first existing one of a list) with *context*.

    The app's context processors are applied to *context* before rendering.
    """
    await current_app.update_template_context(context)
    template = current_app.jinja_env.get_or_select_template(template_name_or_list)
    return await _render(template, context)
Render the template with the context given .
37,677
async def render_template_string(source: str, **context: Any) -> str:
    """Render the template *source* string with *context*.

    The app's context processors are applied to *context* before rendering.
    """
    await current_app.update_template_context(context)
    template = current_app.jinja_env.from_string(source)
    return await _render(template, context)
Render the template source with the context given .
37,678
def get_source(self, environment: Environment, template: str,
               ) -> Tuple[str, Optional[str], Callable]:
    """Return the template source from the first loader that can supply it.

    Raises ``TemplateNotFound`` when no loader knows *template*.
    """
    for candidate in self._loaders():
        try:
            return candidate.get_source(environment, template)
        except TemplateNotFound:
            pass  # try the next loader
    raise TemplateNotFound(template)
Returns the template source from the environment .
37,679
def list_templates(self) -> List[str]:
    """Return the names of all templates available from any loader, deduplicated."""
    return list({str(name)
                 for loader in self._loaders()
                 for name in loader.list_templates()})
Returns a list of all available templates in the environment.
37,680
def route(
    self,
    path: str,
    methods: Optional[List[str]] = None,
    endpoint: Optional[str] = None,
    defaults: Optional[dict] = None,
    host: Optional[str] = None,
    subdomain: Optional[str] = None,
    *,
    provide_automatic_options: Optional[bool] = None,
    strict_slashes: bool = True,
) -> Callable:
    """Decorator that registers the wrapped function as a URL rule on this blueprint."""
    def decorator(view: Callable) -> Callable:
        self.add_url_rule(
            path,
            endpoint,
            view,
            methods,
            defaults=defaults,
            host=host,
            subdomain=subdomain,
            provide_automatic_options=provide_automatic_options,
            strict_slashes=strict_slashes,
        )
        return view
    return decorator
Add a route to the blueprint .
37,681
def websocket(
    self,
    path: str,
    endpoint: Optional[str] = None,
    defaults: Optional[dict] = None,
    host: Optional[str] = None,
    subdomain: Optional[str] = None,
    *,
    strict_slashes: bool = True,
) -> Callable:
    """Decorator that registers the wrapped function as a websocket on this blueprint."""
    def decorator(view: Callable) -> Callable:
        self.add_websocket(
            path,
            endpoint,
            view,
            defaults=defaults,
            host=host,
            subdomain=subdomain,
            strict_slashes=strict_slashes,
        )
        return view
    return decorator
Add a websocket to the blueprint .
37,682
def add_websocket(
    self,
    path: str,
    endpoint: Optional[str] = None,
    view_func: Optional[Callable] = None,
    defaults: Optional[dict] = None,
    host: Optional[str] = None,
    subdomain: Optional[str] = None,
    *,
    strict_slashes: bool = True,
) -> None:
    """Add a websocket rule to the blueprint.

    Delegates to :meth:`add_url_rule` restricted to ``GET``, marked as a
    websocket, and with automatic OPTIONS handling disabled.
    """
    return self.add_url_rule(
        path,
        endpoint,
        view_func,
        {'GET'},
        defaults=defaults,
        host=host,
        subdomain=subdomain,
        provide_automatic_options=False,
        is_websocket=True,
        strict_slashes=strict_slashes,
    )
Add a websocket rule to the blueprint .
37,683
def endpoint(self, endpoint: str) -> Callable:
    """Decorator attaching the wrapped function to *endpoint* at registration time."""
    def decorator(view: Callable) -> Callable:
        def register(state) -> None:
            state.register_endpoint(endpoint, view)
        self.record_once(register)
        return view
    return decorator
Add an endpoint to the blueprint .
37,684
def before_request(self, func: Callable) -> Callable:
    """Register *func* to run before each request routed to this blueprint."""
    def register(state) -> None:
        state.app.before_request(func, self.name)
    self.record_once(register)
    return func
Add a before request function to the Blueprint .
37,685
def before_websocket(self, func: Callable) -> Callable:
    """Register *func* to run before each websocket routed to this blueprint."""
    def register(state) -> None:
        state.app.before_websocket(func, self.name)
    self.record_once(register)
    return func
Add a before request websocket to the Blueprint .
37,686
def before_app_request(self, func: Callable) -> Callable:
    """Register *func* to run before every request on the whole app."""
    def register(state) -> None:
        state.app.before_request(func)
    self.record_once(register)
    return func
Add a before request function to the app .
37,687
def before_app_websocket(self, func: Callable) -> Callable:
    """Register *func* to run before every websocket on the whole app."""
    def register(state) -> None:
        state.app.before_websocket(func)
    self.record_once(register)
    return func
Add a before request websocket to the App .
37,688
def before_app_first_request(self, func: Callable) -> Callable:
    """Register *func* to run once, before the app's first request."""
    def register(state) -> None:
        state.app.before_first_request(func)
    self.record_once(register)
    return func
Add a before-first-request function to the app.
37,689
def after_request(self, func: Callable) -> Callable:
    """Register *func* to run after each request routed to this blueprint."""
    def register(state) -> None:
        state.app.after_request(func, self.name)
    self.record_once(register)
    return func
Add an after request function to the Blueprint .
37,690
def after_websocket(self, func: Callable) -> Callable:
    """Register *func* to run after each websocket routed to this blueprint."""
    def register(state) -> None:
        state.app.after_websocket(func, self.name)
    self.record_once(register)
    return func
Add an after websocket function to the Blueprint .
37,691
def after_app_request(self, func: Callable) -> Callable:
    """Register *func* to run after every request on the whole app."""
    def register(state) -> None:
        state.app.after_request(func)
    self.record_once(register)
    return func
Add an after request function to the app.
37,692
def after_app_websocket(self, func: Callable) -> Callable:
    """Register *func* to run after every websocket on the whole app."""
    def register(state) -> None:
        state.app.after_websocket(func)
    self.record_once(register)
    return func
Add an after websocket function to the App .
37,693
def teardown_request(self, func: Callable) -> Callable:
    """Register *func* as a teardown-request handler scoped to this blueprint."""
    def register(state) -> None:
        state.app.teardown_request(func, self.name)
    self.record_once(register)
    return func
Add a teardown request function to the Blueprint .
37,694
def teardown_websocket(self, func: Callable) -> Callable:
    """Register *func* as a teardown-websocket handler scoped to this blueprint."""
    def register(state) -> None:
        state.app.teardown_websocket(func, self.name)
    self.record_once(register)
    return func
Add a teardown websocket function to the Blueprint .
37,695
def teardown_app_request(self, func: Callable) -> Callable:
    """Register *func* as an app-wide teardown-request handler."""
    def register(state) -> None:
        state.app.teardown_request(func)
    self.record_once(register)
    return func
Add a teardown request function to the app .
37,696
def errorhandler(self, error: Union[Type[Exception], int]) -> Callable:
    """Decorator registering a handler for *error* scoped to this blueprint."""
    def decorator(handler: Callable) -> Callable:
        self.register_error_handler(error, handler)
        return handler
    return decorator
Add an error handler function to the Blueprint .
37,697
def app_errorhandler(self, error: Union[Type[Exception], int]) -> Callable:
    """Decorator registering an app-wide (not blueprint-scoped) handler for *error*."""
    def decorator(handler: Callable) -> Callable:
        def register(state) -> None:
            state.app.register_error_handler(error, handler)
        self.record_once(register)
        return handler
    return decorator
Add an error handler function to the App .
37,698
def register_error_handler(self, error: Union[Type[Exception], int], func: Callable) -> None:
    """Defer registration of *func* as this blueprint's handler for *error*."""
    def register(state) -> None:
        state.app.register_error_handler(error, func, self.name)
    self.record_once(register)
Add an error handler function to the blueprint .
37,699
def context_processor(self, func: Callable) -> Callable:
    """Register *func* as a template context processor scoped to this blueprint."""
    def register(state) -> None:
        state.app.context_processor(func, self.name)
    self.record_once(register)
    return func
Add a context processor function to this blueprint .