idx
int64
0
251k
question
stringlengths
53
3.53k
target
stringlengths
5
1.23k
len_question
int64
20
893
len_target
int64
3
238
225,500
def element_should_be_visible(self, locator, loglevel='INFO'):
    """Verify that the element identified by ``locator`` is visible.

    On failure, logs the application source at ``loglevel`` and raises
    ``AssertionError``.
    """
    element = self._element_find(locator, True, True)
    if element.is_displayed():
        return
    self.log_source(loglevel)
    raise AssertionError("Element '%s' should be visible "
                         "but did not" % locator)
Verifies that element identified with locator is visible . Key attributes for arbitrary elements are id and name . See introduction for details about locating elements . New in AppiumLibrary 1 . 4 . 5
82
39
225,501
def element_text_should_be(self, locator, expected, message=''):
    """Verify the element's text is exactly ``expected``.

    ``message`` overrides the default failure message when non-empty.
    """
    self._info("Verifying element '%s' contains exactly text '%s'." %
               (locator, expected))
    actual = self._element_find(locator, True, True).text
    if expected == actual:
        return
    if not message:
        message = ("The text of element '%s' should have been '%s' but "
                   "in fact it was '%s'." % (locator, expected, actual))
    raise AssertionError(message)
Verifies element identified by locator exactly contains text expected . In contrast to Element Should Contain Text this keyword does not try a substring match but an exact match on the element identified by locator . message can be used to override the default error message . New in AppiumLibrary 1 . 4 .
125
61
225,502
def get_element_location(self, locator):
    """Return the location of the element identified by ``locator``."""
    found = self._element_find(locator, True, True)
    location = found.location
    self._info("Element '%s' location: %s " % (locator, location))
    return location
Get element location Key attributes for arbitrary elements are id and name . See introduction for details about locating elements .
66
21
225,503
def get_element_size(self, locator):
    """Return the size of the element identified by ``locator``."""
    found = self._element_find(locator, True, True)
    size = found.size
    self._info("Element '%s' size: %s " % (locator, size))
    return size
Get element size Key attributes for arbitrary elements are id and name . See introduction for details about locating elements .
66
21
225,504
def text_should_be_visible(self, text, exact_match=False, loglevel='INFO'):
    """Verify that the element matching ``text`` is visible.

    On failure, logs the application source at ``loglevel`` and raises
    ``AssertionError``.
    """
    match = self._element_find_by_text(text, exact_match)
    if match.is_displayed():
        return
    self.log_source(loglevel)
    raise AssertionError("Text '%s' should be visible "
                         "but did not" % text)
Verifies that element identified with text is visible . New in AppiumLibrary 1 . 4 . 5
89
20
225,505
def close_application(self):
    """Close the current application and its webdriver session."""
    session = self._current_application().session_id
    self._debug('Closing application with session id %s' % session)
    self._cache.close()
Closes the current application and also close webdriver session .
45
12
225,506
def get_appium_sessionId(self):
    """Return the current Appium session ID as a reference."""
    # Fetch the application once; the original called
    # _current_application() twice (once to log, once to return).
    session_id = self._current_application().session_id
    self._info("Appium Session ID: " + session_id)
    return session_id
Returns the current session ID as a reference
51
8
225,507
def lock(self, seconds=5):
    """Lock the device for ``seconds`` (iOS only)."""
    duration = robot.utils.timestr_to_secs(seconds)
    self._current_application().lock(duration)
Lock the device for a certain period of time . iOS only .
38
13
225,508
def get_capability(self, capability_name):
    """Return the desired-capability value for ``capability_name``.

    Raises ``KeyError`` when the capability is missing — exactly what
    the original's ``except Exception as e: raise e`` re-raise did, so
    that no-op try/except is removed.
    """
    return self._current_application().capabilities[capability_name]
Return the desired capability value by desired capability name
41
9
225,509
def _find_by_android(self, browser, criteria, tag, constraints):
    """Find elements via Android UIAutomator and filter the matches."""
    matches = browser.find_elements_by_android_uiautomator(criteria)
    return self._filter_elements(matches, tag, constraints)
Find element matches by UI Automator .
52
8
225,510
def _find_by_ios(self, browser, criteria, tag, constraints):
    """Find elements via iOS UIAutomation and filter the matches."""
    matches = browser.find_elements_by_ios_uiautomation(criteria)
    return self._filter_elements(matches, tag, constraints)
Find element matches by UI Automation .
52
8
225,511
def _find_by_nsp(self, browser, criteria, tag, constraints):
    """Find elements via an iOS NSPredicate string and filter the matches."""
    matches = browser.find_elements_by_ios_predicate(criteria)
    return self._filter_elements(matches, tag, constraints)
Find element matches by iOSNsPredicateString .
50
10
225,512
def _find_by_chain(self, browser, criteria, tag, constraints):
    """Find elements via an iOS class chain string and filter the matches."""
    matches = browser.find_elements_by_ios_class_chain(criteria)
    return self._filter_elements(matches, tag, constraints)
Find element matches by iOSChainString .
50
8
225,513
def press_keycode(self, keycode, metastate=None):
    """Send a press of ``keycode`` (with optional ``metastate``) to the device."""
    self._current_application().press_keycode(keycode, metastate)
Sends a press of keycode to the device .
40
11
225,514
def long_press_keycode(self, keycode, metastate=None):
    """Send a long press of ``keycode`` (coerced to int) to the device."""
    self._current_application().long_press_keycode(int(keycode), metastate)
Sends a long press of keycode to the device .
47
12
225,515
def rapl_read():
    """Read RAPL energy counters from sysfs.

    Returns a list of ``RaplStats(name, energy_uj, max)`` entries, one
    per readable ``/sys/class/powercap/intel-rapl:*`` zone.
    """
    # glob already returns unique paths; the old set-comprehension
    # wrapping (`set({x for x in ...})`) was redundant.
    basenames = sorted(set(glob.glob('/sys/class/powercap/intel-rapl:*/')))
    pjoin = os.path.join
    ret = []
    for path in basenames:
        try:
            name = cat(pjoin(path, 'name'), fallback=None, binary=False)
        except (IOError, OSError, ValueError) as err:
            # Fix: pass the format args separately. The original passed a
            # tuple plus RuntimeWarning as the %-args, producing a garbled
            # log record ("ignoring (err, path) for file RuntimeWarning").
            logging.warning("ignoring %r for file %r", err, path)
            continue
        if not name:
            continue
        try:
            current = cat(pjoin(path, 'energy_uj'))
            # max reading is not tracked here; keep the original 0.0
            ret.append(RaplStats(name, float(current), 0.0))
        except (IOError, OSError, ValueError) as err:
            logging.warning("ignoring %r for file %r", err, path)
    return ret
Read power stats and return dictionary
230
6
225,516
def calculate_bar_widths(self, size, bardata):
    """Return a list with one column width per bar in ``bardata``."""
    maxcol, _ = size
    if self.bar_width is not None:
        # Fixed width: draw as many bars as fit in maxcol columns.
        count = min(len(bardata), int(maxcol / self.bar_width))
        return [self.bar_width] * count
    if len(bardata) >= maxcol:
        # More bars than columns: one column each, truncated.
        return [1] * maxcol
    # Distribute the columns across the bars as evenly as possible.
    widths = []
    remaining_cols = maxcol
    remaining_bars = len(bardata)
    for _ in bardata:
        width = int(float(remaining_cols) / remaining_bars + 0.5)
        widths.append(width)
        remaining_cols -= width
        remaining_bars -= 1
    return widths
Return a list of bar widths one for each bar in data .
138
14
225,517
def set_visible_graphs(self, visible_graph_list=None):
    """Show a column of the graphs selected for display.

    visible_graph_list is a list of booleans, one per graph in
    self.bar_graph_vector; None reuses the previously stored selection.
    Rebuilds self.original_widget from the enabled graphs.
    """
    if visible_graph_list is None:
        visible_graph_list = self.visible_graph_list
    # Vertical separator drawn between adjacent graph columns
    vline = urwid.AttrWrap(urwid.SolidFill(u'|'), 'line')
    graph_vector_column_list = []
    for state, graph, sub_title in zip(visible_graph_list,
                                       self.bar_graph_vector,
                                       self.sub_title_list):
        if state:
            text_w = urwid.Text(sub_title, align='center')
            sub_title_widget = urwid.ListBox([text_w])
            graph_a = [('fixed', 1, sub_title_widget),
                       ('weight', 1, graph)]
            graph_and_title = urwid.Pile(graph_a)
            graph_vector_column_list.append(('weight', 1, graph_and_title))
            graph_vector_column_list.append(('fixed', 1, vline))
    # if all sub graphs are disabled, show an empty pile
    if not graph_vector_column_list:
        self.visible_graph_list = visible_graph_list
        self.original_widget = urwid.Pile([])
        return
    # remove the last vertical line separator
    graph_vector_column_list.pop()
    y_label_a = ('weight', 1, urwid.Columns(graph_vector_column_list))
    y_label_and_graphs = [self.y_label, y_label_a]
    column_w = urwid.Columns(y_label_and_graphs, dividechars=1)
    y_label_and_graphs_widget = urwid.WidgetPlaceholder(column_w)
    init_widget = urwid.Pile([('fixed', 1, self.title),
                              ('weight', 1, y_label_and_graphs_widget)])
    self.visible_graph_list = visible_graph_list
    self.original_widget = init_widget
Show a column of the graph selected for display
470
9
225,518
def get_processor_name():
    """Return the processor model name reported by the system.

    On Linux, parses /proc/cpuinfo (returns bytes); elsewhere falls back
    to platform.processor().
    """
    if platform.system() == "Linux":
        with open("/proc/cpuinfo", "rb") as cpuinfo:
            for line in cpuinfo.readlines():
                if b'model name' in line:
                    return re.sub(b'.*model name.*:', b'', line, 1)
    return platform.processor()
Returns the processor name in the system
92
7
225,519
def kill_child_processes(parent_proc):
    """Kill ``parent_proc`` and all of its children (best effort).

    Silently logs and returns when the process object is missing or
    lacks the expected psutil interface.
    """
    logging.debug("Killing stress process")
    try:
        for child in parent_proc.children(recursive=True):
            logging.debug('Killing %s', child)
            child.kill()
        parent_proc.kill()
    except AttributeError:
        logging.debug('No such process')
        logging.debug('Could not kill process')
Kills a process and all its children
91
8
225,520
def output_to_csv(sources, csv_writeable_file):
    """Append one timestamped row of sensor statistics to a CSV file.

    Writes a header first when the file does not exist yet.
    """
    file_exists = os.path.isfile(csv_writeable_file)
    with open(csv_writeable_file, 'a') as csvfile:
        csv_dict = OrderedDict()
        csv_dict.update({'Time': time.strftime("%Y-%m-%d_%H:%M:%S")})
        for summary in sources.values():
            csv_dict.update(summary.source.get_sensors_summary())
        writer = csv.DictWriter(csvfile, fieldnames=list(csv_dict))
        if not file_exists:
            # file doesn't exist yet, write a header
            writer.writeheader()
        writer.writerow(csv_dict)
Print statistics to csv file
223
6
225,521
def output_to_terminal(sources):
    """Print a one-line summary of all available sources, then exit."""
    results = OrderedDict()
    available = (s for s in sources if s.get_is_available())
    for source in available:
        source.update()
        results.update(source.get_summary())
    for key, value in results.items():
        sys.stdout.write(str(key) + ": " + str(value) + ", ")
    sys.stdout.write("\n")
    sys.exit()
Print statistics to the terminal
103
5
225,522
def output_to_json(sources):
    """Print a JSON summary of all available sources, then exit."""
    results = OrderedDict()
    for source in sources:
        if not source.get_is_available():
            continue
        source.update()
        results[source.get_source_name()] = source.get_sensors_summary()
    print(json.dumps(results, indent=4))
    sys.exit()
Print statistics to the terminal in Json format
88
9
225,523
def make_user_config_dir():
    """Create the s-tui user config directory (with hooks.d) if missing.

    Returns the config path, or None when creation failed.
    """
    config_path = get_user_config_dir()
    if user_config_dir_exists():
        return config_path
    try:
        os.mkdir(config_path)
        os.mkdir(os.path.join(config_path, 'hooks.d'))
    except OSError:
        return None
    return config_path
Create the user's s-tui config directory if it doesn't exist
83
14
225,524
def load_script(self, source_name, timeoutMilliseconds=0):
    """Return a ScriptHook for ``source_name`` or None when no script exists."""
    script_name = self._source_to_script_name(source_name)
    script_path = os.path.join(self.scripts_dir_path, script_name)
    if not os.path.isfile(script_path):
        return None
    return ScriptHook(script_path, timeoutMilliseconds)
Return ScriptHook for source_name Source and with a ready timeout of timeoutMilliseconds
82
19
225,525
def get_sensors_summary(self):
    """Return an OrderedDict mapping sensor names to rounded value strings."""
    names = self.get_sensor_list()
    summary = OrderedDict()
    for idx, value in enumerate(self.last_measurement):
        summary[names[idx]] = str(round(value, 1))
    return summary
This returns a dict of sensor of the source and their values
105
12
225,526
def get_summary(self):
    """Return the source name (with unit) plus all sensor readings."""
    summary = OrderedDict()
    summary[self.get_source_name()] = ('[' + self.measurement_unit + ']')
    summary.update(self.get_sensors_summary())
    return summary
Returns a dict of source name and sensors with their values
76
11
225,527
def eval_hooks(self):
    """Invoke every ready edge hook when the source reports a trigger."""
    logging.debug("Evaluating hooks")
    if not self.get_edge_triggered():
        return
    logging.debug("Hook triggered")
    for hook in self.edge_hooks:
        if hook.is_ready():
            logging.debug("Hook invoked")
            hook.invoke()
Evaluate the current state of this Source and invoke any attached hooks if they ve been triggered
82
19
225,528
def invoke(self):
    """Run the callback, arming the ready timeout when one is configured."""
    # Don't sleep a hook if it has never run
    if self.timeout_milliseconds > 0:
        delay = timedelta(milliseconds=self.timeout_milliseconds)
        self.ready_time = datetime.now() + delay
    self.callback(self.callback_args)
Run callback optionally passing a variable number of arguments callback_args
66
12
225,529
def radio_button(g, l, fn):
    """Create an attribute-wrapped urwid RadioButton in group ``g``."""
    button_w = urwid.RadioButton(g, l, False, on_state_change=fn)
    return urwid.AttrWrap(button_w, 'button normal', 'button select')
Inheriting radio button of urwid
58
8
225,530
def start_stress(self, stress_cmd):
    """Launch ``stress_cmd`` with silenced output and record its handle."""
    with open(os.devnull, 'w') as dev_null:
        try:
            proc = subprocess.Popen(stress_cmd, stdout=dev_null,
                                    stderr=dev_null)
            self.set_stress_process(psutil.Process(proc.pid))
        except OSError:
            logging.debug("Unable to start stress")
Starts a new stress process with a given cmd
98
10
225,531
def update_displayed_information(self):
    """Update all the graphs and summaries that are being displayed."""
    for source in self.controller.sources:
        source_name = source.get_source_name()
        # Only poll a source that feeds at least one visible graph
        # or summary sensor
        if (any(self.graphs_menu.active_sensors[source_name]) or
                any(self.summary_menu.active_sensors[source_name])):
            source.update()
    for graph in self.visible_graphs.values():
        graph.update()
    # update graph summary
    for summary in self.visible_summaries.values():
        summary.update()
    # Only update clock if not in stress mode
    if self.controller.stress_conroller.get_current_mode() != 'Monitor':
        self.clock_view.set_text(seconds_to_text(
            (timeit.default_timer() - self.controller.stress_start_time)))
Update all the graphs that are being displayed
194
8
225,532
def on_reset_button(self, _):
    """Clear all graph data, reset sources and restart the clock display."""
    for visible in self.visible_graphs.values():
        visible.reset()
    for graph in self.graphs.values():
        try:
            graph.source.reset()
        except NotImplementedError:
            # Some sources have nothing to reset
            pass
    # Reset clock
    self.clock_view.set_text(ZERO_TIME)
    self.update_displayed_information()
Reset graph data and display empty graph
87
8
225,533
def on_stress_menu_open(self, widget):
    """Open the stress options menu as an overlay."""
    menu = self.stress_menu
    self.original_widget = urwid.Overlay(
        menu.main_window, self.original_widget,
        ('relative', self.left_margin), menu.get_size()[1],
        ('relative', self.top_margin), menu.get_size()[0])
Open stress options
96
3
225,534
def on_help_menu_open(self, widget):
    """Open the help menu as an overlay."""
    menu = self.help_menu
    self.original_widget = urwid.Overlay(
        menu.main_window, self.original_widget,
        ('relative', self.left_margin), menu.get_size()[1],
        ('relative', self.top_margin), menu.get_size()[0])
Open Help menu
96
3
225,535
def on_about_menu_open(self, widget):
    """Open the about menu as an overlay."""
    menu = self.about_menu
    self.original_widget = urwid.Overlay(
        menu.main_window, self.original_widget,
        ('relative', self.left_margin), menu.get_size()[1],
        ('relative', self.top_margin), menu.get_size()[0])
Open About menu
96
3
225,536
def on_mode_button(self, my_button, state):
    """Notify the controller when a mode radio button becomes selected."""
    if not state:
        return
    # The new mode is the label of the button
    self.controller.set_mode(my_button.get_label())
Notify the controller of a new mode setting .
47
10
225,537
def on_unicode_checkbox ( self , w = None , state = False ) : logging . debug ( "unicode State is %s" , state ) # Update the controller to the state of the checkbox self . controller . smooth_graph_mode = state if state : self . hline = urwid . AttrWrap ( urwid . SolidFill ( u'\N{LOWER ONE QUARTER BLOCK}' ) , 'line' ) else : self . hline = urwid . AttrWrap ( urwid . SolidFill ( u' ' ) , 'line' ) for graph in self . graphs . values ( ) : graph . set_smooth_colors ( state ) self . show_graphs ( )
Enable smooth edges if UTF-8 is supported
161
10
225,538
def _generate_graph_controls(self):
    """Build the sidebar controls: mode radios, buttons and visual options.

    Returns the list of widgets composing the control column.
    """
    # setup mode radio buttons
    stress_modes = self.controller.stress_conroller.get_modes()
    group = []
    for mode in stress_modes:
        self.mode_buttons.append(radio_button(group, mode,
                                              self.on_mode_button))
    # Set default radio button to "Monitor" mode
    self.mode_buttons[0].set_state(True, do_callback=False)
    # Create list of buttons
    control_options = list()
    control_options.append(button('Graphs', self.on_graphs_menu_open))
    control_options.append(button('Summaries', self.on_summary_menu_open))
    # Stress options only make sense when a stress executable exists
    if self.controller.stress_exe:
        control_options.append(button('Stress Options',
                                      self.on_stress_menu_open))
    control_options.append(button("Reset", self.on_reset_button))
    control_options.append(button('Help', self.on_help_menu_open))
    control_options.append(button('About', self.on_about_menu_open))
    control_options.append(button("Save Settings", self.on_save_settings))
    control_options.append(button("Quit", self.on_exit_program))
    # Create the menu
    animate_controls = urwid.GridFlow(control_options, 18, 2, 0, 'center')
    # Create smooth graph selection button
    default_smooth = self.controller.smooth_graph_mode
    if urwid.get_encoding_mode() == "utf8":
        unicode_checkbox = urwid.CheckBox(
            "UTF-8", state=default_smooth,
            on_state_change=self.on_unicode_checkbox)
        # Init the state of the graph according to the selected mode
        self.on_unicode_checkbox(state=default_smooth)
    else:
        unicode_checkbox = urwid.Text("[N/A] UTF-8")
    install_stress_message = urwid.Text("")
    if not self.controller.stress_exe:
        install_stress_message = urwid.Text(
            ('button normal', u"(N/A) install stress"))
    controls = [urwid.Text(('bold text', u"Modes"), align="center")]
    controls += self.mode_buttons
    controls += [install_stress_message,
                 urwid.Text(('bold text', u"Stress Timer"),
                            align="center"),
                 self.clock_view,
                 urwid.Divider(),
                 urwid.Text(('bold text', u"Control Options"),
                            align="center"),
                 animate_controls,
                 urwid.Divider(),
                 urwid.Text(('bold text', u"Visual Options"),
                            align="center"),
                 unicode_checkbox,
                 self.refresh_rate_ctrl,
                 urwid.Divider(),
                 urwid.Text(('bold text', u"Summaries"),
                            align="center"),
                 ]
    return controls
Display sidebar controls . i . e . buttons and controls
728
11
225,539
def _generate_cpu_stats():
    """Return widgets showing the detected CPU model name."""
    header = urwid.Text(('bold text', "CPU Detected"), align="center")
    try:
        name_w = urwid.Text(get_processor_name().strip(), align="center")
    except OSError:
        logging.info("CPU name not available")
        name_w = urwid.Text("CPU Name N/A", align="center")
    return [header, name_w, urwid.Divider()]
Read and display processor name
118
5
225,540
def show_graphs(self):
    """Display a pile of the graphs currently selected as visible."""
    visible = list(self.visible_graphs.values())
    self.graph_place_holder.original_widget = urwid.Pile(visible)
Show a pile of the graphs selected for display
61
10
225,541
def _load_config(self, t_thresh):
    """Configure the display sources from the user configuration.

    This should be the only place where sources are initiated.
    Returns the list of possible sources.
    """
    # Load and configure user config dir when controller starts
    if not user_config_dir_exists():
        user_config_dir = make_user_config_dir()
    else:
        user_config_dir = get_user_config_dir()
    if user_config_dir is None:
        logging.warning("Failed to find or create scripts directory,\
 proceeding without scripting support")
        self.script_hooks_enabled = False
    else:
        self.script_loader = ScriptHookLoader(user_config_dir)
    # Use user config file if one was saved before
    self.conf = None
    if user_config_file_exists():
        self.conf = configparser.ConfigParser()
        self.conf.read(get_user_config_file())
    else:
        logging.debug("Config file not found")
    # Load refresh rate from config; self.conf may be None, hence
    # AttributeError is among the caught exceptions
    try:
        self.refresh_rate = str(self.conf.getfloat(
            'GraphControll', 'refresh'))
        logging.debug("User refresh rate: %s", self.refresh_rate)
    except (AttributeError, ValueError, configparser.NoOptionError,
            configparser.NoSectionError):
        logging.debug("No refresh rate configed")
    # Change UTF8 setting from config
    try:
        if self.conf.getboolean('GraphControll', 'UTF8'):
            self.smooth_graph_mode = True
        else:
            logging.debug("UTF8 selected as %s",
                          self.conf.get('GraphControll', 'UTF8'))
    except (AttributeError, ValueError, configparser.NoOptionError,
            configparser.NoSectionError):
        logging.debug("No user config for utf8")
    # Try to load high temperature threshold if configured
    if t_thresh is None:
        try:
            self.temp_thresh = self.conf.get('GraphControll', 'TTHRESH')
            logging.debug("Temperature threshold set to %s",
                          self.temp_thresh)
        except (AttributeError, ValueError, configparser.NoOptionError,
                configparser.NoSectionError):
            logging.debug("No user config for temp threshold")
    # This should be the only place where sources are configured
    possible_sources = [TempSource(self.temp_thresh),
                        FreqSource(),
                        UtilSource(),
                        RaplPowerSource(),
                        FanSource()]
    # Load sensors config if available
    sources = [x.get_source_name() for x in possible_sources
               if x.get_is_available()]
    for source in sources:
        try:
            options = list(self.conf.items(source + ",Graphs"))
            for option in options:
                # Returns tuples of values in order
                self.graphs_default_conf[source].append(
                    str_to_bool(option[1]))
            options = list(self.conf.items(source + ",Summaries"))
            for option in options:
                # Returns tuples of values in order
                self.summary_default_conf[source].append(
                    str_to_bool(option[1]))
        except (AttributeError, ValueError, configparser.NoOptionError,
                configparser.NoSectionError):
            logging.debug("Error reading sensors config")
    return possible_sources
Uses configurations defined by user to configure sources for display . This should be the only place where sources are initiated
746
22
225,542
def _config_stress(self):
    """Detect installed stress programs and build a StressController."""
    # Prefer 'stress', fall back to 'stress-ng'
    self.stress_exe = which('stress')
    if not self.stress_exe:
        self.stress_exe = which('stress-ng')
    stress_installed = bool(self.stress_exe)
    # FIRESTARTER: local checkout first, then PATH
    self.firestarter = None
    firestarter_installed = False
    if os.path.isfile('./FIRESTARTER/FIRESTARTER'):
        self.firestarter = os.path.join(os.getcwd(), 'FIRESTARTER',
                                        'FIRESTARTER')
        firestarter_installed = True
    else:
        firestarter_exe = which('FIRESTARTER')
        if firestarter_exe is not None:
            self.firestarter = firestarter_exe
            firestarter_installed = True
    return StressController(stress_installed, firestarter_installed)
Configures the possible stress processes and modes
205
8
225,543
def main(self):
    """Start the urwid main loop and the graph animation."""
    loop = MainLoop(self.view, DEFAULT_PALETTE,
                    handle_mouse=self.handle_mouse)
    self.view.show_graphs()
    self.animate_graph(loop)
    try:
        loop.run()
    except (ZeroDivisionError) as err:
        # In case of Zero division, we want an error to return, and
        # get a clue where this happens
        logging.debug("Some stat caused divide by zero exception. Exiting")
        logging.error(err, exc_info=True)
        print(ERROR_MESSAGE)
    except (AttributeError) as err:
        # In this case we restart the loop, to address bug #50, where
        # urwid crashes on multiple presses on 'esc'
        logging.debug("Catch attribute Error in urwid and restart")
        logging.debug(err, exc_info=True)
        self.main()
    except (psutil.NoSuchProcess) as err:
        # This might happen if the stress process is not found, in this
        # case, we want to know why
        logging.error("No such process error")
        logging.error(err, exc_info=True)
        print(ERROR_MESSAGE)
Starts the main loop and graph animation
263
8
225,544
def update_stress_mode(self):
    """Restart the stress workload to match the currently selected mode."""
    self.stress_conroller.kill_stress_process()
    # Start a new clock upon starting a new stress test
    self.view.clock_view.set_text(ZERO_TIME)
    self.stress_start_time = timeit.default_timer()
    mode = self.stress_conroller.get_current_mode()
    if mode == 'Stress':
        self.stress_conroller.start_stress(
            self.view.stress_menu.get_stress_cmd())
    elif mode == 'FIRESTARTER':
        self.stress_conroller.start_stress([self.firestarter])
Updates stress mode according to radio buttons state
172
9
225,545
def save_settings(self):
    """Save the current configuration to a user config file."""
    def _save_displayed_setting(conf, submenu):
        # Persist visibility of every sensor under the "<source>,<submenu>"
        # section of the config file
        for source, visible_sensors in \
                self.view.graphs_menu.active_sensors.items():
            section = source + "," + submenu
            conf.add_section(section)
            sources = self.sources
            logging.debug("Saving settings for %s", source)
            logging.debug("Visible sensors %s", visible_sensors)
            # TODO: consider changing sensors_list to dict
            curr_sensor = [x for x in sources
                           if x.get_source_name() == source][0]
            sensor_list = curr_sensor.get_sensor_list()
            for sensor_id, sensor in enumerate(sensor_list):
                try:
                    conf.set(section, sensor,
                             str(visible_sensors[sensor_id]))
                except IndexError:
                    # Sensor not present in the menu state: default on
                    conf.set(section, sensor, str(True))

    if not user_config_dir_exists():
        make_user_config_dir()
    conf = configparser.ConfigParser()
    config_file = get_user_config_file()
    with open(config_file, 'w') as cfgfile:
        conf.add_section('GraphControll')
        # Save the configured refresh rate
        conf.set('GraphControll', 'refresh', str(self.refresh_rate))
        # Save the configured UTF8 setting
        conf.set('GraphControll', 'UTF8', str(self.smooth_graph_mode))
        # Save the configured t_thresh
        if self.temp_thresh:
            conf.set('GraphControll', 'TTHRESH', str(self.temp_thresh))
        _save_displayed_setting(conf, "Graphs")
        _save_displayed_setting(conf, "Summaries")
        conf.write(cfgfile)
Save the current configuration to a user config file
436
9
225,546
def animate_graph ( self , loop , user_data = None ) : self . view . update_displayed_information ( ) # Save to CSV if configured if self . save_csv or self . csv_file is not None : output_to_csv ( self . view . summaries , self . csv_file ) # Set next update self . animate_alarm = loop . set_alarm_in ( float ( self . refresh_rate ) , self . animate_graph ) if self . args . debug_run : # refresh rate is a string in float format self . debug_run_counter += int ( float ( self . refresh_rate ) ) if self . debug_run_counter >= 8 : self . exit_program ( )
Update the graph and schedule the next update This is where the magic happens
161
14
225,547
def obfuscation_machine(use_unicode=False, identifier_length=1):
    """Generator yielding short, never-repeating identifier names.

    Yields combinations of lower- and upper-case letters (or unicode
    letters when ``use_unicode`` is True), skipping reserved words and
    growing the identifier length once all permutations are exhausted.
    """
    # This generates a list of the letters a-z:
    lowercase = list(map(chr, range(97, 123)))
    # Same thing but ALL CAPS.  Fix: the upper bound must be 91 (not 90)
    # so that 'Z' (chr(90)) is included, mirroring the inclusive a-z
    # range above.
    uppercase = list(map(chr, range(65, 91)))
    if use_unicode:
        # Python 3 lets us have some *real* fun:
        allowed_categories = ('LC', 'Ll', 'Lu', 'Lo', 'Lu')
        # All the fun characters start at 1580 (hehe):
        big_list = list(map(chr, range(1580, HIGHEST_UNICODE)))
        max_chars = 1000  # Ought to be enough for anybody :)
        combined = []
        rtl_categories = ('AL', 'R')  # AL == Arabic, R == Any right-to-left
        last_orientation = 'L'  # L = Any left-to-right
        # Find a good mix of left-to-right and right-to-left characters
        while len(combined) < max_chars:
            char = choice(big_list)
            if unicodedata.category(char) in allowed_categories:
                orientation = unicodedata.bidirectional(char)
                if last_orientation in rtl_categories:
                    if orientation not in rtl_categories:
                        combined.append(char)
                else:
                    if orientation in rtl_categories:
                        combined.append(char)
                last_orientation = orientation
    else:
        combined = lowercase + uppercase
    shuffle(combined)  # Randomize it all to keep things interesting
    while True:
        for perm in permutations(combined, identifier_length):
            perm = "".join(perm)
            if perm not in RESERVED_WORDS:  # Can't replace reserved words
                yield perm
        identifier_length += 1
A generator that returns short sequential combinations of lower and upper - case letters that will never repeat .
411
19
225,548
def apply_obfuscation(source):
    """Return ``source`` with variables, functions and classes obfuscated.

    Mutates the module-level keyword_args and imported_modules globals
    used by the obfuscation predicates, so analysis must run before the
    replacement loops below.
    """
    global keyword_args
    global imported_modules
    tokens = token_utils.listified_tokenizer(source)
    keyword_args = analyze.enumerate_keyword_args(tokens)
    imported_modules = analyze.enumerate_imports(tokens)
    variables = find_obfuscatables(tokens, obfuscatable_variable)
    classes = find_obfuscatables(tokens, obfuscatable_class)
    functions = find_obfuscatables(tokens, obfuscatable_function)
    for variable in variables:
        replace_obfuscatables(
            tokens, obfuscate_variable, variable, name_generator)
    for function in functions:
        replace_obfuscatables(
            tokens, obfuscate_function, function, name_generator)
    for _class in classes:
        replace_obfuscatables(
            tokens, obfuscate_class, _class, name_generator)
    return token_utils.untokenize(tokens)
Returns source all obfuscated .
216
6
225,549
def bz2_pack(source):
    """Return ``source`` as a bzip2-compressed, self-extracting script."""
    import bz2, base64
    out = ""
    # Preserve shebangs (don't care about encodings for this)
    first_line = source.split('\n')[0]
    if analyze.shebang.match(first_line):
        if py3:
            if first_line.rstrip().endswith('python'):
                # Make it python3
                first_line = first_line.rstrip()
                first_line += '3'  # !/usr/bin/env python3
        out = first_line + '\n'
    payload = base64.b64encode(
        bz2.compress(source.encode('utf-8'))).decode('utf-8')
    out += ('import bz2, base64\n'
            + "exec(bz2.decompress(base64.b64decode('"
            + payload
            + "')))\n")
    return out
Returns source as a bzip2 - compressed self - extracting python script .
220
15
225,550
def iso_register(iso_code):
    """Class decorator registering a Calendar class under ``iso_code``."""
    def decorate(cls):
        registry.register(iso_code, cls)
        return cls
    return decorate
Registers Calendar class as country or region in IsoRegistry .
33
14
225,551
def get_calendar_class(self, iso_code):
    """Return the calendar class registered for ``iso_code``.

    A subregion code that is not registered itself falls back to its
    parent region code.
    """
    code_elements, is_subregion = self._code_elements(iso_code)
    lookup = (code_elements[0]
              if is_subregion and iso_code not in self.region_registry
              else iso_code)
    return self.region_registry.get(lookup)
Retrieves calendar class associated with given iso_code .
108
12
225,552
def get_subregions(self, iso_code):
    """Return subregion calendar classes registered under ``iso_code``."""
    subregions = OrderedDict()
    for code, cal_class in self.region_registry.items():
        elements, is_subregion = self._code_elements(code)
        if is_subregion and elements[0] == iso_code:
            subregions[code] = cal_class
    return subregions
Returns subregion calendar classes for given region iso_code .
84
12
225,553
def items(self, region_codes, include_subregions=False):
    """Return calendar classes for ``region_codes``, skipping unknown codes.

    When ``include_subregions`` is True, each region's subregions are
    included as well.
    """
    found = OrderedDict()
    for code in region_codes:
        try:
            found[code] = self.region_registry[code]
        except KeyError:
            continue
        if include_subregions:
            found.update(self.get_subregions(code))
    return found
Returns calendar classes for regions
78
5
225,554
def cleaned_date(day, keep_datetime=False):
    """Return ``day`` coerced to a plain date, validating its type.

    A datetime is passed through unchanged when ``keep_datetime`` is True.
    """
    if not isinstance(day, (date, datetime)):
        raise UnsupportedDateType(
            "`{}` is of unsupported type ({})".format(day, type(day)))
    if keep_datetime:
        return day
    if hasattr(day, 'date') and callable(day.date):
        day = day.date()
    return day
Return a clean date type .
94
6
225,555
def get_fixed_holidays(self, year):
    """Return (date, label) pairs for ``year`` from FIXED_HOLIDAYS."""
    return [(date(year, month, day), label)
            for month, day, label in self.FIXED_HOLIDAYS]
Return the fixed days according to the FIXED_HOLIDAYS class property
54
17
225,556
def get_holiday_label(self, day):
    """Return the label of ``day``'s holiday, or None if not a holiday."""
    day = cleaned_date(day)
    labels = {holiday: label
              for holiday, label in self.holidays(day.year)}
    return labels.get(day)
Return the label of the holiday if the date is a holiday
44
12
225,557
def is_working_day(self, day, extra_working_days=None, extra_holidays=None):
    """Return True when ``day`` is a working day.

    ``extra_working_days`` and ``extra_holidays`` allow punctual
    exceptions to the regular rules.
    """
    day = cleaned_date(day)
    if extra_working_days:
        extra_working_days = tuple(map(cleaned_date, extra_working_days))
        # Extra lists exceptions take precedence
        if day in extra_working_days:
            return True
    if extra_holidays:
        extra_holidays = tuple(map(cleaned_date, extra_holidays))
    # Regular rules: weekends first, then holidays
    if day.weekday() in self.get_weekend_days():
        return False
    return not self.is_holiday(day, extra_holidays=extra_holidays)
Return True if it's a working day. In addition to the regular holidays, you can add exceptions.
152
20
225,558
def is_holiday ( self , day , extra_holidays = None ) : day = cleaned_date ( day ) if extra_holidays : extra_holidays = tuple ( map ( cleaned_date , extra_holidays ) ) if extra_holidays and day in extra_holidays : return True return day in self . holidays_set ( day . year )
Return True if it s an holiday . In addition to the regular holidays you can add exceptions .
78
19
225,559
def add_working_days ( self , day , delta , extra_working_days = None , extra_holidays = None , keep_datetime = False ) : day = cleaned_date ( day , keep_datetime ) if extra_working_days : extra_working_days = tuple ( map ( cleaned_date , extra_working_days ) ) if extra_holidays : extra_holidays = tuple ( map ( cleaned_date , extra_holidays ) ) days = 0 temp_day = day if type ( temp_day ) is datetime and not keep_datetime : temp_day = temp_day . date ( ) day_added = 1 if delta >= 0 else - 1 delta = abs ( delta ) while days < delta : temp_day = temp_day + timedelta ( days = day_added ) if self . is_working_day ( temp_day , extra_working_days = extra_working_days , extra_holidays = extra_holidays ) : days += 1 return temp_day
Add delta working days to the date .
221
8
225,560
def sub_working_days ( self , day , delta , extra_working_days = None , extra_holidays = None , keep_datetime = False ) : delta = abs ( delta ) return self . add_working_days ( day , - delta , extra_working_days , extra_holidays , keep_datetime = keep_datetime )
Substract delta working days to the date .
77
9
225,561
def find_following_working_day ( self , day ) : day = cleaned_date ( day ) while day . weekday ( ) in self . get_weekend_days ( ) : day = day + timedelta ( days = 1 ) return day
Looks for the following working day if not already a working day .
54
13
225,562
def get_first_weekday_after ( day , weekday ) : day_delta = ( weekday - day . weekday ( ) ) % 7 day = day + timedelta ( days = day_delta ) return day
Get the first weekday after a given day . If the day is the same weekday the same day will be returned .
47
23
225,563
def get_working_days_delta ( self , start , end ) : start = cleaned_date ( start ) end = cleaned_date ( end ) if start == end : return 0 if start > end : start , end = end , start # Starting count here count = 0 while start < end : start += timedelta ( days = 1 ) if self . is_working_day ( start ) : count += 1 return count
Return the number of working day between two given dates . The order of the dates provided doesn t matter .
90
21
225,564
def get_holy_thursday ( self , year ) : sunday = self . get_easter_sunday ( year ) return sunday - timedelta ( days = 3 )
Return the date of the last thursday before easter
40
11
225,565
def get_good_friday ( self , year ) : sunday = self . get_easter_sunday ( year ) return sunday - timedelta ( days = 2 )
Return the date of the last friday before easter
40
11
225,566
def get_clean_monday ( self , year ) : sunday = self . get_easter_sunday ( year ) return sunday - timedelta ( days = 48 )
Return the clean monday date
40
6
225,567
def get_easter_saturday ( self , year ) : sunday = self . get_easter_sunday ( year ) return sunday - timedelta ( days = 1 )
Return the Easter Saturday date
41
5
225,568
def get_easter_monday ( self , year ) : sunday = self . get_easter_sunday ( year ) return sunday + timedelta ( days = 1 )
Return the date of the monday after easter
41
10
225,569
def get_variable_days ( self , year ) : # noqa days = super ( ChristianMixin , self ) . get_variable_days ( year ) if self . include_epiphany : days . append ( ( date ( year , 1 , 6 ) , "Epiphany" ) ) if self . include_clean_monday : days . append ( ( self . get_clean_monday ( year ) , "Clean Monday" ) ) if self . include_annunciation : days . append ( ( date ( year , 3 , 25 ) , "Annunciation" ) ) if self . include_ash_wednesday : days . append ( ( self . get_ash_wednesday ( year ) , self . ash_wednesday_label ) ) if self . include_palm_sunday : days . append ( ( self . get_palm_sunday ( year ) , "Palm Sunday" ) ) if self . include_holy_thursday : days . append ( ( self . get_holy_thursday ( year ) , "Holy Thursday" ) ) if self . include_good_friday : days . append ( ( self . get_good_friday ( year ) , self . good_friday_label ) ) if self . include_easter_saturday : days . append ( ( self . get_easter_saturday ( year ) , "Easter Saturday" ) ) if self . include_easter_sunday : days . append ( ( self . get_easter_sunday ( year ) , "Easter Sunday" ) ) if self . include_easter_monday : days . append ( ( self . get_easter_monday ( year ) , "Easter Monday" ) ) if self . include_assumption : days . append ( ( date ( year , 8 , 15 ) , "Assumption of Mary to Heaven" ) ) if self . include_all_saints : days . append ( ( date ( year , 11 , 1 ) , "All Saints Day" ) ) if self . include_all_souls : days . append ( ( date ( year , 11 , 2 ) , "All Souls Day" ) ) if self . include_immaculate_conception : days . append ( ( date ( year , 12 , 8 ) , self . immaculate_conception_label ) ) if self . include_christmas : days . append ( ( date ( year , 12 , 25 ) , "Christmas Day" ) ) if self . include_christmas_eve : days . append ( ( date ( year , 12 , 24 ) , "Christmas Eve" ) ) if self . include_boxing_day : days . append ( ( date ( year , 12 , 26 ) , self . boxing_day_label ) ) if self . include_ascension : days . append ( ( self . 
get_ascension_thursday ( year ) , "Ascension Thursday" ) ) if self . include_whit_monday : days . append ( ( self . get_whit_monday ( year ) , self . whit_monday_label ) ) if self . include_whit_sunday : days . append ( ( self . get_whit_sunday ( year ) , self . whit_sunday_label ) ) if self . include_corpus_christi : days . append ( ( self . get_corpus_christi ( year ) , "Corpus Christi" ) ) return days
Return the christian holidays list according to the mixin
756
11
225,570
def get_chinese_new_year ( self , year ) : days = [ ] lunar_first_day = ChineseNewYearCalendar . lunar ( year , 1 , 1 ) # Chinese new year's eve if self . include_chinese_new_year_eve : days . append ( ( lunar_first_day - timedelta ( days = 1 ) , self . chinese_new_year_eve_label ) ) # Chinese new year (is included by default) if self . include_chinese_new_year : days . append ( ( lunar_first_day , self . chinese_new_year_label ) ) if self . include_chinese_second_day : lunar_second_day = lunar_first_day + timedelta ( days = 1 ) days . append ( ( lunar_second_day , self . chinese_second_day_label ) ) if self . include_chinese_third_day : lunar_third_day = lunar_first_day + timedelta ( days = 2 ) days . append ( ( lunar_third_day , self . chinese_third_day_label ) ) if self . shift_sunday_holidays : if lunar_first_day . weekday ( ) == SUN : if self . shift_start_cny_sunday : days . append ( ( lunar_first_day - timedelta ( days = 1 ) , "Chinese Lunar New Year shift" ) , ) else : if self . include_chinese_third_day : shift_day = lunar_third_day else : shift_day = lunar_second_day days . append ( ( shift_day + timedelta ( days = 1 ) , "Chinese Lunar New Year shift" ) , ) if ( lunar_second_day . weekday ( ) == SUN and self . include_chinese_third_day ) : days . append ( ( lunar_third_day + timedelta ( days = 1 ) , "Chinese Lunar New Year shift" ) , ) return days
Compute Chinese New Year days . To return a list of holidays .
431
14
225,571
def get_shifted_holidays ( self , dates ) : for holiday , label in dates : if holiday . weekday ( ) == SUN : yield ( holiday + timedelta ( days = 1 ) , label + ' shift' )
Taking a list of existing holidays yield a list of shifted days if the holiday falls on SUN .
48
19
225,572
def get_calendar_holidays ( self , year ) : # Unshifted days are here: days = super ( ChineseNewYearCalendar , self ) . get_calendar_holidays ( year ) if self . shift_sunday_holidays : days_to_inspect = copy ( days ) for day_shifted in self . get_shifted_holidays ( days_to_inspect ) : days . append ( day_shifted ) return days
Take into account the eventual shift to the next MON if any holiday falls on SUN .
103
17
225,573
def calculate_equinoxes ( self , year , timezone = 'UTC' ) : tz = pytz . timezone ( timezone ) d1 = ephem . next_equinox ( str ( year ) ) d = ephem . Date ( str ( d1 ) ) equinox1 = d . datetime ( ) + tz . utcoffset ( d . datetime ( ) ) d2 = ephem . next_equinox ( d1 ) d = ephem . Date ( str ( d2 ) ) equinox2 = d . datetime ( ) + tz . utcoffset ( d . datetime ( ) ) return ( equinox1 . date ( ) , equinox2 . date ( ) )
calculate equinox with time zone
164
9
225,574
def solar_term ( self , year , degrees , timezone = 'UTC' ) : twopi = 2 * pi tz = pytz . timezone ( timezone ) # Find out the sun's current longitude. sun = ephem . Sun ( ephem . Date ( str ( year ) ) ) current_longitude = sun . hlong - pi # Find approximately the right time of year. target_longitude = degrees * ephem . degree difference = ( target_longitude - current_longitude ) % twopi t0 = ephem . Date ( str ( year ) ) + 365.25 * difference / twopi # Zero in on the exact moment. def f ( t ) : sun . compute ( t ) longitude = sun . hlong - pi return ephem . degrees ( target_longitude - longitude ) . znorm d = ephem . Date ( ephem . newton ( f , t0 , t0 + ephem . minute ) ) solar_term = d . datetime ( ) + tz . utcoffset ( d . datetime ( ) ) return solar_term . date ( )
Returns the date of the solar term for the given longitude and the given year .
244
17
225,575
def get_spring_holiday ( self , year ) : easter = self . get_easter_monday ( year ) spring_holiday = self . get_nth_weekday_in_month ( year , 4 , MON , 3 ) if easter == spring_holiday : spring_holiday = self . get_nth_weekday_in_month ( year , 4 , MON , 2 ) return ( spring_holiday , self . spring_holiday_label )
Return Spring Holiday for Edinburgh .
101
6
225,576
def get_victoria_day ( self , year ) : may_24th = date ( year , 5 , 24 ) # Since "MON(day) == 0", it's either the difference between MON and the # current weekday (starting at 0), or 7 days before the May 24th shift = may_24th . weekday ( ) or 7 victoria_day = may_24th - timedelta ( days = shift ) return ( victoria_day , "Victoria Day" )
Return Victoria Day for Edinburgh .
102
6
225,577
def read_relative_file ( filename ) : path = join ( dirname ( abspath ( __file__ ) ) , filename ) with io . open ( path , encoding = 'utf-8' ) as f : return f . read ( )
Return the contents of the given file .
52
8
225,578
def lonely_buckets ( self ) : hrago = time . monotonic ( ) - 3600 return [ b for b in self . buckets if b . last_updated < hrago ]
Get all of the buckets that haven t been updated in over an hour .
41
15
225,579
def get_bucket_for ( self , node ) : for index , bucket in enumerate ( self . buckets ) : if node . long_id < bucket . range [ 1 ] : return index # we should never be here, but make linter happy return None
Get the index of the bucket that the given node would fall into .
56
14
225,580
async def _find ( self , rpcmethod ) : log . info ( "crawling network with nearest: %s" , str ( tuple ( self . nearest ) ) ) count = self . alpha if self . nearest . get_ids ( ) == self . last_ids_crawled : count = len ( self . nearest ) self . last_ids_crawled = self . nearest . get_ids ( ) dicts = { } for peer in self . nearest . get_uncontacted ( ) [ : count ] : dicts [ peer . id ] = rpcmethod ( peer , self . node ) self . nearest . mark_contacted ( peer ) found = await gather_dict ( dicts ) return await self . _nodes_found ( found )
Get either a value or list of nodes .
164
9
225,581
def check_dht_value_type ( value ) : typeset = [ int , float , bool , str , bytes ] return type ( value ) in typeset
Checks to see if the type of the value is a valid type for placing in the dht .
35
21
225,582
async def listen ( self , port , interface = '0.0.0.0' ) : loop = asyncio . get_event_loop ( ) listen = loop . create_datagram_endpoint ( self . _create_protocol , local_addr = ( interface , port ) ) log . info ( "Node %i listening on %s:%i" , self . node . long_id , interface , port ) self . transport , self . protocol = await listen # finally, schedule refreshing table self . refresh_table ( )
Start listening on the given port .
116
7
225,583
async def bootstrap ( self , addrs ) : log . debug ( "Attempting to bootstrap node with %i initial contacts" , len ( addrs ) ) cos = list ( map ( self . bootstrap_node , addrs ) ) gathered = await asyncio . gather ( * cos ) nodes = [ node for node in gathered if node is not None ] spider = NodeSpiderCrawl ( self . protocol , self . node , nodes , self . ksize , self . alpha ) return await spider . find ( )
Bootstrap the server by connecting to other known nodes in the network .
111
14
225,584
async def get ( self , key ) : log . info ( "Looking up key %s" , key ) dkey = digest ( key ) # if this node has it, return it if self . storage . get ( dkey ) is not None : return self . storage . get ( dkey ) node = Node ( dkey ) nearest = self . protocol . router . find_neighbors ( node ) if not nearest : log . warning ( "There are no known neighbors to get key %s" , key ) return None spider = ValueSpiderCrawl ( self . protocol , node , nearest , self . ksize , self . alpha ) return await spider . find ( )
Get a key if the network has it .
143
9
225,585
async def set ( self , key , value ) : if not check_dht_value_type ( value ) : raise TypeError ( "Value must be of type int, float, bool, str, or bytes" ) log . info ( "setting '%s' = '%s' on network" , key , value ) dkey = digest ( key ) return await self . set_digest ( dkey , value )
Set the given string key to the given value in the network .
92
13
225,586
def save_state_regularly ( self , fname , frequency = 600 ) : self . save_state ( fname ) loop = asyncio . get_event_loop ( ) self . save_state_loop = loop . call_later ( frequency , self . save_state_regularly , fname , frequency )
Save the state of node with a given regularity to the given filename .
69
15
225,587
def push ( self , nodes ) : if not isinstance ( nodes , list ) : nodes = [ nodes ] for node in nodes : if node not in self : distance = self . node . distance_to ( node ) heapq . heappush ( self . heap , ( distance , node ) )
Push nodes onto heap .
63
5
225,588
def shared_prefix ( args ) : i = 0 while i < min ( map ( len , args ) ) : if len ( set ( map ( operator . itemgetter ( i ) , args ) ) ) != 1 : break i += 1 return args [ 0 ] [ : i ]
Find the shared prefix between the strings .
60
8
225,589
def get_refresh_ids ( self ) : ids = [ ] for bucket in self . router . lonely_buckets ( ) : rid = random . randint ( * bucket . range ) . to_bytes ( 20 , byteorder = 'big' ) ids . append ( rid ) return ids
Get ids to search for to keep old buckets up to date .
66
14
225,590
def handle_call_response ( self , result , node ) : if not result [ 0 ] : log . warning ( "no response from %s, removing from router" , node ) self . router . remove_contact ( node ) return result log . info ( "got successful response from %s" , node ) self . welcome_if_new ( node ) return result
If we get a response add the node to the routing table . If we get no response make sure it s removed from the routing table .
78
28
225,591
def deinit ( self ) : for i in range ( len ( self . buf ) ) : self . buf [ i ] = 0 neopixel_write ( self . pin , self . buf ) self . pin . deinit ( )
Blank out the NeoPixels and release the pin .
50
12
225,592
def show ( self ) : if self . brightness > 0.99 : neopixel_write ( self . pin , self . buf ) else : neopixel_write ( self . pin , bytearray ( [ int ( i * self . brightness ) for i in self . buf ] ) )
Shows the new colors on the pixels themselves if they haven t already been autowritten .
64
19
225,593
def _set_k8s_attribute ( obj , attribute , value ) : current_value = None attribute_name = None # All k8s python client objects have an 'attribute_map' property # which has as keys python style attribute names (api_client) # and as values the kubernetes JSON API style attribute names # (apiClient). We want to allow users to use the JSON API style attribute # names only. for python_attribute , json_attribute in obj . attribute_map . items ( ) : if json_attribute == attribute : attribute_name = python_attribute break else : raise ValueError ( 'Attribute must be one of {}' . format ( obj . attribute_map . values ( ) ) ) if hasattr ( obj , attribute_name ) : current_value = getattr ( obj , attribute_name ) if current_value is not None : # This will ensure that current_value is something JSONable, # so a dict, list, or scalar current_value = SERIALIZATION_API_CLIENT . sanitize_for_serialization ( current_value ) if isinstance ( current_value , dict ) : # Deep merge our dictionaries! setattr ( obj , attribute_name , merge_dictionaries ( current_value , value ) ) elif isinstance ( current_value , list ) : # Just append lists setattr ( obj , attribute_name , current_value + value ) else : # Replace everything else setattr ( obj , attribute_name , value )
Set a specific value on a kubernetes object s attribute
324
13
225,594
def merge_dictionaries ( a , b , path = None , update = True ) : if path is None : path = [ ] for key in b : if key in a : if isinstance ( a [ key ] , dict ) and isinstance ( b [ key ] , dict ) : merge_dictionaries ( a [ key ] , b [ key ] , path + [ str ( key ) ] ) elif a [ key ] == b [ key ] : pass # same leaf value elif isinstance ( a [ key ] , list ) and isinstance ( b [ key ] , list ) : for idx , val in enumerate ( b [ key ] ) : a [ key ] [ idx ] = merge_dictionaries ( a [ key ] [ idx ] , b [ key ] [ idx ] , path + [ str ( key ) , str ( idx ) ] , update = update ) elif update : a [ key ] = b [ key ] else : raise Exception ( 'Conflict at %s' % '.' . join ( path + [ str ( key ) ] ) ) else : a [ key ] = b [ key ] return a
Merge two dictionaries recursively .
249
9
225,595
def make_pod_spec ( image , labels = { } , threads_per_worker = 1 , env = { } , extra_container_config = { } , extra_pod_config = { } , memory_limit = None , memory_request = None , cpu_limit = None , cpu_request = None , ) : args = [ 'dask-worker' , '$(DASK_SCHEDULER_ADDRESS)' , '--nthreads' , str ( threads_per_worker ) , '--death-timeout' , '60' , ] if memory_limit : args . extend ( [ '--memory-limit' , str ( memory_limit ) ] ) pod = client . V1Pod ( metadata = client . V1ObjectMeta ( labels = labels ) , spec = client . V1PodSpec ( restart_policy = 'Never' , containers = [ client . V1Container ( name = 'dask-worker' , image = image , args = args , env = [ client . V1EnvVar ( name = k , value = v ) for k , v in env . items ( ) ] , ) ] , tolerations = [ client . V1Toleration ( key = 'k8s.dask.org/dedicated' , operator = 'Equal' , value = 'worker' , effect = 'NoSchedule' , ) , # GKE currently does not permit creating taints on a node pool # with a `/` in the key field client . V1Toleration ( key = 'k8s.dask.org_dedicated' , operator = 'Equal' , value = 'worker' , effect = 'NoSchedule' , ) , ] ) ) resources = client . V1ResourceRequirements ( limits = { } , requests = { } ) if cpu_request : resources . requests [ 'cpu' ] = cpu_request if memory_request : resources . requests [ 'memory' ] = memory_request if cpu_limit : resources . limits [ 'cpu' ] = cpu_limit if memory_limit : resources . limits [ 'memory' ] = memory_limit pod . spec . containers [ 0 ] . resources = resources for key , value in extra_container_config . items ( ) : _set_k8s_attribute ( pod . spec . containers [ 0 ] , key , value ) for key , value in extra_pod_config . items ( ) : _set_k8s_attribute ( pod . spec , key , value ) return pod
Create generic pod template from input parameters
546
7
225,596
def clean_pod_template ( pod_template ) : if isinstance ( pod_template , str ) : msg = ( 'Expected a kubernetes.client.V1Pod object, got %s' 'If trying to pass a yaml filename then use ' 'KubeCluster.from_yaml' ) raise TypeError ( msg % pod_template ) if isinstance ( pod_template , dict ) : msg = ( 'Expected a kubernetes.client.V1Pod object, got %s' 'If trying to pass a dictionary specification then use ' 'KubeCluster.from_dict' ) raise TypeError ( msg % str ( pod_template ) ) pod_template = copy . deepcopy ( pod_template ) # Make sure metadata / labels / env objects exist, so they can be modified # later without a lot of `is None` checks if pod_template . metadata is None : pod_template . metadata = client . V1ObjectMeta ( ) if pod_template . metadata . labels is None : pod_template . metadata . labels = { } if pod_template . spec . containers [ 0 ] . env is None : pod_template . spec . containers [ 0 ] . env = [ ] return pod_template
Normalize pod template and check for type errors
268
9
225,597
def _cleanup_pods ( namespace , labels ) : api = kubernetes . client . CoreV1Api ( ) pods = api . list_namespaced_pod ( namespace , label_selector = format_labels ( labels ) ) for pod in pods . items : try : api . delete_namespaced_pod ( pod . metadata . name , namespace ) logger . info ( 'Deleted pod: %s' , pod . metadata . name ) except kubernetes . client . rest . ApiException as e : # ignore error if pod is already removed if e . status != 404 : raise
Remove all pods with these labels in this namespace
132
9
225,598
def format_labels ( labels ) : if labels : return ',' . join ( [ '{}={}' . format ( k , v ) for k , v in labels . items ( ) ] ) else : return ''
Convert a dictionary of labels into a comma separated string
48
11
225,599
def _namespace_default ( ) : ns_path = '/var/run/secrets/kubernetes.io/serviceaccount/namespace' if os . path . exists ( ns_path ) : with open ( ns_path ) as f : return f . read ( ) . strip ( ) return 'default'
Get current namespace if running in a k8s cluster
70
11