idx
int64
0
63k
question
stringlengths
61
4.03k
target
stringlengths
6
1.23k
39,300
def animate_police_lights(self, color1, color2, group1='LEFT', group2='RIGHT', sleeptime=0.5, duration=5, block=True):
    """
    Cycle the group1 and group2 LEDs between color1 and color2 to give the
    effect of police lights: every ``sleeptime`` seconds the two groups swap
    colors.  Runs until ``duration`` seconds have elapsed or
    ``animate_thread_stop`` is set.  With ``block=False`` the loop runs in a
    background thread and this call returns immediately.
    """
    def _animate_police_lights():
        self.all_off()
        even = True
        start_time = dt.datetime.now()
        while True:
            if even:
                self.set_color(group1, color1)
                self.set_color(group2, color2)
            else:
                self.set_color(group1, color2)
                self.set_color(group2, color1)
            # Stop when asked to, or when the requested duration has elapsed.
            if self.animate_thread_stop or duration_expired(start_time, duration):
                break
            even = not even
            sleep(sleeptime)
        # Reset the stop flag / thread id so a new animation can start.
        self.animate_thread_stop = False
        self.animate_thread_id = None
    self.animate_stop()
    if block:
        _animate_police_lights()
    else:
        self.animate_thread_id = _thread.start_new_thread(_animate_police_lights, ())
Cycle the group1 and group2 LEDs between color1 and color2 to give the effect of police lights . Alternate the group1 and group2 LEDs every sleeptime seconds .
39,301
def animate_cycle(self, colors, groups=('LEFT', 'RIGHT'), sleeptime=0.5, duration=5, block=True):
    """
    Cycle the LEDs in ``groups`` through ``colors``, displaying each color
    for ``sleeptime`` seconds, until ``duration`` seconds have elapsed or
    ``animate_thread_stop`` is set.  With ``block=False`` the loop runs in a
    background thread.
    """
    def _animate_cycle():
        index = 0
        max_index = len(colors)
        start_time = dt.datetime.now()
        while True:
            for group in groups:
                self.set_color(group, colors[index])
                # Advance through the palette, wrapping back to the start.
                index += 1
                if index == max_index:
                    index = 0
            if self.animate_thread_stop or duration_expired(start_time, duration):
                break
            sleep(sleeptime)
        # Reset the stop flag / thread id so a new animation can start.
        self.animate_thread_stop = False
        self.animate_thread_id = None
    self.animate_stop()
    if block:
        _animate_cycle()
    else:
        self.animate_thread_id = _thread.start_new_thread(_animate_cycle, ())
Cycle groups LEDs through colors . Do this in a loop where we display each color for sleeptime seconds .
39,302
def animate_rainbow(self, group1='LEFT', group2='RIGHT', increment_by=0.1, sleeptime=0.1, duration=5, block=True):
    """
    Gradually fade both LED groups through colors by ramping the two color
    channels up and down one after the other.  Runs until ``duration``
    seconds have elapsed or ``animate_thread_stop`` is set; with
    ``block=False`` the loop runs in a background thread.
    """
    def _animate_rainbow():
        # State machine: 0 ramp left channel up, 1 ramp right channel up,
        #                2 ramp left channel down, 3 ramp right channel down.
        state = 0
        left_value = 0
        right_value = 0
        MIN_VALUE = 0
        MAX_VALUE = 1
        self.all_off()
        start_time = dt.datetime.now()
        while True:
            if state == 0:
                left_value += increment_by
            elif state == 1:
                right_value += increment_by
            elif state == 2:
                left_value -= increment_by
            elif state == 3:
                right_value -= increment_by
            else:
                raise Exception("Invalid state {}".format(state))
            # Clamp both channels into [MIN_VALUE, MAX_VALUE].
            left_value = min(left_value, MAX_VALUE)
            right_value = min(right_value, MAX_VALUE)
            left_value = max(left_value, MIN_VALUE)
            right_value = max(right_value, MIN_VALUE)
            self.set_color(group1, (left_value, right_value))
            self.set_color(group2, (left_value, right_value))
            # Advance to the next ramp once the current channel saturates.
            if state == 0 and left_value == MAX_VALUE:
                state = 1
            elif state == 1 and right_value == MAX_VALUE:
                state = 2
            elif state == 2 and left_value == MIN_VALUE:
                state = 3
            elif state == 3 and right_value == MIN_VALUE:
                state = 0
            if self.animate_thread_stop or duration_expired(start_time, duration):
                break
            sleep(sleeptime)
        # Reset the stop flag / thread id so a new animation can start.
        self.animate_thread_stop = False
        self.animate_thread_id = None
    self.animate_stop()
    if block:
        _animate_rainbow()
    else:
        self.animate_thread_id = _thread.start_new_thread(_animate_rainbow, ())
Gradually fade from one color to the next
39,303
def raw(self):
    """Red, green and blue components of the detected color, as a 3-tuple of raw sensor values."""
    self._ensure_mode(self.MODE_RGB_RAW)
    return tuple(self.value(channel) for channel in range(3))
Red green and blue components of the detected color as a tuple . Officially in the range 0 - 1020 but the values returned will never be that high . We do not yet know why the values returned are low but pointing the color sensor at a well lit sheet of white paper will return values in the 250 - 400 range .
39,304
def lab(self):
    """Return the detected color converted to the CIE L*a*b* color space as (L, a, b), rounded to 4 decimals."""
    linear = [0, 0, 0]
    xyz = [0, 0, 0]
    # sRGB -> linear RGB (inverse gamma), scaled to 0..100.
    for (idx, channel) in enumerate(self.rgb):
        if channel > 0.04045:
            channel = pow(((channel + 0.055) / 1.055), 2.4)
        else:
            channel = channel / 12.92
        linear[idx] = channel * 100.0
    # Linear RGB -> XYZ, then normalize by the reference white.
    X = (linear[0] * 0.4124564) + (linear[1] * 0.3575761) + (linear[2] * 0.1804375)
    Y = (linear[0] * 0.2126729) + (linear[1] * 0.7151522) + (linear[2] * 0.0721750)
    Z = (linear[0] * 0.0193339) + (linear[1] * 0.1191920) + (linear[2] * 0.9503041)
    xyz[0] = X / 95.047
    xyz[1] = Y / 100.0
    xyz[2] = Z / 108.883
    # XYZ -> Lab companding.
    for (idx, component) in enumerate(xyz):
        if component > 0.008856:
            component = pow(component, (1.0 / 3.0))
        else:
            component = (7.787 * component) + (16 / 116.0)
        xyz[idx] = component
    L = (116.0 * xyz[1]) - 16
    a = 500.0 * (xyz[0] - xyz[1])
    b = 200.0 * (xyz[1] - xyz[2])
    return (round(L, 4), round(a, 4), round(b, 4))
Return colors in Lab color space
39,305
def wait_until_angle_changed_by(self, delta, direction_sensitive=False):
    """
    Block until the gyro angle has changed by ``delta`` degrees.

    With ``direction_sensitive=True`` only changes in the direction implied
    by the sign of ``delta`` count; otherwise any change of ``abs(delta)``
    in either direction ends the wait.  Polls the sensor every 10ms.
    """
    assert self.mode in (self.MODE_GYRO_G_A, self.MODE_GYRO_ANG, self.MODE_TILT_ANG), \
        'Gyro mode should be MODE_GYRO_ANG, MODE_GYRO_G_A or MODE_TILT_ANG'
    start_angle = self.value(0)
    if direction_sensitive:
        if delta > 0:
            while (self.value(0) - start_angle) < delta:
                time.sleep(0.01)
        else:
            # Negative delta: wait for the angle to decrease by abs(delta).
            delta *= -1
            while (start_angle - self.value(0)) < delta:
                time.sleep(0.01)
    else:
        while abs(start_angle - self.value(0)) < delta:
            time.sleep(0.01)
Wait until angle has changed by specified amount .
39,306
def buttons_pressed(self, channel=1):
    """Return the list of remote-control buttons currently pressed on the given channel."""
    self._ensure_mode(self.MODE_IR_REMOTE)
    normalized = self._normalize_channel(channel)
    return self._BUTTON_VALUES.get(self.value(normalized), [])
Returns list of currently pressed buttons .
39,307
def sound_pressure(self):
    """The measured sound pressure level as a percent, using flat weighting."""
    self._ensure_mode(self.MODE_DB)
    scale = self._scale('DB')
    return self.value(0) * scale
The sound pressure level, measured as a percent. Uses a flat weighting.
39,308
def sound_pressure_low(self):
    """The measured sound pressure level as a percent, using A-weighting."""
    self._ensure_mode(self.MODE_DBA)
    scale = self._scale('DBA')
    return self.value(0) * scale
The sound pressure level, measured as a percent. Uses A-weighting, which focuses on levels up to 55 dB.
39,309
def reflected_light_intensity(self):
    """The reflected light intensity as a percentage."""
    self._ensure_mode(self.MODE_REFLECT)
    scale = self._scale('REFLECT')
    return self.value(0) * scale
A measurement of the reflected light intensity as a percentage .
39,310
def ambient_light_intensity(self):
    """The ambient light intensity as a percentage."""
    self._ensure_mode(self.MODE_AMBIENT)
    scale = self._scale('AMBIENT')
    return self.value(0) * scale
A measurement of the ambient light intensity as a percentage .
39,311
def available():
    """Return the sorted list of font names (*.pil files) shipped next to this module."""
    font_dir = os.path.dirname(__file__)
    pil_files = glob(os.path.join(font_dir, '*.pil'))
    return sorted(os.path.basename(os.path.splitext(f)[0]) for f in pil_files)
Returns list of available font names .
39,312
def modes(self):
    """Return a list of the available modes of the port (cached attribute read)."""
    cached, value = self.get_cached_attr_set(self._modes, 'modes')
    self._modes = cached
    return value
Returns a list of the available modes of the port .
39,313
def mode(self):
    """Return the currently selected mode of the port."""
    cached, value = self.get_attr_string(self._mode, 'mode')
    self._mode = cached
    return value
Reading returns the currently selected mode. Writing sets the mode. Generally speaking, when the mode changes, any sensor or motor devices associated with the port will be removed and new ones loaded; however, this will depend on the individual driver implementing this class.
39,314
def status(self):
    """Return the port status (usually the mode; may be e.g. no-device or error in auto modes)."""
    cached, value = self.get_attr_string(self._status, 'status')
    self._status = cached
    return value
In most cases reading status will return the same value as mode . In cases where there is an auto mode additional values may be returned such as no - device or error . See individual port driver documentation for the full list of possible values .
39,315
def list_sensors(name_pattern=Sensor.SYSTEM_DEVICE_NAME_CONVENTION, **kwargs):
    """Generator over Sensor objects for every sensor present in the system that matches the arguments."""
    sensors_path = abspath(Device.DEVICE_ROOT_PATH + '/' + Sensor.SYSTEM_CLASS_NAME)
    names = list_device_names(sensors_path, name_pattern, **kwargs)
    return (Sensor(name_pattern=n, name_exact=True) for n in names)
This is a generator function that enumerates all sensors that match the provided arguments .
39,316
def _scale ( self , mode ) : if mode in self . _mode_scale : scale = self . _mode_scale [ mode ] else : scale = 10 ** ( - self . decimals ) self . _mode_scale [ mode ] = scale return scale
Returns value scaling coefficient for the given mode .
39,317
def units(self):
    """Return the units of the measured value for the current mode (may be an empty string)."""
    cached, value = self.get_attr_string(self._units, 'units')
    self._units = cached
    return value
Returns the units of the measured value for the current mode . May return empty string
39,318
def value(self, n=0):
    """Return the n-th raw value measured by the sensor (fixed-point; see `decimals` for scaling)."""
    index = int(n)
    self._value[index], result = self.get_attr_int(self._value[index], 'value' + str(index))
    return result
Returns the value or values measured by the sensor . Check num_values to see how many values there are . Values with N > = num_values will return an error . The values are fixed point numbers so check decimals to see if you need to divide to get the actual value .
39,319
def _open_fbdev ( fbdev = None ) : dev = fbdev or os . getenv ( 'FRAMEBUFFER' , '/dev/fb0' ) fbfid = os . open ( dev , os . O_RDWR ) return fbfid
Return the framebuffer file descriptor .
39,320
def _get_fix_info(fbfid):
    """Query and return the fixed screen info for the framebuffer file descriptor."""
    info = FbMem.FixScreenInfo()
    fcntl.ioctl(fbfid, FbMem.FBIOGET_FSCREENINFO, info)
    return info
Return the fix screen info from the framebuffer file descriptor .
39,321
def _get_var_info(fbfid):
    """Query and return the variable screen info for the framebuffer file descriptor."""
    info = FbMem.VarScreenInfo()
    fcntl.ioctl(fbfid, FbMem.FBIOGET_VSCREENINFO, info)
    return info
Return the var screen info from the framebuffer file descriptor .
39,322
def _map_fb_memory ( fbfid , fix_info ) : return mmap . mmap ( fbfid , fix_info . smem_len , mmap . MAP_SHARED , mmap . PROT_READ | mmap . PROT_WRITE , offset = 0 )
Map the framebuffer memory .
39,323
def update(self):
    """
    Apply pending changes to the screen by copying the backing image into the
    framebuffer memory map.  Nothing is drawn until this is called.

    Supports 1-bit (mono, bit-reversed rows), 16-bit (RGB565) and 32-bit
    (XRGB) framebuffers; any other depth raises.
    """
    if self.var_info.bits_per_pixel == 1:
        b = self._img.tobytes("raw", "1;R")
        self.mmap[:len(b)] = b
    elif self.var_info.bits_per_pixel == 16:
        self.mmap[:] = self._img_to_rgb565_bytes()
    elif self.var_info.bits_per_pixel == 32:
        self.mmap[:] = self._img.convert("RGB").tobytes("raw", "XRGB")
    else:
        raise Exception("Not supported - platform %s with bits_per_pixel %s" %
                        (self.platform, self.var_info.bits_per_pixel))
Applies pending changes to the screen . Nothing will be drawn on the screen until this function is called .
39,324
def _make_scales ( notes ) : res = dict ( ) for note , freq in notes : freq = round ( freq ) for n in note . split ( '/' ) : res [ n ] = freq return res
Utility function used by Sound class for building the note frequencies table
39,325
def play_tone(self, frequency, duration, delay=0.0, volume=100, play_type=PLAY_WAIT_FOR_COMPLETE):
    """
    Play a single tone of `frequency` Hz for `duration` seconds at `volume`,
    followed by `delay` seconds of silence.

    :raises ValueError: on non-positive duration, negative delay, or a volume
        outside (0, 100].
    """
    self._validate_play_type(play_type)
    if duration <= 0:
        raise ValueError('invalid duration (%s)' % duration)
    if delay < 0:
        raise ValueError('invalid delay (%s)' % delay)
    if not 0 < volume <= 100:
        raise ValueError('invalid volume (%s)' % volume)
    self.set_volume(volume)
    # tone() expects milliseconds.
    self.tone([(frequency, int(duration * 1000), int(delay * 1000))], play_type=play_type)
Play a single tone specified by its frequency duration volume and final delay .
39,326
def play_note(self, note, duration, volume=100, play_type=PLAY_WAIT_FOR_COMPLETE):
    """
    Play a single note given by its name as defined in ``_NOTE_FREQUENCIES``.

    :param string note: note name, e.g. 'A4'; the uppercased name is tried
        first, then the name exactly as given
    :param float duration: tone duration in seconds, must be > 0
    :param int volume: 0 < volume <= 100
    :param play_type: one of the PLAY_* constants
    :raises ValueError: on unknown note, bad duration or bad volume
    """
    self._validate_play_type(play_type)
    # NOTE: dict.get(key, default) evaluates its default eagerly, so the
    # original `get(note.upper(), self._NOTE_FREQUENCIES[note])` raised
    # KeyError for a lowercase spelling of a note that exists uppercased.
    # Look up the two candidates lazily instead.
    try:
        freq = self._NOTE_FREQUENCIES[note.upper()]
    except KeyError:
        try:
            freq = self._NOTE_FREQUENCIES[note]
        except KeyError:
            raise ValueError('invalid note (%s)' % note)
    if duration <= 0:
        raise ValueError('invalid duration (%s)' % duration)
    if not 0 < volume <= 100:
        raise ValueError('invalid volume (%s)' % volume)
    return self.play_tone(freq, duration=duration, volume=volume, play_type=play_type)
Plays a note given by its name as defined in _NOTE_FREQUENCIES .
39,327
def speak(self, text, espeak_opts='-a 200 -s 130', volume=100, play_type=PLAY_WAIT_FOR_COMPLETE):
    """
    Speak the given text aloud by piping espeak WAV output into aplay.

    :param string text: text to speak
    :param string espeak_opts: extra espeak options (default: amplitude 200, speed 130)
    :param int volume: playback volume
    :param play_type: PLAY_WAIT_FOR_COMPLETE blocks until playback finishes;
        PLAY_NO_WAIT_FOR_COMPLETE returns the aplay Popen handle immediately
        (the caller must wait on it); PLAY_LOOP repeats forever and never returns.
    """
    self._validate_play_type(play_type)
    self.set_volume(volume)
    with open(os.devnull, 'w') as n:
        cmd_line = ['/usr/bin/espeak', '--stdout'] + shlex.split(espeak_opts) + [shlex.quote(text)]
        aplay_cmd_line = shlex.split('/usr/bin/aplay -q')
        if play_type == Sound.PLAY_WAIT_FOR_COMPLETE:
            espeak = Popen(cmd_line, stdout=PIPE)
            play = Popen(aplay_cmd_line, stdin=espeak.stdout, stdout=n)
            play.wait()
        elif play_type == Sound.PLAY_NO_WAIT_FOR_COMPLETE:
            espeak = Popen(cmd_line, stdout=PIPE)
            # Caller owns the returned process and is responsible for waiting.
            return Popen(aplay_cmd_line, stdin=espeak.stdout, stdout=n)
        elif play_type == Sound.PLAY_LOOP:
            # Deliberately infinite: re-speak the text until interrupted.
            while True:
                espeak = Popen(cmd_line, stdout=PIPE)
                play = Popen(aplay_cmd_line, stdin=espeak.stdout, stdout=n)
                play.wait()
Speak the given text aloud .
39,328
def play_song(self, song, tempo=120, delay=0.05):
    """
    Play a song given as a list of (note_name, note_value) tuples, using
    conventional music notation rather than raw frequencies/durations.

    :param song: iterable of (note, value) tuples; `value` supports
        'v/f' (divided), 'v*f' (multiplied), 'v.' (dotted, x1.5) and
        'v3' (triplet, x2/3) forms
    :param int tempo: beats per minute, must be > 0
    :param float delay: inter-note silence in seconds, must be >= 0
    :raises ValueError: on invalid tempo, delay or note
    """
    if tempo <= 0:
        raise ValueError('invalid tempo (%s)' % tempo)
    if delay < 0:
        raise ValueError('invalid delay (%s)' % delay)
    delay_ms = int(delay * 1000)
    # Duration of a whole measure (4 beats) in milliseconds.
    meas_duration_ms = 60000 / tempo * 4

    def beep_args(note, value):
        # Return the `beep` tool arguments for one (note, value) tuple.
        # NOTE: dict.get evaluates its default eagerly, so the original
        # `get(note.upper(), self._NOTE_FREQUENCIES[note])` raised KeyError
        # for a lowercase spelling of a known note; look up lazily instead.
        # An unknown note still raises KeyError here, which the caller
        # converts to ValueError.
        try:
            freq = self._NOTE_FREQUENCIES[note.upper()]
        except KeyError:
            freq = self._NOTE_FREQUENCIES[note]
        if '/' in value:
            base, factor = value.split('/')
            duration_ms = meas_duration_ms * self._NOTE_VALUES[base] / float(factor)
        elif '*' in value:
            base, factor = value.split('*')
            duration_ms = meas_duration_ms * self._NOTE_VALUES[base] * float(factor)
        elif value.endswith('.'):
            base = value[:-1]
            duration_ms = meas_duration_ms * self._NOTE_VALUES[base] * 1.5
        elif value.endswith('3'):
            base = value[:-1]
            duration_ms = meas_duration_ms * self._NOTE_VALUES[base] * 2 / 3
        else:
            duration_ms = meas_duration_ms * self._NOTE_VALUES[value]
        return '-f %d -l %d -D %d' % (freq, duration_ms, delay_ms)

    try:
        return self.beep(' -n '.join([beep_args(n, v) for (n, v) in song]))
    except KeyError as e:
        raise ValueError('invalid note (%s)' % e)
Plays a song provided as a list of tuples containing the note name and its value using music conventional notation instead of numerical values for frequency and duration .
39,329
def list_device_names(class_path, name_pattern, **kwargs):
    """
    Generator over the names of devices under `class_path` whose name matches
    `name_pattern` (fnmatch) and whose sysfs attributes contain the substrings
    given as keyword arguments (a list value matches if any entry matches).
    Yields nothing when `class_path` is not a directory.
    """
    if not os.path.isdir(class_path):
        return

    def matches(attribute, pattern):
        # Read the sysfs attribute; an unreadable attribute never matches.
        # Narrowed from a bare `except:`, which also swallowed
        # KeyboardInterrupt / SystemExit.
        try:
            with io.FileIO(attribute) as f:
                value = f.read().strip().decode()
        except Exception:
            return False
        if isinstance(pattern, list):
            return any(value.find(p) >= 0 for p in pattern)
        return value.find(pattern) >= 0

    for entry in os.listdir(class_path):
        if not fnmatch.fnmatch(entry, name_pattern):
            continue
        path = class_path + '/' + entry
        if all(matches(path + '/' + k, kwargs[k]) for k in kwargs):
            yield entry
This is a generator function that lists names of all devices matching the provided parameters .
39,330
def list_devices(class_name, name_pattern, **kwargs):
    """Generator over Device objects for every device present in the system that matches the arguments."""
    devices_path = abspath(Device.DEVICE_ROOT_PATH + '/' + class_name)
    names = list_device_names(devices_path, name_pattern, **kwargs)
    return (Device(class_name, n, name_exact=True) for n in names)
This is a generator function that takes same arguments as Device class and enumerates all devices present in the system that match the provided arguments .
39,331
def _get_attribute ( self , attribute , name ) : try : if attribute is None : attribute = self . _attribute_file_open ( name ) else : attribute . seek ( 0 ) return attribute , attribute . read ( ) . strip ( ) . decode ( ) except Exception as ex : self . _raise_friendly_access_error ( ex , name )
Device attribute getter
39,332
def _set_attribute ( self , attribute , name , value ) : try : if attribute is None : attribute = self . _attribute_file_open ( name ) else : attribute . seek ( 0 ) if isinstance ( value , str ) : value = value . encode ( ) attribute . write ( value ) attribute . flush ( ) except Exception as ex : self . _raise_friendly_access_error ( ex , name ) return attribute
Device attribute setter
39,333
def shutdown(self):
    """Signal the balance loop to stop, stop both motors, and close every open file handle."""
    self.stop_balance.set()
    self.motor_left.stop()
    self.motor_right.stop()
    for handle in (self.gyro_file, self.touch_file,
                   self.encoder_left_file, self.encoder_right_file,
                   self.dc_left_file, self.dc_right_file):
        handle.close()
Close all file handles and stop all motors .
39,334
def _fast_read ( self , infile ) : infile . seek ( 0 ) return ( int ( infile . read ( ) . decode ( ) . strip ( ) ) )
Function for fast reading from sensor files .
39,335
def _fast_write ( self , outfile , value ) : outfile . truncate ( 0 ) outfile . write ( str ( int ( value ) ) ) outfile . flush ( )
Function for fast writing to motor files .
39,336
def _set_duty ( self , motor_duty_file , duty , friction_offset , voltage_comp ) : duty_int = int ( round ( duty * voltage_comp ) ) if duty_int > 0 : duty_int = min ( 100 , duty_int + friction_offset ) elif duty_int < 0 : duty_int = max ( - 100 , duty_int - friction_offset ) self . _fast_write ( motor_duty_file , duty_int )
Function to set the duty cycle of the motors .
39,337
def balance(self):
    """Run the _balance method in a background thread."""
    threading.Thread(target=self._balance).start()
Run the _balance method as a thread .
39,338
def _move ( self , speed = 0 , steering = 0 , seconds = None ) : self . drive_queue . put ( ( speed , steering ) ) if seconds is not None : time . sleep ( seconds ) self . drive_queue . put ( ( 0 , 0 ) ) self . drive_queue . join ( )
Move robot .
39,339
def move_forward(self, seconds=None):
    """Drive straight forward at full speed (for `seconds` if given, otherwise until another command is queued)."""
    self._move(speed=SPEED_MAX, steering=0, seconds=seconds)
Move robot forward .
39,340
def move_backward(self, seconds=None):
    """Drive straight backward at full speed (for `seconds` if given, otherwise until another command is queued)."""
    self._move(speed=-SPEED_MAX, steering=0, seconds=seconds)
Move robot backward .
39,341
def rotate_left(self, seconds=None):
    """Rotate the robot in place to the left (for `seconds` if given, otherwise until another command is queued)."""
    self._move(speed=0, steering=STEER_MAX, seconds=seconds)
Rotate robot left .
39,342
def rotate_right(self, seconds=None):
    """Rotate the robot in place to the right (for `seconds` if given, otherwise until another command is queued)."""
    self._move(speed=0, steering=-STEER_MAX, seconds=seconds)
Rotate robot right .
39,343
def evdev_device(self):
    """Return the evdev InputDevice whose name matches `evdev_device_name`; raise when it is absent."""
    candidates = [evdev.InputDevice(path) for path in evdev.list_devices()]
    for candidate in candidates:
        if candidate.name == self.evdev_device_name:
            return candidate
    raise Exception("%s: could not find evdev device '%s'" % (self, self.evdev_device_name))
Return our corresponding evdev device object
39,344
def wait_for_bump(self, buttons, timeout_ms=None):
    """Wait for the buttons to be pressed and then released; both must happen within `timeout_ms` (None = forever)."""
    started = time.time()
    if not self.wait_for_pressed(buttons, timeout_ms):
        return False
    if timeout_ms is not None:
        # Spend only whatever budget remains on the release.
        timeout_ms -= int((time.time() - started) * 1000)
    return self.wait_for_released(buttons, timeout_ms)
Wait for the button to be pressed down and then released . Both actions must happen within timeout_ms .
39,345
def buttons_pressed(self):
    """
    Return the list of names of currently pressed buttons by refreshing the
    kernel key-state bitmask (EVIOCGKEY ioctl) for each button device and
    testing each button's bit.
    """
    # Refresh the cached key-state buffer of every button device.
    for b in self._buffer_cache:
        fcntl.ioctl(self._button_file(b), self.EVIOCGKEY, self._buffer_cache[b])
    pressed = []
    for k, v in self._buttons.items():
        buf = self._buffer_cache[v['name']]
        bit = v['value']
        # Key states are packed 8 per byte; test this button's bit.
        if bool(buf[int(bit / 8)] & 1 << bit % 8):
            pressed.append(k)
    return pressed
Returns list of names of pressed buttons .
39,346
def _orted_process():
    """Poll once per second, for at most 5 minutes, until 'orted' processes appear; return the psutil entries."""
    for _ in range(5 * 60):
        procs = [p for p in psutil.process_iter(attrs=['name']) if p.info['name'] == 'orted']
        if procs:
            return procs
        time.sleep(1)
Waits maximum of 5 minutes for orted process to start
39,347
def _parse_custom_mpi_options ( custom_mpi_options ) : parser = argparse . ArgumentParser ( ) parser . add_argument ( '--NCCL_DEBUG' , default = "INFO" , type = str ) return parser . parse_known_args ( custom_mpi_options . split ( ) )
Parse custom MPI options provided by user . Known options default value will be overridden and unknown options would be identified separately .
39,348
def download_and_install(uri, name=DEFAULT_MODULE_NAME, cache=True):
    """
    Download, prepare and install a module from an S3 gzipped tarball or a
    local directory.  When ``cache`` is True and the module already exists,
    the download/install is skipped entirely.
    """
    should_use_cache = cache and exists(name)
    if not should_use_cache:
        with _files.tmpdir() as tmpdir:
            if uri.startswith('s3://'):
                # Fetch the tarball and unpack it into a scratch directory.
                dst = os.path.join(tmpdir, 'tar_file')
                _files.s3_download(uri, dst)
                module_path = os.path.join(tmpdir, 'module_dir')
                os.makedirs(module_path)
                with tarfile.open(name=dst, mode='r:gz') as t:
                    t.extractall(path=module_path)
            else:
                # Local directory: install straight from the given path.
                module_path = uri
            prepare(module_path, name)
            install(module_path)
Download prepare and install a compressed tar file from S3 or local directory as a module .
39,349
def run(module_name, args=None, env_vars=None, wait=True, capture_error=False):
    """Run a Python module as a script (`python -m module_name args...`), waiting or returning the process."""
    args = args or []
    env_vars = env_vars or {}
    cmd = [_process.python_executable(), '-m', module_name] + args
    _logging.log_script_invocation(cmd, env_vars)
    if not wait:
        return _process.create(cmd, _errors.ExecuteUserScriptError, capture_error=capture_error)
    return _process.check_error(cmd, _errors.ExecuteUserScriptError, capture_error=capture_error)
Run Python module as a script .
39,350
def run_module(uri, args, env_vars=None, name=DEFAULT_MODULE_NAME, cache=None, wait=True, capture_error=False):
    """
    Download, prepare and execute a module from an S3 tarball or a local
    directory as a script.  ``cache`` is deprecated and only triggers a
    warning.  Returns whatever ``run`` returns.
    """
    _warning_cache_deprecation(cache)
    env_vars = env_vars or {}
    env_vars = env_vars.copy()  # do not mutate the caller's dict
    _files.download_and_extract(uri, name, _env.code_dir)
    prepare(_env.code_dir, name)
    install(_env.code_dir)
    _env.write_env_vars(env_vars)
    return run(name, args, env_vars, wait, capture_error)
Download prepare and executes a compressed tar file from S3 or provided directory as a module .
39,351
def content_type(self):
    """The request's content type; tries both header spellings and falls back to JSON."""
    headers = self.headers
    return headers.get('ContentType') or headers.get('Content-Type') or _content_types.JSON
The request s content - type .
39,352
def accept(self):
    """The content type for the response; falls back to the default when the client accepts anything (or nothing)."""
    requested = self.headers.get('Accept')
    if not requested or requested == _content_types.ANY:
        return self._default_accept
    return requested
The content - type for the response to the client .
39,353
def content(self):
    """The raw request body, decoded to text for UTF-8 content types and left as bytes otherwise."""
    decode_as_text = self.content_type in _content_types.UTF8_TYPES
    return self.get_data(as_text=decode_as_text)
The request incoming data .
39,354
def run(uri, user_entry_point, args, env_vars=None, wait=True, capture_error=False, runner=_runner.ProcessRunnerType, extra_opts=None):
    """
    Download, prepare and execute a compressed tarball from S3 (or a local
    directory) as the user entry point, exporting `env_vars` and passing
    `args` as command arguments via the selected runner.
    """
    env_vars = dict(env_vars or {})
    _files.download_and_extract(uri, user_entry_point, _env.code_dir)
    install(user_entry_point, _env.code_dir, capture_error)
    _env.write_env_vars(env_vars)
    entry = _runner.get(runner, user_entry_point, args, env_vars, extra_opts)
    return entry.run(wait, capture_error)
Download prepare and executes a compressed tar file from S3 or provided directory as an user entrypoint . Runs the user entry point passing env_vars as environment variables and args as command arguments .
39,355
def configure_logger(level, format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s'):
    """Configure root logging; at INFO and above, quiet the chatty AWS client loggers."""
    logging.basicConfig(format=format, level=level)
    if level >= logging.INFO:
        # boto3/s3transfer are verbose at DEBUG; botocore is even noisier.
        for name, lib_level in (('boto3', logging.INFO),
                                ('s3transfer', logging.INFO),
                                ('botocore', logging.WARN)):
            logging.getLogger(name).setLevel(lib_level)
Set logger configuration .
39,356
def _timestamp ( ) : moment = time . time ( ) moment_us = repr ( moment ) . split ( '.' ) [ 1 ] return time . strftime ( "%Y-%m-%d-%H-%M-%S-{}" . format ( moment_us ) , time . gmtime ( moment ) )
Return a timestamp with microsecond precision .
39,357
def split_by_criteria(dictionary, keys=None, prefix=None):
    """Split `dictionary` into (included, excluded) dicts: keys in `keys` or starting with `prefix` are included."""
    wanted = set(keys or [])
    included_items = {k: dictionary[k] for k in dictionary.keys()
                      if k in wanted or (prefix and k.startswith(prefix))}
    excluded_items = {k: dictionary[k] for k in dictionary.keys()
                      if k not in included_items}
    return SplitResultSpec(included=included_items, excluded=excluded_items)
Split a dictionary in two by the provided keys .
39,358
def default_output_fn(prediction, accept):
    """Serialize `prediction` into the requested `accept` content type and wrap it in a worker Response."""
    payload = _encoders.encode(prediction, accept)
    return _worker.Response(response=payload, mimetype=accept)
Function responsible to serialize the prediction for the response .
39,359
def transform(self):
    """Deserialize the incoming request, run the transform function, and serialize the response."""
    request = _worker.Request()
    result = self._transform_fn(self._model, request.content, request.content_type, request.accept)
    if isinstance(result, tuple):
        # A (body, content_type) pair gets wrapped in a Response object.
        body, mimetype = result
        return _worker.Response(response=body, mimetype=mimetype)
    return result
Take a request with input data deserialize it make a prediction and return a serialized response .
39,360
def _default_transform_fn(self, model, content, content_type, accept):
    """Run the input_fn -> predict_fn -> output_fn pipeline, mapping unsupported formats to HTTP error responses."""
    try:
        data = self._input_fn(content, content_type)
    except _errors.UnsupportedFormatError as e:
        return self._error_response(e, http_client.UNSUPPORTED_MEDIA_TYPE)
    prediction = self._predict_fn(data, model)
    try:
        return self._output_fn(prediction, accept)
    except _errors.UnsupportedFormatError as e:
        return self._error_response(e, http_client.NOT_ACCEPTABLE)
Make predictions against the model and return a serialized response .
39,361
def training_env():
    """Build a TrainingEnv from the resource, input-data and hyperparameter configuration files."""
    from sagemaker_containers import _env
    return _env.TrainingEnv(
        resource_config=_env.read_resource_config(),
        input_data_config=_env.read_input_data_config(),
        hyperparameters=_env.read_hyperparameters(),
    )
Create a TrainingEnv .
39,362
def _write_json ( obj , path ) : with open ( path , 'w' ) as f : json . dump ( obj , f )
Writes a serializable object as a JSON file
39,363
def _create_training_directories():
    """
    Create the SageMaker-style training layout (model, input-config and
    output-data directories) under the module-level base path, and seed
    empty hyperparameter / input-data configs plus a single-host resource
    config naming this machine.
    """
    logger.info('Creating a new training folder under %s .' % base_dir)
    os.makedirs(model_dir)
    os.makedirs(input_config_dir)
    os.makedirs(output_data_dir)
    _write_json({}, hyperparameters_file_dir)
    _write_json({}, input_data_config_file_dir)
    host_name = socket.gethostname()
    # Single-machine default: this host is both the current host and the whole cluster.
    resources_dict = {"current_host": host_name, "hosts": [host_name]}
    _write_json(resources_dict, resource_config_file_dir)
Creates the directory structure and files necessary for training under the base path
39,364
def num_gpus():
    """Number of GPUs reported by nvidia-smi; 0 when the tool is missing or fails."""
    try:
        cmd = shlex.split('nvidia-smi --list-gpus')
        output = subprocess.check_output(cmd).decode('utf-8')
        return sum(1 for line in output.split('\n') if line.startswith('GPU '))
    except (OSError, subprocess.CalledProcessError):
        logger.info('No GPUs detected (normal if no gpus installed)')
        return 0
The number of gpus available in the current container .
39,365
def write_env_vars(env_vars=None):
    """Export `env_vars` into the process environment, adding PYTHONPATH built from sys.path."""
    env_vars = env_vars or {}
    env_vars['PYTHONPATH'] = ':'.join(sys.path)
    os.environ.update(env_vars)
Write the dictionary env_vars in the system as environment variables .
39,366
def to_env_vars(self):
    """
    Flatten the training environment into a dict suitable for exporting as
    environment variables (via _mapping.to_env_vars), including one
    ``channel_<name>`` entry per input channel and one ``hp_<name>`` entry
    per hyperparameter.
    """
    env = {
        'hosts': self.hosts,
        'network_interface_name': self.network_interface_name,
        'hps': self.hyperparameters,
        'user_entry_point': self.user_entry_point,
        'framework_params': self.additional_framework_parameters,
        'resource_config': self.resource_config,
        'input_data_config': self.input_data_config,
        'output_data_dir': self.output_data_dir,
        'channels': sorted(self.channel_input_dirs.keys()),
        'current_host': self.current_host,
        'module_name': self.module_name,
        'log_level': self.log_level,
        'framework_module': self.framework_module,
        'input_dir': self.input_dir,
        'input_config_dir': self.input_config_dir,
        'output_dir': self.output_dir,
        'num_cpus': self.num_cpus,
        'num_gpus': self.num_gpus,
        'model_dir': self.model_dir,
        'module_dir': self.module_dir,
        'training_env': dict(self),
        'user_args': self.to_cmd_args(),
        'output_intermediate_dir': self.output_intermediate_dir
    }
    # One entry per data channel and per hyperparameter.
    for name, path in self.channel_input_dirs.items():
        env['channel_%s' % name] = path
    for key, value in self.hyperparameters.items():
        env['hp_%s' % key] = value
    return _mapping.to_env_vars(env)
Environment variable representation of the training environment
39,367
def array_to_npy(array_like):
    """Serialize an array-like object to NPY-format bytes."""
    buf = BytesIO()
    np.save(buf, array_like)
    return buf.getvalue()
Convert an array like object to the NPY format .
39,368
def npy_to_numpy(npy_array):
    """Deserialize NPY-format bytes into a numpy array.

    NOTE(review): allow_pickle=True lets object arrays round-trip, but it
    executes pickle — do not feed it untrusted payloads.
    """
    buf = BytesIO(npy_array)
    return np.load(buf, allow_pickle=True)
Convert an NPY array into numpy .
39,369
def array_to_json(array_like):
    """Serialize an array-like object (anything with .tolist, or JSON-native data) to a JSON string."""
    def _fallback(obj):
        # numpy arrays and scalars expose tolist(); anything else defers to
        # the stock encoder, which raises TypeError for unserializable input.
        if hasattr(obj, 'tolist'):
            return obj.tolist()
        return json.JSONEncoder().default(obj)
    return json.dumps(array_like, default=_fallback)
Convert an array like object to JSON .
39,370
def json_to_numpy(string_like, dtype=None):
    """Parse a JSON document into a numpy array with an optional dtype."""
    parsed = json.loads(string_like)
    return np.array(parsed, dtype=dtype)
Convert a JSON object to a numpy array .
39,371
def csv_to_numpy(string_like, dtype=None):
    """Parse CSV text into a numpy array with an optional dtype."""
    return np.genfromtxt(StringIO(string_like), dtype=dtype, delimiter=',')
Convert a CSV object to a numpy array .
39,372
def array_to_csv(array_like):
    """Render an array-like object as CSV text, one row per line."""
    out = StringIO()
    np.savetxt(out, array_like, delimiter=',', fmt='%s')
    return out.getvalue()
Convert an array like object to CSV .
39,373
def decode(obj, content_type):
    """
    Decode `obj`, serialized in one of the supported content types, into a
    numpy array.

    :raises _errors.UnsupportedFormatError: when no decoder is registered
        for `content_type`.
    """
    try:
        decoder = _decoders_map[content_type]
    except KeyError:
        raise _errors.UnsupportedFormatError(content_type)
    # Decode outside the try: a KeyError raised by the decoder itself must
    # not be misreported as an unsupported content type (original wrapped
    # the call in the same try block).
    return decoder(obj)
Decode an object serialized in one of the default content types into a numpy array.
39,374
def encode(array_like, content_type):
    """
    Encode an array-like object into the requested `content_type`.

    :raises _errors.UnsupportedFormatError: when no encoder is registered
        for `content_type`.
    """
    try:
        encoder = _encoders_map[content_type]
    except KeyError:
        raise _errors.UnsupportedFormatError(content_type)
    # Encode outside the try: a KeyError raised by the encoder itself must
    # not be misreported as an unsupported content type (original wrapped
    # the call in the same try block).
    return encoder(array_like)
Encode an array-like object into the specified content type.
39,375
def tmpdir(suffix='', prefix='tmp', dir=None):
    """
    Context-manager generator: create a temporary directory, yield its path,
    and always remove it on exit.

    The try/finally guarantees cleanup even when the context body raises —
    the original omitted it, leaking the directory on exceptions.
    """
    tmp = tempfile.mkdtemp(suffix=suffix, prefix=prefix, dir=dir)
    try:
        yield tmp
    finally:
        shutil.rmtree(tmp)
Create a temporary directory with a context manager . The file is deleted when the context exits .
39,376
def download_and_extract(uri, name, path):
    """
    Place the entry point from `uri` (an s3:// gzipped tarball, a local
    directory, or a single local file) under `path`.  A non-empty `path` is
    assumed already populated and is left untouched.
    """
    if not os.path.exists(path):
        os.makedirs(path)
    if not os.listdir(path):
        with tmpdir() as tmp:
            if uri.startswith('s3://'):
                # S3: download the tarball and unpack it into `path`.
                dst = os.path.join(tmp, 'tar_file')
                s3_download(uri, dst)
                with tarfile.open(name=dst, mode='r:gz') as t:
                    t.extractall(path=path)
            elif os.path.isdir(uri):
                # Local directory: replace `path` wholesale (no-op if identical).
                if uri == path:
                    return
                if os.path.exists(path):
                    shutil.rmtree(path)
                shutil.move(uri, path)
            else:
                # Single local file: copy it into `path` under `name`.
                shutil.copy2(uri, os.path.join(path, name))
Download prepare and install a compressed tar file from S3 or local directory as an entry point .
39,377
def s3_download(url, dst):
    """Download an s3://bucket/key object to the local path `dst`; raise ValueError for non-s3 URLs."""
    parsed = parse.urlparse(url)
    if parsed.scheme != 's3':
        raise ValueError("Expecting 's3' scheme, got: %s in %s" % (parsed.scheme, parsed))
    bucket, key = parsed.netloc, parsed.path.lstrip('/')
    region = os.environ.get('AWS_REGION', os.environ.get(_params.REGION_NAME_ENV))
    boto3.resource('s3', region_name=region).Bucket(bucket).download_file(key, dst)
Download a file from S3 .
39,378
def matching_args(fn, dictionary):
    """Return the subset of `dictionary` whose keys match fn's parameters (the whole dict when fn takes **kwargs)."""
    spec = getargspec(fn)
    if spec.keywords:
        return dictionary
    return _mapping.split_by_criteria(dictionary, spec.args).included
Given a function fn and a dict dictionary returns the function arguments that match the dict keys .
39,379
def error_wrapper(fn, error_class):
    """Wrap `fn` so that any exception it raises is re-raised as `error_class`, preserving the original traceback."""
    def _wrapped(*args, **kwargs):
        try:
            return fn(*args, **kwargs)
        except Exception as e:
            six.reraise(error_class, error_class(e), sys.exc_info()[2])
    return _wrapped
Wraps function fn in a try catch block that re - raises error_class .
39,380
def ceph_is_installed(module):
    """Connection callback executed after connecting, to ensure that
    Ceph is installed on the remote host.

    Raises RuntimeError (naming the host) when it is not.
    """
    if not Ceph(module.conn).installed:
        remote_host = module.conn.hostname
        raise RuntimeError('ceph needs to be installed in remote host: %s' % remote_host)
A helper callback to be executed after the connection is made to ensure that Ceph is installed .
39,381
def color_format():
    """Build the log formatter.

    Uses the colored BASE_COLOR_FORMAT when the terminal supports
    colors, falling back to the plain BASE_FORMAT otherwise.
    """
    chosen = BASE_COLOR_FORMAT if supports_color() else BASE_FORMAT
    return ColoredFormatter(color_message(chosen))
Main entry point to get a colored formatter it will use the BASE_FORMAT by default and fall back to no colors if the system does not support it
39,382
def mon_status_check(conn, logger, hostname, args):
    """Query the monitor admin socket for ``mon_status`` and return the
    parsed JSON output, or an empty dict when the output is not valid
    JSON (e.g. the daemon is down).
    """
    asok_path = paths.mon.asok(args.cluster, hostname)
    command = [
        'ceph',
        '--cluster={cluster}'.format(cluster=args.cluster),
        '--admin-daemon',
        asok_path,
        'mon_status',
    ]
    out, err, code = remoto.process.check(conn, command)
    # Surface anything the remote command wrote to stderr.
    for line in err:
        logger.error(line)
    try:
        return json.loads(b''.join(out).decode('utf-8'))
    except ValueError:
        return {}
A direct check for JSON output on the monitor status .
39,383
def catch_mon_errors(conn, logger, hostname, cfg, args):
    """Catch common monitor misconfigurations and warn about them.

    Checks that the host appears in ``mon initial members``, that it is
    present in the monmap, and that either ``public_addr`` or
    ``public_network`` is configured.
    """
    monmap = mon_status_check(conn, logger, hostname, args).get('monmap', {})
    mon_initial_members = get_mon_initial_members(args, _cfg=cfg)
    public_addr = cfg.safe_get('global', 'public_addr')
    public_network = cfg.safe_get('global', 'public_network')
    mon_in_monmap = [
        mon.get('name')
        for mon in monmap.get('mons', [{}])
        if mon.get('name') == hostname
    ]
    # idiom fix: `hostname not in` instead of `not hostname in`
    if mon_initial_members is None or hostname not in mon_initial_members:
        logger.warning('%s is not defined in `mon initial members`', hostname)
    if not mon_in_monmap:
        logger.warning('monitor %s does not exist in monmap', hostname)
    if not public_addr and not public_network:
        logger.warning('neither `public_addr` nor `public_network` keys are defined for monitors')
        logger.warning('monitors may not be able to form quorum')
Make sure we are able to catch common mishaps with monitors and use the state of a monitor to determine what is missing and warn appropriately about it.
39,384
def mon_status(conn, logger, hostname, args, silent=False):
    """Run ``ceph daemon mon.{hostname} mon_status`` on the remote end
    and return a boolean: True when the monitor is running (or at least
    reporting a state), False when it is not doing OK.
    """
    mon = 'mon.%s' % hostname
    try:
        out = mon_status_check(conn, logger, hostname, args)
        if not out:
            logger.warning('monitor: %s, might not be running yet' % mon)
            return False
        if not silent:
            # Dump the full status, framed for readability.
            logger.debug('*' * 80)
            logger.debug('status for monitor: %s' % mon)
            for line in json.dumps(out, indent=2, sort_keys=True).split('\n'):
                logger.debug(line)
            logger.debug('*' * 80)
        if out['rank'] >= 0:
            logger.info('monitor: %s is running' % mon)
            return True
        if out['rank'] == -1 and out['state']:
            logger.info('monitor: %s is currently at the state of %s' % (mon, out['state']))
            return True
        logger.info('monitor: %s is not running' % mon)
        return False
    except RuntimeError:
        logger.info('monitor: %s is not running' % mon)
        return False
run ceph daemon mon . hostname mon_status on the remote end and provide not only the output but be able to return a boolean status of what is going on . False represents a monitor that is not doing OK even if it is up and running while True would mean the monitor is up and running correctly .
39,385
def hostname_is_compatible(conn, logger, provided_hostname):
    """Warn loudly when the hostname given on the CLI does not match the
    short hostname reported by the remote host; a mismatch can prevent
    monitors from reaching quorum.
    """
    logger.debug('determining if provided host has same hostname in remote')
    remote_hostname = conn.remote_module.shortname()
    if remote_hostname == provided_hostname:
        return
    banner = '*' * 80
    logger.warning(banner)
    logger.warning('provided hostname must match remote hostname')
    logger.warning('provided hostname: %s' % provided_hostname)
    logger.warning('remote hostname: %s' % remote_hostname)
    logger.warning('monitors may not reach quorum and create-keys will not complete')
    logger.warning(banner)
Make sure that the host that we are connecting to has the same value as the hostname in the remote host otherwise mons can fail not reaching quorum .
39,386
def make(parser):
    """Ceph MON Daemon management"""
    parser.formatter_class = ToggleRawTextHelpFormatter

    mon_parser = parser.add_subparsers(dest='subcommand')
    mon_parser.required = True

    mon_add = mon_parser.add_parser(
        'add',
        help=('R|Add a monitor to an existing cluster:\n'
              '\tceph-deploy mon add node1\n'
              'Or:\n'
              '\tceph-deploy mon add --address 192.168.1.10 node1\n'
              'If the section for the monitor exists and defines a `mon addr` that\n'
              'will be used, otherwise it will fallback by resolving the hostname to an\n'
              'IP. If `--address` is used it will override all other options.')
    )
    mon_add.add_argument('--address', nargs='?')
    mon_add.add_argument('mon', nargs=1)

    mon_create = mon_parser.add_parser(
        'create',
        help=('R|Deploy monitors by specifying them like:\n'
              '\tceph-deploy mon create node1 node2 node3\n'
              'If no hosts are passed it will default to use the\n'
              '`mon initial members` defined in the configuration.')
    )
    mon_create.add_argument(
        '--keyrings',
        nargs='?',
        help='concatenate multiple keyrings to be seeded on new monitors',
    )
    mon_create.add_argument('mon', nargs='*')

    mon_create_initial = mon_parser.add_parser(
        'create-initial',
        help=('Will deploy for monitors defined in `mon initial members`, '
              'wait until they form quorum and then gatherkeys, reporting '
              'the monitor status along the process. If monitors don\'t form '
              'quorum the command will eventually time out.')
    )
    mon_create_initial.add_argument(
        '--keyrings',
        nargs='?',
        help='concatenate multiple keyrings to be seeded on new monitors',
    )

    mon_destroy = mon_parser.add_parser(
        'destroy',
        help='Completely remove Ceph MON from remote host(s)'
    )
    mon_destroy.add_argument('mon', nargs='+')

    parser.set_defaults(func=mon)
Ceph MON Daemon management
39,387
def get_mon_initial_members(args, error_on_empty=False, _cfg=None):
    """Read the Ceph config file and return ``mon_initial_members`` as a
    list of hostnames.

    When the key is missing/empty the raw (falsy) value is returned,
    unless ``error_on_empty`` is True, in which case NeedHostError is
    raised. ``_cfg`` lets callers inject an already-loaded config.
    """
    cfg = _cfg if _cfg else conf.ceph.load(args)
    mon_initial_members = cfg.safe_get('global', 'mon_initial_members')
    if not mon_initial_members:
        if error_on_empty:
            raise exc.NeedHostError(
                'could not find `mon initial members` defined in ceph.conf'
            )
    else:
        # Split only when a value exists: previously the else clause
        # attached to the inner `if`, so an empty/None value reached
        # re.split and raised TypeError instead of being returned.
        mon_initial_members = re.split(r'[,\s]+', mon_initial_members)
    return mon_initial_members
Read the Ceph config file and return the value of mon_initial_members. Optionally, a NeedHostError can be raised if the value is None.
39,388
def is_running(conn, args):
    """Run the given status command remotely and return True when its
    stdout reports a running service.
    """
    stdout, stderr, _ = remoto.process.check(conn, args)
    output = b' '.join(stdout)
    # Different init systems report a running state differently.
    return any(marker in output for marker in (b': running', b' start/running'))
Run a command to check the status of a mon return a boolean .
39,389
def executable_path(conn, executable):
    """Remote validator: return the full path of ``executable`` on the
    connected host, raising ExecutableNotFound when it is not available.
    """
    # Renamed local so it no longer shadows this function's own name.
    located = conn.remote_module.which(executable)
    if not located:
        raise ExecutableNotFound(executable, conn.hostname)
    return located
Remote validator that accepts a connection object to ensure that a certain executable is available returning its full path if so .
39,390
def is_systemd_service_enabled(conn, service='ceph'):
    """Return True when the given systemd service is enabled on the
    remote host (``systemctl is-enabled`` exits with 0).
    """
    command = [
        'systemctl',
        'is-enabled',
        '--quiet',
        '{service}'.format(service=service),
    ]
    _, _, returncode = remoto.process.check(conn, command)
    return returncode == 0
Detects if a systemd service is enabled or not .
39,391
def make(parser):
    """Repo definition management"""
    parser.add_argument(
        'repo_name',
        metavar='REPO-NAME',
        help='Name of repo to manage. Can match an entry in cephdeploy.conf'
    )
    parser.add_argument(
        '--repo-url',
        help='a repo URL that mirrors/contains Ceph packages'
    )
    parser.add_argument(
        '--gpg-url',
        help='a GPG key URL to be used with custom repos'
    )
    parser.add_argument(
        '--remove', '--delete',
        action='store_true',
        help='remove repo definition on remote host'
    )
    parser.add_argument(
        'host',
        metavar='HOST',
        nargs='+',
        help='host(s) to install on'
    )
    parser.set_defaults(func=repo)
Repo definition management
39,392
def get_list(self, section, key):
    """Return the comma-separated value of ``key`` as a list of stripped
    items, dropping any trailing `` #`` comment.

    Returns an empty list when the key is not found; a single item still
    yields a one-element list.
    """
    raw = self.get_safe(section, key, [])
    if raw == []:
        return raw
    # Cut off a trailing comment, then split on commas.
    raw = re.split(r'\s+#', raw)[0]
    return [item.strip() for item in raw.split(',')]
Assumes that the value for a given key is going to be a list separated by commas . It gets rid of trailing comments . If just one item is present it returns a list with a single item if no key is found an empty list is returned .
39,393
def get_default_repo(self):
    """Go through all repositories defined in the config file and return
    the first one whose `default` key is truthy.

    Returns False when no repo is marked as default.
    """
    for section in self.get_repos():
        # get_safe guards against a missing key before getboolean parses it.
        if self.get_safe(section, 'default') and self.getboolean(section, 'default'):
            return section
    return False
Go through all the repositories defined in the config file and search for a truthy value for the default key. If there isn't any, return False.
39,394
def validate_host_ip(ips, subnets):
    """Make sure that each given (non-None) subnet contains at least one
    of the host's IPs, raising RuntimeError for the first one that does
    not.
    """
    wanted = [subnet for subnet in subnets if subnet is not None]
    single_subnet = len(wanted) == 1

    def _has_match(subnet):
        # True when any host IP falls inside the subnet range.
        return any(net.ip_in_subnet(ip, subnet) for ip in ips)

    for subnet in wanted:
        if not _has_match(subnet):
            raise RuntimeError(
                "subnet (%s) is not valid for any of the ips found %s" % (subnet, str(ips))
            )
        if single_subnet:
            # One subnet to validate and it matched: done early.
            return
Make sure that for a given host, every subnet specified contains at least one of the host's IPs in its range.
39,395
def get_public_network_ip(ips, public_subnet):
    """Given a public subnet, return the first IP from ``ips`` that
    falls inside its range; raise RuntimeError when none does.
    """
    for candidate in ips:
        if net.ip_in_subnet(candidate, public_subnet):
            return candidate
    raise RuntimeError(
        "IPs (%s) are not valid for any of subnet specified %s" % (str(ips), str(public_subnet))
    )
Given a public subnet, choose the one IP from the remote host that exists within the subnet range.
39,396
def make(parser):
    """Start deploying a new cluster, and write a CLUSTER.conf and
    keyring for it.
    """
    parser.add_argument(
        'mon',
        metavar='MON',
        nargs='+',
        help='initial monitor hostname, fqdn, or hostname:fqdn pair',
        type=arg_validators.Hostname(),
    )
    parser.add_argument(
        '--no-ssh-copykey',
        dest='ssh_copykey',
        action='store_false',
        default=True,
        help='do not attempt to copy SSH keys',
    )
    parser.add_argument(
        '--fsid',
        dest='fsid',
        help='provide an alternate FSID for ceph.conf generation',
    )
    parser.add_argument(
        '--cluster-network',
        help='specify the (internal) cluster network',
        type=arg_validators.Subnet(),
    )
    parser.add_argument(
        '--public-network',
        help='specify the public network for a cluster',
        type=arg_validators.Subnet(),
    )
    parser.set_defaults(func=new)
Start deploying a new cluster and write a CLUSTER . conf and keyring for it .
39,397
def make(parser):
    """Ceph MDS daemon management"""
    mds_parser = parser.add_subparsers(dest='subcommand')
    mds_parser.required = True

    mds_create = mds_parser.add_parser(
        'create',
        help='Deploy Ceph MDS on remote host(s)'
    )
    mds_create.add_argument(
        'mds',
        metavar='HOST[:NAME]',
        nargs='+',
        type=colon_separated,
        help='host (and optionally the daemon name) to deploy on',
    )
    parser.set_defaults(func=mds)
Ceph MDS daemon management
39,398
def install_yum_priorities(distro, _yum=None):
    """Install the yum priorities plugin so that the ceph.repo we create
    has a higher priority than the EPEL repo (which also packages Ceph).

    ``_yum`` is injectable for testing and defaults to pkg_managers.yum.
    The package is named ``yum-priorities`` on CentOS releases other
    than 6 and ``yum-plugin-priorities`` everywhere else.
    """
    run_yum = _yum or pkg_managers.yum
    package_name = 'yum-plugin-priorities'
    if distro.normalized_name == 'centos' and distro.release[0] != '6':
        package_name = 'yum-priorities'
    run_yum(distro.conn, package_name)
EPEL started packaging Ceph so we need to make sure that the ceph . repo we install has a higher priority than the EPEL repo so that when installing Ceph it will come from the repo file we create .
39,399
def make_exception_message(exc):
    """Return a readable one-line message for an exception: the class
    name plus the message when the exception carries one.
    """
    name = exc.__class__.__name__
    text = str(exc)
    if text:
        return '%s: %s\n' % (name, text)
    return '%s\n' % name
An exception is passed in and this function returns the proper string depending on the result so it is readable enough .