idx
int64
0
63k
question
stringlengths
61
4.03k
target
stringlengths
6
1.23k
39,400
def platform_information(_linux_distribution=None):
    """Detect distro name, release and codename of the remote host.

    ``_linux_distribution`` is an injectable callable for testing; it
    defaults to ``platform.linux_distribution`` (NOTE(review): removed in
    Python 3.8 — confirm a replacement is injected on modern interpreters).
    """
    detect = _linux_distribution or platform.linux_distribution
    distro, release, codename = detect()
    if not distro:
        # fall back to parsing /etc/os-release style data
        distro, release, codename = parse_os_release()
    if not codename and 'debian' in distro.lower():
        # map Debian major versions to their codenames
        debian_codenames = {
            '10': 'buster',
            '9': 'stretch',
            '8': 'jessie',
            '7': 'wheezy',
            '6': 'squeeze',
        }
        codename = debian_codenames.get(release.split('.')[0], '')
        if not codename and '/' in release:
            # testing/unstable releases look like "major/sid"
            major, minor = release.split('/')
            codename = minor if minor == 'sid' else major
    if not codename and 'oracle' in distro.lower():
        codename = 'oracle'
    if not codename and 'virtuozzo linux' in distro.lower():
        codename = 'virtuozzo'
    if not codename and 'arch' in distro.lower():
        codename = 'arch'
    return (
        str(distro).rstrip(),
        str(release).rstrip(),
        str(codename).rstrip(),
    )
detect platform information from remote host
39,401
def write_keyring(path, key, uid=-1, gid=-1):
    """Create a keyring file at ``path`` by writing ``key`` to a temp file
    and moving it into place; creates the parent directory if missing."""
    with tempfile.NamedTemporaryFile('wb', delete=False) as tmp_file:
        tmp_file.write(key)
    keyring_dir = os.path.dirname(path)
    if not path_exists(keyring_dir):
        makedir(keyring_dir, uid, gid)
    shutil.move(tmp_file.name, path)
create a keyring file
39,402
def create_mon_path(path, uid=-1, gid=-1):
    """Create the mon path if it does not exist, chowning the new directory."""
    if os.path.exists(path):
        return
    os.makedirs(path)
    os.chown(path, uid, gid)
create the mon path if it does not exist
39,403
def create_done_path(done_path, uid=-1, gid=-1):
    """Create an empty 'done' marker file so the mon deployment is not redone."""
    open(done_path, 'wb').close()
    os.chown(done_path, uid, gid)
create a done file to avoid re - doing the mon deployment
39,404
def create_init_path(init_path, uid=-1, gid=-1):
    """Create an empty init marker file if it does not exist yet."""
    if os.path.exists(init_path):
        return
    open(init_path, 'wb').close()
    os.chown(init_path, uid, gid)
create the init path if it does not exist
39,405
def write_monitor_keyring(keyring, monitor_keyring, uid=-1, gid=-1):
    """Create the monitor keyring file with owner-only (0600) permissions."""
    mode = 0o600
    write_file(keyring, monitor_keyring, mode, None, uid, gid)
create the monitor keyring file
39,406
def which(executable):
    """Return the absolute path of ``executable`` searched in a fixed set of
    system bin directories, or None if it is not found."""
    locations = (
        '/usr/local/bin',
        '/bin',
        '/usr/bin',
        '/usr/local/sbin',
        '/usr/sbin',
        '/sbin',
    )
    for location in locations:
        candidate = os.path.join(location, executable)
        if os.path.exists(candidate) and os.path.isfile(candidate):
            return candidate
find the location of an executable
39,407
def make_mon_removed_dir(path, file_name):
    """Archive old monitor data by moving it into /var/lib/ceph/mon-removed."""
    try:
        os.makedirs('/var/lib/ceph/mon-removed')
    except OSError as e:
        # the archive directory already existing is fine
        if e.errno != errno.EEXIST:
            raise
    shutil.move(path, os.path.join('/var/lib/ceph/mon-removed/', file_name))
move old monitor data
39,408
def safe_mkdir(path, uid=-1, gid=-1):
    """Create ``path`` (single level) if it does not exist; chown only when
    the directory was actually created by this call."""
    try:
        os.mkdir(path)
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise
    else:
        os.chown(path, uid, gid)
create path if it doesn't exist
39,409
def safe_makedirs(path, uid=-1, gid=-1):
    """Create ``path`` recursively if it does not exist; chown only when
    the leaf directory was actually created by this call."""
    try:
        os.makedirs(path)
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise
    else:
        os.chown(path, uid, gid)
create path recursively if it doesn't exist
39,410
def zeroing(dev):
    """Zero the last few blocks of a device.

    Writes 33 LBA-sized (4096-byte) blocks of zeros at the end of ``dev``
    and returns True on success.

    Bug fixed: the original had ``return True`` *before* the write, making
    the whole function a no-op (the zeroing code was unreachable).  It also
    opened with ``'wb'``, which truncates a regular file to zero length and
    then fails the negative seek; ``'rb+'`` updates in place and works for
    both block devices and regular files.
    """
    lba_size = 4096
    size = 33 * lba_size
    with open(dev, 'rb+') as f:
        # seek back from the end and overwrite the tail with zeros
        f.seek(-size, os.SEEK_END)
        f.write(size * b'\0')
    return True
zeroing last few blocks of device
39,411
def enable_yum_priority_obsoletes(path="/etc/yum/pluginconf.d/priorities.conf"):
    """Turn on ``check_obsoletes`` in the Yum priorities plugin config."""
    config = configparser.ConfigParser()
    config.read(path)
    config.set('main', 'check_obsoletes', '1')
    with open(path, 'w') as fout:
        config.write(fout)
Configure Yum priorities to include obsoletes
39,412
def vendorize(vendor_requirements):
    """Main entry point for vendorizing requirements.

    ``vendor_requirements`` is a list of ``(name, version)`` or
    ``(name, version, cmd)`` tuples; each is passed to ``vendor_library``.

    Bug fixed: a tuple of any other length previously fell through both
    branches and raised a confusing UnboundLocalError when ``name``/``cmd``
    were used; now it raises an explicit ValueError.
    """
    for library in vendor_requirements:
        if len(library) == 2:
            name, version = library
            cmd = None
        elif len(library) == 3:
            name, version, cmd = library
        else:
            raise ValueError(
                'expected a (name, version) or (name, version, cmd) tuple, '
                'got: %r' % (library,)
            )
        vendor_library(name, version, cmd)
This is the main entry point for vendorizing requirements . It expects a list of tuples that should contain the name of the library and the version .
39,413
def _keyring_equivalent(keyring_one, keyring_two):
    """Return True if both keyring files contain the same ``key`` value."""

    def keyring_extract_key(file_path):
        # scan for the first "key = <value>" line and return the value
        with open(file_path) as f:
            for line in f:
                content = line.strip()
                if not content:
                    continue
                split_line = content.split('=')
                if split_line[0].strip() == 'key':
                    return "=".join(split_line[1:]).strip()
        raise RuntimeError("File '%s' is not a keyring" % file_path)

    return keyring_extract_key(keyring_one) == keyring_extract_key(keyring_two)
Check two keyrings are identical
39,414
def keytype_path_to(args, keytype):
    """Return the local keyring filename for ``keytype`` in cluster ``args.cluster``."""
    cluster = args.cluster
    if keytype == "admin":
        return '{cluster}.client.admin.keyring'.format(cluster=cluster)
    if keytype == "mon":
        return '{cluster}.mon.keyring'.format(cluster=cluster)
    # anything else is a bootstrap keyring (osd, mds, mgr, rgw, ...)
    return '{cluster}.bootstrap-{what}.keyring'.format(cluster=cluster, what=keytype)
Get the local filename for a keyring type
39,415
def gatherkeys_missing(args, distro, rlogger, keypath, keytype, dest_dir):
    """Get (or create) the keyring for ``keytype`` from the mon, using the mon
    keyring at ``keypath``, and write it under ``dest_dir``.

    Returns True on success, False when the remote command fails.
    """
    args_prefix = [
        '/usr/bin/ceph',
        '--connect-timeout=25',
        '--cluster={cluster}'.format(cluster=args.cluster),
        '--name', 'mon.',
        '--keyring={keypath}'.format(keypath=keypath),
    ]
    identity = keytype_identity(keytype)
    if identity is None:
        raise RuntimeError('Could not find identity for keytype:%s' % keytype)
    caps = keytype_capabilities(keytype)
    if caps is None:
        raise RuntimeError('Could not find capabilites for keytype:%s' % keytype)
    # try to fetch an existing key first
    out, err, code = remoto.process.check(
        distro.conn,
        args_prefix + ['auth', 'get', identity])
    if code == errno.ENOENT:
        # no such key yet: create it with the required capabilities
        out, err, code = remoto.process.check(
            distro.conn,
            args_prefix + ['auth', 'get-or-create', identity] + caps)
    if code != 0:
        rlogger.error('"ceph auth get-or-create for keytype %s returned %s', keytype, code)
        for line in err:
            rlogger.debug(line)
        return False
    keyring_name_local = keytype_path_to(args, keytype)
    keyring_path_local = os.path.join(dest_dir, keyring_name_local)
    with open(keyring_path_local, 'wb') as f:
        for line in out:
            f.write(line + b'\n')
    return True
Get or create the keyring from the mon using the mon keyring by keytype and copy to dest_dir
39,416
def gatherkeys_with_mon(args, host, dest_dir):
    """Connect to ``host``'s mon, and if it is in quorum, gather all key
    types into ``dest_dir``.  Returns True on success, False otherwise.
    """
    distro = hosts.get(host, username=args.username)
    remote_hostname = distro.conn.remote_module.shortname()
    # path to the mon's own keyring on the remote host
    dir_keytype_mon = ceph_deploy.util.paths.mon.path(args.cluster, remote_hostname)
    path_keytype_mon = "%s/keyring" % (dir_keytype_mon)
    mon_key = distro.conn.remote_module.get_file(path_keytype_mon)
    if mon_key is None:
        LOG.warning("No mon key found in host: %s", host)
        return False
    # store a local copy of the mon keyring first
    mon_name_local = keytype_path_to(args, "mon")
    mon_path_local = os.path.join(dest_dir, mon_name_local)
    with open(mon_path_local, 'wb') as f:
        f.write(mon_key)
    rlogger = logging.getLogger(host)
    # query the mon's status through its admin socket
    path_asok = ceph_deploy.util.paths.mon.asok(args.cluster, remote_hostname)
    out, err, code = remoto.process.check(
        distro.conn,
        [
            "/usr/bin/ceph",
            "--connect-timeout=25",
            "--cluster={cluster}".format(cluster=args.cluster),
            "--admin-daemon={asok}".format(asok=path_asok),
            "mon_status"
        ])
    if code != 0:
        rlogger.error('"ceph mon_status %s" returned %s', host, code)
        for line in err:
            rlogger.debug(line)
        return False
    try:
        mon_status = json.loads(b''.join(out).decode('utf-8'))
    except ValueError:
        rlogger.error('"ceph mon_status %s" output was not json', host)
        for line in out:
            rlogger.error(line)
        return False
    # validate the mon map / quorum structure before trusting it
    mon_number = None
    mon_map = mon_status.get('monmap')
    if mon_map is None:
        rlogger.error("could not find mon map for mons on '%s'", host)
        return False
    mon_quorum = mon_status.get('quorum')
    if mon_quorum is None:
        rlogger.error("could not find quorum for mons on '%s'", host)
        return False
    mon_map_mons = mon_map.get('mons')
    if mon_map_mons is None:
        rlogger.error("could not find mons in monmap on '%s'", host)
        return False
    # find this mon's rank in the monmap
    for mon in mon_map_mons:
        if mon.get('name') == remote_hostname:
            mon_number = mon.get('rank')
            break
    if mon_number is None:
        rlogger.error("could not find '%s' in monmap", remote_hostname)
        return False
    # only gather keys from a mon that is actually in quorum
    if not mon_number in mon_quorum:
        rlogger.error("Not yet quorum for '%s'", host)
        return False
    # fetch every non-mon key type using this mon's keyring
    for keytype in ["admin", "mds", "mgr", "osd", "rgw"]:
        if not gatherkeys_missing(args, distro, rlogger, path_keytype_mon, keytype, dest_dir):
            # annoying to rlogger on error but gatherkeys_missing already does
            rlogger.error("Failed to return '%s' key from host %s", keytype, host)
            return False
    return True
Connect to mon and gather keys if mon is in quorum .
39,417
def gatherkeys(args):
    """Gather keys from any mon in quorum and store them in the current
    working directory, backing up any local keyring that differs.

    Fixes: misspelled local variable ``sucess`` and the log message
    "No key retrived"; ``tmpd`` is now created *before* the inner ``try`` so
    the ``finally`` clause can never reference an unbound name if
    ``mkdtemp`` itself raises.

    Raises RuntimeError if no mon could be contacted or a key type is missing.
    """
    oldmask = os.umask(0o77)  # keys are secrets: restrict permissions
    try:
        tmpd = tempfile.mkdtemp()
        try:
            LOG.info("Storing keys in temp directory %s", tmpd)
            success = False
            for host in args.mon:
                success = gatherkeys_with_mon(args, host, tmpd)
                if success:
                    break
            if not success:
                LOG.error("Failed to connect to host:%s", ', '.join(args.mon))
                raise RuntimeError('Failed to connect any mon')
            had_error = False
            date_string = time.strftime("%Y%m%d%H%M%S")
            for keytype in ["admin", "mds", "mgr", "mon", "osd", "rgw"]:
                filename = keytype_path_to(args, keytype)
                tmp_path = os.path.join(tmpd, filename)
                if not os.path.exists(tmp_path):
                    LOG.error("No key retrieved for '%s'", keytype)
                    had_error = True
                    continue
                if not os.path.exists(filename):
                    # no local copy yet: just move it into place
                    LOG.info("Storing %s" % (filename))
                    shutil.move(tmp_path, filename)
                    continue
                if _keyring_equivalent(tmp_path, filename):
                    LOG.info("keyring '%s' already exists", filename)
                    continue
                # local copy differs: back it up before replacing
                backup_keyring = "%s-%s" % (filename, date_string)
                LOG.info("Replacing '%s' and backing up old key as '%s'",
                         filename, backup_keyring)
                shutil.copy(filename, backup_keyring)
                shutil.move(tmp_path, filename)
            if had_error:
                raise RuntimeError('Failed to get all key types')
        finally:
            LOG.info("Destroy temp directory %s" % (tmpd))
            shutil.rmtree(tmpd)
    finally:
        os.umask(oldmask)
Gather keys from any mon and store in current working directory .
39,418
def make(parser):
    """Gather authentication keys for provisioning new nodes."""
    parser.add_argument(
        'mon',
        metavar='HOST',
        nargs='+',
        help='monitor host to pull keys from',
    )
    parser.set_defaults(func=gatherkeys)
Gather authentication keys for provisioning new nodes .
39,419
def get(hostname,
        username=None,
        fallback=None,
        detect_sudo=True,
        use_rhceph=False,
        callbacks=None):
    """Retrieve the distro module matching ``hostname``'s platform.

    Connects to the host, detects its distribution, and returns the
    matching module decorated with connection and platform attributes.
    ``fallback`` is accepted but unused here — TODO confirm whether callers
    rely on it.  ``callbacks`` is an optional iterable of callables invoked
    with the configured module before returning.
    """
    conn = get_connection(
        hostname,
        username=username,
        logger=logging.getLogger(hostname),
        detect_sudo=detect_sudo)
    try:
        conn.import_module(remotes)
    except IOError as error:
        # a closed channel usually means requiretty killed the session
        if 'already closed' in getattr(error, 'message', ''):
            raise RuntimeError('remote connection got closed, ensure ``requiretty`` is disabled for %s' % hostname)
    distro_name, release, codename = conn.remote_module.platform_information()
    if not codename or not _get_distro(distro_name):
        raise exc.UnsupportedPlatform(
            distro=distro_name,
            codename=codename,
            release=release)
    machine_type = conn.remote_module.machine_type()
    module = _get_distro(distro_name, use_rhceph=use_rhceph)
    # decorate the module with everything discovered about the host
    module.name = distro_name
    module.normalized_name = _normalized_distro_name(distro_name)
    module.normalized_release = _normalized_release(release)
    module.distro = module.normalized_name
    # family flags used elsewhere to pick package tooling
    module.is_el = module.normalized_name in ['redhat', 'centos', 'fedora', 'scientific', 'oracle', 'virtuozzo']
    module.is_rpm = module.normalized_name in ['redhat', 'centos', 'fedora', 'scientific', 'suse', 'oracle', 'virtuozzo', 'alt']
    module.is_deb = module.normalized_name in ['debian', 'ubuntu']
    module.is_pkgtarxz = module.normalized_name in ['arch']
    module.release = release
    module.codename = codename
    module.conn = conn
    module.machine_type = machine_type
    module.init = module.choose_init(module)
    module.packager = module.get_packager(module)
    if callbacks:
        for c in callbacks:
            c(module)
    return module
Retrieve the module that matches the distribution of a hostname . This function will connect to that host and retrieve the distribution information then return the appropriate module and slap a few attributes to that module defining the information it found from the hostname .
39,420
def get_connection(hostname, username, logger, threads=5, use_sudo=None, detect_sudo=True):
    """Return a remoto connection to ``hostname`` that knows whether sudo
    is needed; raises RuntimeError with context on any connection failure.

    NOTE(review): ``use_sudo`` is accepted but not forwarded — confirm intent.
    """
    if username:
        hostname = "%s@%s" % (username, hostname)
    try:
        conn = remoto.Connection(
            hostname,
            logger=logger,
            threads=threads,
            detect_sudo=detect_sudo,
        )
        # do not let remote commands hang forever
        conn.global_timeout = 300
        logger.debug("connected to host: %s " % hostname)
        return conn
    except Exception as error:
        msg = "connecting to host: %s " % hostname
        errors = "resulted in errors: %s %s" % (error.__class__.__name__, error)
        raise RuntimeError(msg + errors)
A very simple helper meant to return a connection that will know about the need to use sudo .
39,421
def get_local_connection(logger, use_sudo=False):
    """Return a single-threaded connection to the local host."""
    local_host = socket.gethostname()
    return get_connection(
        local_host,
        None,
        logger=logger,
        threads=1,
        use_sudo=use_sudo,
        detect_sudo=False,
    )
Helper for local connections that are sometimes needed to operate on local hosts
39,422
def make(parser):
    """Ceph MGR daemon management."""
    mgr_parser = parser.add_subparsers(dest='subcommand')
    mgr_parser.required = True
    mgr_create = mgr_parser.add_parser(
        'create',
        help='Deploy Ceph MGR on remote host(s)')
    mgr_create.add_argument(
        'mgr',
        metavar='HOST[:NAME]',
        nargs='+',
        type=colon_separated,
        help='host (and optionally the daemon name) to deploy on',
    )
    parser.set_defaults(func=mgr)
Ceph MGR daemon management
39,423
def make(parser):
    """Manage packages on remote hosts."""
    # --install and --remove are mutually exclusive
    action = parser.add_mutually_exclusive_group()
    action.add_argument(
        '--install',
        metavar='PKG(s)',
        help='Comma-separated package(s) to install',
    )
    action.add_argument(
        '--remove',
        metavar='PKG(s)',
        help='Comma-separated package(s) to remove',
    )
    parser.add_argument(
        'hosts',
        nargs='+',
    )
    parser.set_defaults(func=pkg)
Manage packages on remote hosts .
39,424
def get_bootstrap_osd_key(cluster):
    """Read and return the bootstrap-osd keyring bytes for ``cluster``.

    Raises RuntimeError if the keyring file is missing.
    """
    path = '{cluster}.bootstrap-osd.keyring'.format(cluster=cluster)
    try:
        with open(path, 'rb') as f:
            return f.read()
    except IOError:
        raise RuntimeError('bootstrap-osd keyring not found; run \'gatherkeys\'')
Read the bootstrap - osd key for cluster .
39,425
def create_osd_keyring(conn, cluster, key):
    """On the osd node, write the bootstrap key if it is not there yet."""
    logger = conn.logger
    path = '/var/lib/ceph/bootstrap-osd/{cluster}.keyring'.format(
        cluster=cluster,
    )
    if conn.remote_module.path_exists(path):
        return
    logger.warning('osd keyring does not exist yet, creating one')
    conn.remote_module.write_keyring(path, key)
Run on osd node writes the bootstrap key if not there yet .
39,426
def osd_tree(conn, cluster):
    """Return `ceph osd tree --format=json` output as a dict with string
    booleans normalized to real booleans; {} when the output is not JSON."""
    ceph_executable = system.executable_path(conn, 'ceph')
    command = [
        ceph_executable,
        '--cluster={cluster}'.format(cluster=cluster),
        'osd',
        'tree',
        '--format=json',
    ]
    out, err, code = remoto.process.check(conn, command)
    try:
        tree = json.loads(b''.join(out).decode('utf-8'))
    except ValueError:
        return {}
    # the CLI emits booleans as the strings 'true'/'false'
    for key, value in tree.items():
        if value == 'true':
            tree[key] = True
        elif value == 'false':
            tree[key] = False
    return tree
Check the status of an OSD . Make sure all are up and in
39,427
def catch_osd_errors(conn, logger, args):
    """Check OSD status and warn about down, out, full or near-full OSDs."""
    logger.info('checking OSD status...')
    status = osd_status_check(conn, args.cluster)
    osds = int(status.get('num_osds', 0))
    up_osds = int(status.get('num_up_osds', 0))
    in_osds = int(status.get('num_in_osds', 0))
    if osds > up_osds:
        down = osds - up_osds
        # pluralize "is/are" and the trailing "s" based on the count
        logger.warning('there %s %d OSD%s down' % (
            ['is', 'are'][down != 1], down, "s"[down == 1:]))
    if osds > in_osds:
        out = osds - in_osds
        logger.warning('there %s %d OSD%s out' % (
            ['is', 'are'][out != 1], out, "s"[out == 1:]))
    if status.get('full', False):
        logger.warning('OSDs are full!')
    if status.get('nearfull', False):
        logger.warning('OSDs are near full!')
Look for possible issues when checking the status of an OSD and report them back to the user .
39,428
def create_osd(conn, cluster, data, journal, zap, fs_type, dmcrypt, dmcrypt_dir, storetype, block_wal, block_db, **kw):
    """Run ceph-volume on the osd node to create an OSD from a data disk."""
    ceph_volume_executable = system.executable_path(conn, 'ceph-volume')
    args = [
        ceph_volume_executable,
        '--cluster', cluster,
        'lvm', 'create',
        '--%s' % storetype,
        '--data', data,
    ]
    if zap:
        LOG.warning('zapping is no longer supported when preparing')
    if dmcrypt:
        args.append('--dmcrypt')
        LOG.warning('dmcrypt is currently not supported')
    if storetype == 'bluestore':
        if block_wal:
            args.extend(['--block.wal', block_wal])
        if block_db:
            args.extend(['--block.db', block_db])
    elif storetype == 'filestore':
        if not journal:
            raise RuntimeError('A journal lv or GPT partition must be specified when using filestore')
        args.extend(['--journal', journal])
    if kw.get('debug'):
        remoto.process.run(conn, args, extend_env={'CEPH_VOLUME_DEBUG': '1'})
    else:
        remoto.process.run(conn, args)
Run on osd node creates an OSD from a data disk .
39,429
def make(parser):
    """Prepare a data disk on remote host."""
    # NOTE(review): dedent() is called with no argument here, which is a
    # TypeError at runtime — the original help-text string appears to have
    # been lost; restore the multiline help text passed to dedent().
    sub_command_help = dedent()
    parser.formatter_class = argparse.RawDescriptionHelpFormatter
    parser.description = sub_command_help
    osd_parser = parser.add_subparsers(dest='subcommand')
    osd_parser.required = True
    # `osd list` subcommand
    osd_list = osd_parser.add_parser(
        'list',
        help='List OSD info from remote host(s)')
    osd_list.add_argument(
        'host',
        nargs='+',
        metavar='HOST',
        help='remote host(s) to list OSDs from')
    osd_list.add_argument(
        '--debug',
        action='store_true',
        help='Enable debug mode on remote ceph-volume calls',
    )
    # `osd create` subcommand
    osd_create = osd_parser.add_parser(
        'create',
        help='Create new Ceph OSD daemon by preparing and activating a device')
    osd_create.add_argument(
        '--data',
        metavar='DATA',
        help='The OSD data logical volume (vg/lv) or absolute path to device')
    osd_create.add_argument(
        '--journal',
        help='Logical Volume (vg/lv) or path to GPT partition',
    )
    osd_create.add_argument(
        '--zap-disk',
        action='store_true',
        help='DEPRECATED - cannot zap when creating an OSD')
    osd_create.add_argument(
        '--fs-type',
        metavar='FS_TYPE',
        choices=['xfs', 'btrfs'],
        default='xfs',
        help='filesystem to use to format DEVICE (xfs, btrfs)',
    )
    osd_create.add_argument(
        '--dmcrypt',
        action='store_true',
        help='use dm-crypt on DEVICE',
    )
    osd_create.add_argument(
        '--dmcrypt-key-dir',
        metavar='KEYDIR',
        default='/etc/ceph/dmcrypt-keys',
        help='directory where dm-crypt keys are stored',
    )
    osd_create.add_argument(
        '--filestore',
        action='store_true',
        default=None,
        help='filestore objectstore',
    )
    osd_create.add_argument(
        '--bluestore',
        action='store_true',
        default=None,
        help='bluestore objectstore',
    )
    osd_create.add_argument(
        '--block-db',
        default=None,
        help='bluestore block.db path')
    osd_create.add_argument(
        '--block-wal',
        default=None,
        help='bluestore block.wal path')
    osd_create.add_argument(
        'host',
        nargs='?',
        metavar='HOST',
        help='Remote host to connect')
    osd_create.add_argument(
        '--debug',
        action='store_true',
        help='Enable debug mode on remote ceph-volume calls',
    )
    parser.set_defaults(func=osd)
Prepare a data disk on remote host .
39,430
def make_disk(parser):
    """Manage disks on a remote host."""
    disk_parser = parser.add_subparsers(dest='subcommand')
    disk_parser.required = True
    # `disk zap` subcommand
    disk_zap = disk_parser.add_parser(
        'zap',
        help='destroy existing data and filesystem on LV or partition',
    )
    disk_zap.add_argument(
        'host',
        nargs='?',
        metavar='HOST',
        help='Remote HOST(s) to connect')
    disk_zap.add_argument(
        'disk',
        nargs='+',
        metavar='DISK',
        help='Disk(s) to zap')
    disk_zap.add_argument(
        '--debug',
        action='store_true',
        help='Enable debug mode on remote ceph-volume calls',
    )
    # `disk list` subcommand
    disk_list = disk_parser.add_parser(
        'list',
        help='List disk info from remote host(s)')
    disk_list.add_argument(
        'host',
        nargs='+',
        metavar='HOST',
        help='Remote HOST(s) to list OSDs from')
    disk_list.add_argument(
        '--debug',
        action='store_true',
        help='Enable debug mode on remote ceph-volume calls',
    )
    parser.set_defaults(func=disk)
Manage disks on a remote host .
39,431
def repository_url_part(distro):
    """Map a distro to its repository URL component.

    Historically everything EL-like mapped to 'el6'; from major version 6
    onward RHEL maps to 'rhelN' and the other EL clones map to 'elN'.
    """
    release = distro.normalized_release
    if release.int_major >= 6:
        name = distro.normalized_name
        if name == 'redhat':
            return 'rhel' + release.major
        if name in ['centos', 'scientific', 'oracle', 'virtuozzo']:
            return 'el' + release.major
    # legacy fallback for everything else
    return 'el6'
Historically everything CentOS RHEL and Scientific has been mapped to el6 urls but as we are adding repositories for rhel the URLs should map correctly to say rhel6 or rhel7 .
39,432
def sanitize_args(args):
    """Apply defaults that argparse cannot express easily.

    Defaults ``release`` to 'nautilus' (marking ``default_release``), and
    honors the deprecated ``--stable`` flag by copying it into ``release``.
    """
    if args.release is None:
        args.release = 'nautilus'
        args.default_release = True
    if args.stable is not None:
        LOG.warning('the --stable flag is deprecated, use --release instead')
        args.release = args.stable
    return args
args may need a bunch of logic to set proper defaults that argparse is not well suited for .
39,433
def should_use_custom_repo(args, cd_conf, repo_url):
    """Decide whether a custom (cephdeploy.conf) repo install should proceed.

    An explicit ``repo_url`` always wins over custom repos; otherwise a
    configured repo matching ``args.release`` or a default repo enables it.
    """
    if repo_url:
        # a custom repo URL was passed in explicitly
        return False
    if cd_conf and cd_conf.has_repos:
        has_valid_release = args.release in cd_conf.get_repos()
        has_default_repo = cd_conf.get_default_repo()
        if has_valid_release or has_default_repo:
            return True
    return False
A boolean to determine the logic needed to proceed with a custom repo installation instead of cramming everything next to the logic operator .
39,434
def make_uninstall(parser):
    """Remove Ceph packages from remote hosts."""
    parser.add_argument(
        'host',
        metavar='HOST',
        nargs='+',
        help='hosts to uninstall Ceph from',
    )
    parser.set_defaults(func=uninstall)
Remove Ceph packages from remote hosts .
39,435
def make_purge(parser):
    """Remove Ceph packages from remote hosts and purge all data."""
    parser.add_argument(
        'host',
        metavar='HOST',
        nargs='+',
        help='hosts to purge Ceph from',
    )
    parser.set_defaults(func=purge)
Remove Ceph packages from remote hosts and purge all data .
39,436
def make(parser):
    """Ceph RGW daemon management."""
    rgw_parser = parser.add_subparsers(dest='subcommand')
    rgw_parser.required = True
    rgw_create = rgw_parser.add_parser(
        'create',
        help='Create an RGW instance')
    rgw_create.add_argument(
        'rgw',
        metavar='HOST[:NAME]',
        nargs='+',
        type=colon_separated,
        help='host (and optionally the daemon name) to deploy on. \
NAME is automatically prefixed with \'rgw.\'',
    )
    parser.set_defaults(func=rgw)
Ceph RGW daemon management
39,437
def can_connect_passwordless(hostname):
    """Check that this host can SSH to ``hostname`` without a password
    prompt, using BatchMode so ssh fails instead of prompting."""
    # local/loopback targets don't go over SSH at all
    if not remoto.backends.needs_ssh(hostname):
        return True
    logger = logging.getLogger(hostname)
    with get_local_connection(logger) as conn:
        command = ['ssh', '-CT', '-o', 'BatchMode=yes', hostname, 'true']
        out, err, retval = remoto.process.check(conn, command, stop_on_error=False)
        permission_denied_error = 'Permission denied '
        host_key_verify_error = 'Host key verification failed.'
        has_key_error = any(
            permission_denied_error in line or host_key_verify_error in line
            for line in err)
        # exit status 255 plus an auth/hostkey error means no passwordless access
        if retval == 255 and has_key_error:
            return False
    return True
Ensure that current host can SSH remotely to the remote host using the BatchMode option to prevent a password prompt .
39,438
def ip_in_subnet(ip, subnet):
    """Return True if ``ip`` (e.g. '192.168.1.5') lies within ``subnet``
    (e.g. '192.168.1.0/24'), otherwise False.

    Replaces hand-rolled hex-string IPv4 arithmetic with the stdlib
    ``ipaddress`` module, which also generalizes the check to IPv6.
    ``strict=False`` tolerates host bits set in the network part, matching
    the old behavior of masking the network address.
    """
    import ipaddress
    network = ipaddress.ip_network(u'%s' % subnet, strict=False)
    return ipaddress.ip_address(u'%s' % ip) in network
Check whether an IP address exists in a given subnet . Returns a boolean
39,439
def in_subnet(cidr, addrs=None):
    """Return True if any address in ``addrs`` falls within ``cidr``.

    Bug fixed: the default ``addrs=None`` was iterated directly, raising
    ``TypeError`` whenever the argument was omitted; an absent/empty list
    now simply yields False.
    """
    for address in (addrs or []):
        if ip_in_subnet(address, cidr):
            return True
    return False
Returns True if host is within specified subnet otherwise False
39,440
def get_chacra_repo(shaman_url):
    """Resolve a Shaman URL to its chacra repo URL, fetch that repo file,
    and return its contents."""
    # the shaman endpoint redirects to the actual chacra repository
    chacra_url = get_request(shaman_url).geturl()
    return get_request(chacra_url).read()
From a Shaman URL get the chacra url for a repository read the contents that point to the repo and return it as a string .
39,441
def map_components(notsplit_packages, components):
    """Return the package list for ``components``: any component that is not
    split out as its own package collapses to the umbrella 'ceph' package."""
    packages = {
        'ceph' if component in notsplit_packages else component
        for component in components
    }
    return list(packages)
Returns a list of packages to install based on component names
39,442
def start_mon_service(distro, cluster, hostname):
    """Start (and where applicable enable) the mon service using whichever
    init system the distro reports."""
    if distro.init == 'sysvinit':
        service = distro.conn.remote_module.which_service()
        remoto.process.run(
            distro.conn,
            [
                service,
                'ceph',
                '-c',
                '/etc/ceph/{cluster}.conf'.format(cluster=cluster),
                'start',
                'mon.{hostname}'.format(hostname=hostname)
            ],
            timeout=7,
        )
        system.enable_service(distro.conn)
    elif distro.init == 'upstart':
        remoto.process.run(
            distro.conn,
            [
                'initctl',
                'emit',
                'ceph-mon',
                'cluster={cluster}'.format(cluster=cluster),
                'id={hostname}'.format(hostname=hostname),
            ],
            timeout=7,
        )
    elif distro.init == 'systemd':
        # enable the target, enable the unit, then start it
        unit = 'ceph-mon@{hostname}'.format(hostname=hostname)
        remoto.process.run(
            distro.conn,
            ['systemctl', 'enable', 'ceph.target'],
            timeout=7,
        )
        remoto.process.run(
            distro.conn,
            ['systemctl', 'enable', unit],
            timeout=7,
        )
        remoto.process.run(
            distro.conn,
            ['systemctl', 'start', unit],
            timeout=7,
        )
start mon service depending on distro init
39,443
def __voronoi_finite_polygons_2d(vor, radius=None):
    """Reconstruct infinite Voronoi regions in a 2D diagram as finite regions.

    ``vor`` is a ``scipy.spatial.Voronoi`` result; open regions are closed by
    projecting each infinite ridge outward by ``radius`` (default: the max
    point spread).  Returns ``(new_regions, new_vertices)`` where regions are
    lists of vertex indices sorted counterclockwise.
    """
    if vor.points.shape[1] != 2:
        raise ValueError("Requires 2D input")
    new_regions = []
    new_vertices = vor.vertices.tolist()
    center = vor.points.mean(axis=0)
    if radius is None:
        # large enough to push far points outside the hull of the input
        radius = vor.points.ptp().max()
    # map each input point to its ridges: (other point, ridge vertex, ridge vertex)
    all_ridges = {}
    for (p1, p2), (v1, v2) in zip(vor.ridge_points, vor.ridge_vertices):
        all_ridges.setdefault(p1, []).append((p2, v1, v2))
        all_ridges.setdefault(p2, []).append((p1, v1, v2))
    for p1, region in enumerate(vor.point_region):
        vertices = vor.regions[region]
        if all(v >= 0 for v in vertices):
            # region is already finite: keep as-is
            new_regions.append(vertices)
            continue
        if p1 not in all_ridges:
            continue
        ridges = all_ridges[p1]
        new_region = [v for v in vertices if v >= 0]
        for p2, v1, v2 in ridges:
            if v2 < 0:
                v1, v2 = v2, v1
            if v1 >= 0:
                # finite ridge: nothing to extend
                continue
            # infinite ridge: extend from the finite vertex along the
            # outward normal of the segment between the two input points
            t = vor.points[p2] - vor.points[p1]
            t /= np.linalg.norm(t)
            n = np.array([-t[1], t[0]])
            midpoint = vor.points[[p1, p2]].mean(axis=0)
            direction = np.sign(np.dot(midpoint - center, n)) * n
            far_point = vor.vertices[v2] + direction * radius
            new_region.append(len(new_vertices))
            new_vertices.append(far_point.tolist())
        # sort the region's vertices counterclockwise around the centroid
        vs = np.asarray([new_vertices[v] for v in new_region])
        c = vs.mean(axis=0)
        angles = np.arctan2(vs[:, 1] - c[1], vs[:, 0] - c[0])
        new_region = np.array(new_region)[np.argsort(angles)]
        new_regions.append(new_region.tolist())
    return new_regions, np.asarray(new_vertices)
Reconstruct infinite voronoi regions in a 2D diagram to finite regions .
39,444
def inline(width=900):
    """Render the current map inline in an IPython notebook as a base64 png."""
    from IPython.display import Image, HTML, display, clear_output
    import random
    import string
    import urllib
    import os

    # pick a random filename that does not collide with an existing png
    while True:
        token = ''.join(
            random.choice(string.ascii_uppercase + string.digits)
            for _ in range(32))
        if not os.path.isfile(token + '.png'):
            break
    savefig(token)
    png = token + '.png'
    if os.path.isfile(png):
        with open(png, 'rb') as fin:
            encoded = base64.b64encode(fin.read())
        b64 = urllib.parse.quote(encoded)
        image_html = "<img style='width: %dpx; margin: 0px; float: left; border: 1px solid black;' src='data:image/png;base64,%s' />" % (width, b64)
        display(HTML(image_html))
        os.remove(png)
display the map inline in ipython
39,445
def dot(data, color=None, point_size=2, f_tooltip=None):
    """Add a dot density map layer."""
    from geoplotlib.layers import DotDensityLayer
    layer = DotDensityLayer(
        data,
        color=color,
        point_size=point_size,
        f_tooltip=f_tooltip)
    _global_config.layers.append(layer)
Create a dot density map
39,446
def hist(data, cmap='hot', alpha=220, colorscale='sqrt', binsize=16,
         show_tooltip=False, scalemin=0, scalemax=None, f_group=None,
         show_colorbar=True):
    """Add a 2D histogram layer."""
    from geoplotlib.layers import HistogramLayer
    layer = HistogramLayer(
        data,
        cmap=cmap,
        alpha=alpha,
        colorscale=colorscale,
        binsize=binsize,
        show_tooltip=show_tooltip,
        scalemin=scalemin,
        scalemax=scalemax,
        f_group=f_group,
        show_colorbar=show_colorbar)
    _global_config.layers.append(layer)
Create a 2D histogram
39,447
def shapefiles(fname, f_tooltip=None, color=None, linewidth=3, shape_type='full'):
    """Load and draw shapefiles."""
    from geoplotlib.layers import ShapefileLayer
    layer = ShapefileLayer(fname, f_tooltip, color, linewidth, shape_type)
    _global_config.layers.append(layer)
Load and draws shapefiles
39,448
def voronoi(data, line_color=None, line_width=2, f_tooltip=None, cmap=None,
            max_area=1e4, alpha=220):
    """Draw the Voronoi tessellation of the points."""
    from geoplotlib.layers import VoronoiLayer
    layer = VoronoiLayer(
        data, line_color, line_width, f_tooltip, cmap, max_area, alpha)
    _global_config.layers.append(layer)
Draw the Voronoi tessellation of the points
39,449
def delaunay(data, line_color=None, line_width=2, cmap=None, max_lenght=100):
    """Draw a Delaunay triangulation of the points."""
    from geoplotlib.layers import DelaunayLayer
    layer = DelaunayLayer(data, line_color, line_width, cmap, max_lenght)
    _global_config.layers.append(layer)
Draw a delaunay triangulation of the points
39,450
def convexhull(data, col, fill=True, point_size=4):
    """Draw the convex hull of a set of points."""
    from geoplotlib.layers import ConvexHullLayer
    layer = ConvexHullLayer(data, col, fill, point_size)
    _global_config.layers.append(layer)
Convex hull for a set of points
39,451
def kde(data, bw, cmap='hot', method='hist', scaling='sqrt', alpha=220,
        cut_below=None, clip_above=None, binsize=1, cmap_levels=10,
        show_colorbar=False):
    """Add a kernel density estimation visualization layer."""
    from geoplotlib.layers import KDELayer
    layer = KDELayer(
        data, bw, cmap, method, scaling, alpha,
        cut_below, clip_above, binsize, cmap_levels, show_colorbar)
    _global_config.layers.append(layer)
Kernel density estimation visualization
39,452
def labels(data, label_column, color=None, font_name=FONT_NAME,
           font_size=14, anchor_x='left', anchor_y='top'):
    """Draw a text label for each sample."""
    from geoplotlib.layers import LabelsLayer
    layer = LabelsLayer(
        data, label_column, color, font_name,
        font_size, anchor_x, anchor_y)
    _global_config.layers.append(layer)
Draw a text label for each sample
39,453
def set_map_alpha(alpha):
    """Set the alpha (0-255) of the map tiles; raises on out-of-range values."""
    if not (0 <= alpha <= 255):
        raise Exception('invalid alpha ' + str(alpha))
    _global_config.map_alpha = alpha
Alpha color of the map tiles
39,454
def read_csv(fname):
    """Read a csv file into a DataAccessObject.

    Each column becomes a numpy array. Columns whose values parse
    cleanly as int or float are converted to that dtype; all other
    columns stay as strings.
    """
    values = defaultdict(list)
    with open(fname) as f:
        reader = csv.DictReader(f)
        for row in reader:
            for (k, v) in row.items():
                values[k].append(v)
    npvalues = {k: np.array(values[k]) for k in values.keys()}
    for k in npvalues.keys():
        # np.int / np.float were deprecated in NumPy 1.20 and removed in
        # 1.24; the Python builtins are the documented replacements and
        # yield the same astype() behavior.
        for datatype in [int, float]:
            try:
                # Probe one element first so non-numeric columns fail fast.
                npvalues[k][:1].astype(datatype)
                npvalues[k] = npvalues[k].astype(datatype)
                break
            except (ValueError, TypeError, OverflowError):
                # Narrowed from a bare except: only conversion failures
                # should be swallowed here.
                pass
    dao = DataAccessObject(npvalues)
    return dao
Read a csv file into a DataAccessObject
39,455
def head(self, n):
    """Return a DataAccessObject containing only the first n rows."""
    truncated = {name: self.dict[name][:n] for name in self.dict}
    return DataAccessObject(truncated)
Return a DataAccessObject containing the first n rows
39,456
def from_points(lons, lats):
    """Compute the BoundingBox enclosing the given longitudes/latitudes."""
    return BoundingBox(north=max(lats), west=min(lons),
                       south=min(lats), east=max(lons))
Compute the BoundingBox from a set of latitudes and longitudes
39,457
def from_bboxes(bboxes):
    """Compute a BoundingBox enclosing all of the given bboxes."""
    return BoundingBox(north=max(b.north for b in bboxes),
                       south=min(b.south for b in bboxes),
                       west=min(b.west for b in bboxes),
                       east=max(b.east for b in bboxes))
Compute a BoundingBox enclosing all specified bboxes
39,458
def get_pydoc_completions(modulename):
    """Get possible completions for modulename for pydoc."""
    modulename = compat.ensure_not_unicode(modulename)
    modulename = modulename.rstrip(".")
    if modulename == "":
        return sorted(get_modules())
    candidates = get_completions(modulename)
    if candidates:
        return sorted(candidates)
    # No exact module: fall back to prefix-matching against the parent
    # package (or the top-level module list).
    needle = modulename
    if "." in needle:
        modulename, _tail = needle.rsplit(".", 1)
        candidates = get_completions(modulename)
    else:
        candidates = get_modules()
    return sorted(name for name in candidates if name.startswith(needle))
Get possible completions for modulename for pydoc .
39,459
def get_modules(modulename=None):
    """Return a list of modules and packages under modulename.

    With no argument, return the top-level modules and packages
    (builtins included); underscore-prefixed names are filtered out.
    """
    modulename = compat.ensure_not_unicode(modulename)
    if not modulename:
        try:
            found = [name for (_imp, name, _ispkg) in iter_modules()
                     if not name.startswith("_")]
            return found + list(sys.builtin_module_names)
        except OSError:
            # iter_modules may raise while scanning unreadable sys.path
            # entries; fall back to builtins only.
            return list(sys.builtin_module_names)
    try:
        module = safeimport(modulename)
    except ErrorDuringImport:
        return []
    if module is None:
        return []
    if hasattr(module, "__path__"):
        return [name for (_imp, name, _ispkg) in iter_modules(module.__path__)
                if not name.startswith("_")]
    return []
Return a list of modules and packages under modulename .
39,460
def read_json(self):
    """Read a single line from stdin and decode it as JSON.

    Raises EOFError when the input stream is exhausted.
    """
    line = self.stdin.readline()
    if line == '':
        raise EOFError()
    return json.loads(line)
Read a single line and decode it as JSON .
39,461
def write_json(self, **kwargs):
    """Serialize kwargs as a JSON object, write it on one line, and flush."""
    serialized = json.dumps(kwargs)
    self.stdout.write(serialized + "\n")
    self.stdout.flush()
Write a JSON object on a single line.
39,462
def handle_request(self):
    """Handle a single JSON-RPC request read from stdin.

    Dispatches to an rpc_<method> handler when one exists, otherwise to
    self.handle(). Requests without an id get no success response.
    """
    request = self.read_json()
    if 'method' not in request:
        raise ValueError("Received a bad request: {0}".format(request))
    method_name = request['method']
    request_id = request.get('id', None)
    params = request.get('params') or []
    try:
        method = getattr(self, "rpc_" + method_name, None)
        if method is not None:
            result = method(*params)
        else:
            result = self.handle(method_name, params)
        if request_id is not None:
            self.write_json(result=result, id=request_id)
    except Fault as fault:
        # Faults carry their own code and optional data payload.
        error = {"message": fault.message, "code": fault.code}
        if fault.data is not None:
            error["data"] = fault.data
        self.write_json(error=error, id=request_id)
    except Exception as e:
        # Any other failure becomes a generic 500 with a traceback.
        error = {"message": str(e),
                 "code": 500,
                 "data": {"traceback": traceback.format_exc()}}
        self.write_json(error=error, id=request_id)
Handle a single JSON - RPC request .
39,463
def get_source(fileobj):
    """Translate fileobj into file contents.

    fileobj is either plain source (returned unchanged) or a dict with a
    "filename" key, whose file contents are read and returned; when the
    dict also has a truthy "delete_after_use" flag, the file is removed
    afterwards on a best-effort basis.
    """
    if not isinstance(fileobj, dict):
        return fileobj
    try:
        with io.open(fileobj["filename"], encoding="utf-8",
                     errors="ignore") as f:
            return f.read()
    finally:
        if fileobj.get('delete_after_use'):
            try:
                os.remove(fileobj["filename"])
            except OSError:
                # Narrowed from a bare except: only filesystem failures
                # should be ignored here, not e.g. KeyboardInterrupt.
                pass
Translate fileobj into file contents .
39,464
def _call_backend ( self , method , default , * args , ** kwargs ) : meth = getattr ( self . backend , method , None ) if meth is None : return default else : return meth ( * args , ** kwargs )
Call the backend method with args .
39,465
def rpc_get_calltip(self, filename, source, offset):
    """Get the calltip for the function at the offset."""
    return self._call_backend("rpc_get_calltip", None, filename,
                              get_source(source), offset)
Get the calltip for the function at the offset .
39,466
def rpc_get_oneline_docstring(self, filename, source, offset):
    """Get a one-line docstring for the symbol at the offset."""
    return self._call_backend("rpc_get_oneline_docstring", None, filename,
                              get_source(source), offset)
Get a one-line docstring for the symbol at the offset.
39,467
def rpc_get_completions(self, filename, source, offset):
    """Get a list of completion candidates for the symbol at offset."""
    raw = self._call_backend("rpc_get_completions", [], filename,
                             get_source(source), offset)
    # De-duplicate by candidate name (later entries win), then order by
    # the project's symbol sort key.
    unique = list(dict((cand['name'], cand) for cand in raw).values())
    unique.sort(key=lambda cand: _pysymbol_key(cand["name"]))
    return unique
Get a list of completion candidates for the symbol at offset .
39,468
def rpc_get_definition(self, filename, source, offset):
    """Get the location of the definition for the symbol at the offset."""
    return self._call_backend("rpc_get_definition", None, filename,
                              get_source(source), offset)
Get the location of the definition for the symbol at the offset .
39,469
def rpc_get_assignment(self, filename, source, offset):
    """Get the location of the assignment for the symbol at the offset."""
    return self._call_backend("rpc_get_assignment", None, filename,
                              get_source(source), offset)
Get the location of the assignment for the symbol at the offset .
39,470
def rpc_get_docstring(self, filename, source, offset):
    """Get the docstring for the symbol at the offset."""
    return self._call_backend("rpc_get_docstring", None, filename,
                              get_source(source), offset)
Get the docstring for the symbol at the offset .
39,471
def rpc_get_pydoc_documentation(self, symbol):
    """Get the Pydoc documentation for the given symbol, or None."""
    try:
        docstring = pydoc.render_doc(
            str(symbol), "Elpy Pydoc Documentation for %s", False)
    except (ImportError, pydoc.ErrorDuringImport):
        return None
    if isinstance(docstring, bytes):
        docstring = docstring.decode("utf-8", "replace")
    return docstring
Get the Pydoc documentation for the given symbol .
39,472
def rpc_get_refactor_options(self, filename, start, end=None):
    """Return a list of possible refactoring options.

    Raises ImportError when rope (via elpy.refactor) is unavailable.
    """
    try:
        from elpy import refactor
    except ImportError:
        # Narrowed from a bare except: only a missing module should be
        # reported as "Rope not installed"; other failures propagate.
        raise ImportError("Rope not installed, refactorings unavailable")
    ref = refactor.Refactor(self.project_root, filename)
    return ref.get_refactor_options(start, end)
Return a list of possible refactoring options .
39,473
def rpc_refactor(self, filename, method, args):
    """Return a list of changes from the named refactoring action.

    Raises ImportError when rope (via elpy.refactor) is unavailable.
    """
    try:
        from elpy import refactor
    except ImportError:
        # Narrowed from a bare except: only a missing module should be
        # reported as "Rope not installed"; other failures propagate.
        raise ImportError("Rope not installed, refactorings unavailable")
    if args is None:
        args = ()
    ref = refactor.Refactor(self.project_root, filename)
    return ref.get_changes(method, *args)
Return a list of changes from the refactoring action .
39,474
def rpc_get_names(self, filename, source, offset):
    """Get all possible names in the given source."""
    source = get_source(source)
    backend_method = getattr(self.backend, "rpc_get_names", None)
    if backend_method is None:
        raise Fault("get_names not implemented by current backend",
                    code=400)
    return backend_method(filename, source, offset)
Get all possible names
39,475
def pos_to_linecol(text, pos):
    """Return a (line, column) tuple for offset pos in text.

    Lines are 1-based, columns 0-based.
    """
    line_start = text.rfind("\n", 0, pos) + 1
    line_number = text.count("\n", 0, line_start) + 1
    return line_number, pos - line_start
Return a tuple of line and column for offset pos in text .
39,476
def linecol_to_pos(text, line, col):
    """Return the offset of 1-based line / 0-based column col in text.

    Raises ValueError when the position lies outside the text.
    """
    offset = 0
    for _ in range(line - 1):
        next_newline = text.find("\n", offset)
        if next_newline < 0:
            raise ValueError("Text does not have {0} lines.".format(line))
        offset = next_newline + 1
    offset += col
    if offset > len(text):
        raise ValueError("Line {0} column {1} is not within the text"
                         .format(line, col))
    return offset
Return the offset of this line and column in text .
39,477
def rpc_get_oneline_docstring(self, filename, source, offset):
    """Return a one-line docstring for the symbol at offset."""
    line, column = pos_to_linecol(source, offset)
    definitions = run_with_debug(jedi, 'goto_definitions', source=source,
                                 line=line, column=column, path=filename,
                                 encoding='utf-8')
    assignments = run_with_debug(jedi, 'goto_assignments', source=source,
                                 line=line, column=column, path=filename,
                                 encoding='utf-8')
    definition = definitions[0] if definitions else None
    assignment = assignments[0] if assignments else None
    if not definition:
        return None
    if definition.type in ['function', 'class']:
        raw_name = definition.name
        name = '{}()'.format(raw_name)
        doc = definition.docstring().split('\n')
    elif definition.type in ['module']:
        raw_name = definition.name
        name = '{} {}'.format(raw_name, definition.type)
        doc = definition.docstring().split('\n')
    elif definition.type in ['instance'] and hasattr(assignment, "name"):
        raw_name = assignment.name
        name = raw_name
        doc = assignment.docstring().split('\n')
    else:
        return None
    # Walk the docstring paragraph by paragraph, skipping paragraphs
    # that are just the call signature ("name(...").
    lines = []
    call = "{}(".format(raw_name)
    doc.append('')  # sentinel so the final paragraph is always flushed
    for entry in doc:
        if entry == '' and lines:
            paragraph = " ".join(lines)
            lines = []
            if call != paragraph[0:len(call)]:
                break
            paragraph = ""
            continue
        lines.append(entry)
    # Keep only the first sentence of the chosen paragraph.
    onelinedoc = paragraph.split('. ', 1)
    if len(onelinedoc) == 2:
        onelinedoc = onelinedoc[0] + '.'
    else:
        onelinedoc = onelinedoc[0]
    if onelinedoc == '':
        onelinedoc = "No documentation"
    return {"name": name, "doc": onelinedoc}
Return a one-line docstring for the symbol at the offset.
39,478
def rpc_get_usages(self, filename, source, offset):
    """Return the uses of the symbol at offset."""
    line, column = pos_to_linecol(source, offset)
    uses = run_with_debug(jedi, 'usages', source=source, line=line,
                          column=column, path=filename, encoding='utf-8')
    if uses is None:
        return None
    result = []
    for use in uses:
        # NOTE(review): when use.module_path is None, `offset` keeps its
        # previous value — preserved from the original implementation.
        if use.module_path == filename:
            offset = linecol_to_pos(source, use.line, use.column)
        elif use.module_path is not None:
            with open(use.module_path) as f:
                text = f.read()
            offset = linecol_to_pos(text, use.line, use.column)
        result.append({"name": use.name,
                       "filename": use.module_path,
                       "offset": offset})
    return result
Return the uses of the symbol at offset .
39,479
def rpc_get_names(self, filename, source, offset):
    """Return the list of possible names in the given source."""
    names = jedi.api.names(source=source, path=filename, encoding='utf-8',
                           all_scopes=True, definitions=True,
                           references=True)
    result = []
    for name in names:
        # NOTE(review): when name.module_path is None, `offset` keeps its
        # previous value — preserved from the original implementation.
        if name.module_path == filename:
            offset = linecol_to_pos(source, name.line, name.column)
        elif name.module_path is not None:
            with open(name.module_path) as f:
                text = f.read()
            offset = linecol_to_pos(text, name.line, name.column)
        result.append({"name": name.name,
                       "filename": name.module_path,
                       "offset": offset})
    return result
Return the list of possible names
39,480
def options(description, **kwargs):
    """Decorator attaching refactoring metadata to a method.

    The metadata dict is stored on the function as `refactor_notes`;
    keyword arguments override the defaults.
    """
    def set_notes(function):
        notes = {'name': function.__name__,
                 'category': "Miscellaneous",
                 'description': description,
                 'doc': getattr(function, '__doc__', ''),
                 'args': []}
        notes.update(kwargs)
        function.refactor_notes = notes
        return function
    return set_notes
Decorator to set some options on a method .
39,481
def translate_changes(initial_change):
    """Translate rope.base.change.Change instances to dictionaries.

    ChangeSets are flattened; every leaf change becomes one dict with an
    'action' key describing what to do.
    """
    agenda = [initial_change]
    result = []
    while agenda:
        change = agenda.pop(0)
        if isinstance(change, rope_change.ChangeSet):
            agenda.extend(change.changes)
        elif isinstance(change, rope_change.ChangeContents):
            result.append({'action': 'change',
                           'file': change.resource.real_path,
                           'contents': change.new_contents,
                           'diff': change.get_description()})
        elif isinstance(change, rope_change.CreateFile):
            result.append({'action': 'create',
                           'type': 'file',
                           'file': change.resource.real_path})
        elif isinstance(change, rope_change.CreateFolder):
            result.append({'action': 'create',
                           'type': 'directory',
                           'path': change.resource.real_path})
        elif isinstance(change, rope_change.MoveResource):
            result.append({'action': 'move',
                           'type': ('directory'
                                    if change.new_resource.is_folder()
                                    else 'file'),
                           'source': change.resource.real_path,
                           'destination': change.new_resource.real_path})
        elif isinstance(change, rope_change.RemoveResource):
            if change.resource.is_folder():
                result.append({'action': 'delete',
                               'type': 'directory',
                               'path': change.resource.real_path})
            else:
                result.append({'action': 'delete',
                               'type': 'file',
                               'file': change.resource.real_path})
    return result
Translate rope . base . change . Change instances to dictionaries .
39,482
def get_refactor_options(self, start, end=None):
    """Return a list of options for refactoring at the given position."""
    result = []
    for symbol in dir(self):
        if not symbol.startswith("refactor_"):
            continue
        method = getattr(self, symbol)
        if not method.refactor_notes.get('available', True):
            continue
        category = method.refactor_notes['category']
        # 'Region' refactorings require a selection; all others must
        # not have one.
        if end is not None and category != 'Region':
            continue
        if end is None and category == 'Region':
            continue
        is_on_symbol = self._is_on_symbol(start)
        if not is_on_symbol and category in ('Symbol', 'Method'):
            continue
        requires_import = method.refactor_notes.get('only_on_imports',
                                                    False)
        if requires_import and not self._is_on_import_statement(start):
            continue
        result.append(method.refactor_notes)
    return result
Return a list of options for refactoring at the given position .
39,483
def _is_on_import_statement ( self , offset ) : "Does this offset point to an import statement?" data = self . resource . read ( ) bol = data . rfind ( "\n" , 0 , offset ) + 1 eol = data . find ( "\n" , 0 , bol ) if eol == - 1 : eol = len ( data ) line = data [ bol : eol ] line = line . strip ( ) if line . startswith ( "import " ) or line . startswith ( "from " ) : return True else : return False
Does this offset point to an import statement?
39,484
def _is_on_symbol(self, offset):
    """Is this offset on a symbol?"""
    if not ROPE_AVAILABLE:
        return False
    data = self.resource.read()
    if offset >= len(data):
        return False
    char = data[offset]
    if char != '_' and not char.isalnum():
        return False
    word = worder.get_name_at(self.resource, offset)
    return bool(word)
Is this offset on a symbol?
39,485
def get_changes(self, name, *args):
    """Return a list of changes for the named refactoring action.

    Raises ValueError for names outside the refactor_ namespace and
    RuntimeError when the action is marked unavailable.
    """
    if not name.startswith("refactor_"):
        raise ValueError("Bad refactoring name {0}".format(name))
    method = getattr(self, name)
    if not method.refactor_notes.get('available', True):
        raise RuntimeError("Method not available")
    return method(*args)
Return a list of changes for the named refactoring action .
39,486
def refactor_froms_to_imports(self, offset):
    """Convert from-style imports to plain imports at offset."""
    organizer = ImportOrganizer(self.project)
    changes = organizer.froms_to_imports(self.resource, offset)
    return translate_changes(changes)
Convert imports of the form `from ... import ...` to plain `import ...` form.
39,487
def refactor_organize_imports(self):
    """Clean up and organize imports."""
    organizer = ImportOrganizer(self.project)
    return translate_changes(organizer.organize_imports(self.resource))
Clean up and organize imports .
39,488
def refactor_module_to_package(self):
    """Convert the current module into a package."""
    return self._get_changes(ModuleToPackage(self.project, self.resource))
Convert the current module into a package .
39,489
def refactor_rename_at_point(self, offset, new_name, in_hierarchy, docs):
    """Rename the symbol at point."""
    try:
        renamer = Rename(self.project, self.resource, offset)
    except RefactoringError as e:
        raise Fault(str(e), code=400)
    return self._get_changes(renamer, new_name,
                             in_hierarchy=in_hierarchy, docs=docs)
Rename the symbol at point .
39,490
def refactor_rename_current_module(self, new_name):
    """Rename the current module."""
    renamer = Rename(self.project, self.resource, None)
    return self._get_changes(renamer, new_name)
Rename the current module .
39,491
def refactor_move_module(self, new_name):
    """Move the current module to the location named by new_name."""
    mover = create_move(self.project, self.resource)
    destination = path_to_resource(self.project, new_name)
    return self._get_changes(mover, destination)
Move the current module .
39,492
def refactor_create_inline(self, offset, only_this):
    """Inline the function call at point.

    With only_this, inline just the current call and keep the
    definition; otherwise inline every call and remove it.
    """
    inliner = create_inline(self.project, self.resource, offset)
    if only_this:
        return self._get_changes(inliner, remove=False, only_current=True)
    return self._get_changes(inliner, remove=True, only_current=False)
Inline the function call at point .
39,493
def refactor_extract_method(self, start, end, name, make_global):
    """Extract the selected region as a method."""
    extractor = ExtractMethod(self.project, self.resource, start, end)
    return self._get_changes(extractor, name, similar=True,
                             global_=make_global)
Extract region as a method .
39,494
def refactor_use_function(self, offset):
    """Use the function at point wherever possible."""
    try:
        refactoring = UseFunction(self.project, self.resource, offset)
    except RefactoringError as e:
        raise Fault('Refactoring error: {}'.format(e), code=400)
    return self._get_changes(refactoring)
Use the function at point wherever possible .
39,495
def generate_payload(self, command, data=None):
    """Generate the payload to send for the given command.

    Args:
        command: command key into payload_dict for this device type.
        data: optional dps values to include in the request.
    """
    json_data = payload_dict[self.dev_type][command]['command']
    if 'gwId' in json_data:
        json_data['gwId'] = self.id
    if 'devId' in json_data:
        json_data['devId'] = self.id
    if 'uid' in json_data:
        json_data['uid'] = self.id
    if 't' in json_data:
        json_data['t'] = str(int(time.time()))
    if data is not None:
        json_data['dps'] = data
    # Serialize compactly (no spaces) and work in bytes from here on.
    json_payload = json.dumps(json_data)
    json_payload = json_payload.replace(' ', '')
    json_payload = json_payload.encode('utf-8')
    log.debug('json_payload=%r', json_payload)
    if command == SET:
        # SET payloads are AES-encrypted and prefixed with the protocol
        # version plus a slice of an MD5 checksum.
        self.cipher = AESCipher(self.local_key)
        json_payload = self.cipher.encrypt(json_payload)
        preMd5String = (b'data=' + json_payload + b'||lpv='
                        + PROTOCOL_VERSION_BYTES + b'||' + self.local_key)
        m = md5()
        m.update(preMd5String)
        hexdigest = m.hexdigest()
        json_payload = (PROTOCOL_VERSION_BYTES
                        + hexdigest[8:][:16].encode('latin1')
                        + json_payload)
        self.cipher = None
    postfix_payload = hex2bin(
        bin2hex(json_payload) + payload_dict[self.dev_type]['suffix'])
    # The length byte in the header is a single byte.
    assert len(postfix_payload) <= 0xff
    postfix_payload_hex_len = '%x' % len(postfix_payload)
    buffer = hex2bin(payload_dict[self.dev_type]['prefix']
                     + payload_dict[self.dev_type][command]['hexByte']
                     + '000000'
                     + postfix_payload_hex_len) + postfix_payload
    return buffer
Generate the payload to send .
39,496
def set_colour(self, r, g, b):
    """Set colour of an rgb bulb.

    Each channel must be in 0-255; raises ValueError otherwise.
    """
    if not 0 <= r <= 255:
        raise ValueError("The value for red needs to be between 0 and 255.")
    if not 0 <= g <= 255:
        raise ValueError("The value for green needs to be between 0 and 255.")
    if not 0 <= b <= 255:
        raise ValueError("The value for blue needs to be between 0 and 255.")
    hexvalue = BulbDevice._rgb_to_hexvalue(r, g, b)
    payload = self.generate_payload(SET, {
        self.DPS_INDEX_MODE: self.DPS_MODE_COLOUR,
        self.DPS_INDEX_COLOUR: hexvalue})
    return self._send_receive(payload)
Set colour of an rgb bulb .
39,497
def set_white(self, brightness, colourtemp):
    """Set white coloured theme of an rgb bulb.

    brightness must be 25-255 and colourtemp 0-255; raises ValueError
    otherwise.
    """
    if not 25 <= brightness <= 255:
        raise ValueError("The brightness needs to be between 25 and 255.")
    if not 0 <= colourtemp <= 255:
        raise ValueError("The colour temperature needs to be between 0 and 255.")
    payload = self.generate_payload(SET, {
        self.DPS_INDEX_MODE: self.DPS_MODE_WHITE,
        self.DPS_INDEX_BRIGHTNESS: brightness,
        self.DPS_INDEX_COLOURTEMP: colourtemp})
    return self._send_receive(payload)
Set white coloured theme of an rgb bulb .
39,498
def set_brightness(self, brightness):
    """Set the brightness value of an rgb bulb (valid range 25-255)."""
    if not 25 <= brightness <= 255:
        raise ValueError("The brightness needs to be between 25 and 255.")
    payload = self.generate_payload(
        SET, {self.DPS_INDEX_BRIGHTNESS: brightness})
    return self._send_receive(payload)
Set the brightness value of an rgb bulb .
39,499
def set_colourtemp(self, colourtemp):
    """Set the colour temperature of an rgb bulb (valid range 0-255)."""
    if not 0 <= colourtemp <= 255:
        raise ValueError("The colour temperature needs to be between 0 and 255.")
    payload = self.generate_payload(
        SET, {self.DPS_INDEX_COLOURTEMP: colourtemp})
    return self._send_receive(payload)
Set the colour temperature of an rgb bulb .