idx
int64
0
251k
question
stringlengths
53
3.53k
target
stringlengths
5
1.23k
len_question
int64
20
893
len_target
int64
3
238
227,300
def _default_transform_fn ( self , model , content , content_type , accept ) : try : data = self . _input_fn ( content , content_type ) except _errors . UnsupportedFormatError as e : return self . _error_response ( e , http_client . UNSUPPORTED_MEDIA_TYPE ) prediction = self . _predict_fn ( data , model ) try : result = self . _output_fn ( prediction , accept ) except _errors . UnsupportedFormatError as e : return self . _error_response ( e , http_client . NOT_ACCEPTABLE ) return result
Make predictions against the model and return a serialized response .
136
12
227,301
def training_env ( ) : # type: () -> _env.TrainingEnv from sagemaker_containers import _env return _env . TrainingEnv ( resource_config = _env . read_resource_config ( ) , input_data_config = _env . read_input_data_config ( ) , hyperparameters = _env . read_hyperparameters ( ) )
Create a TrainingEnv .
84
6
227,302
def _write_json ( obj , path ) : # type: (object, str) -> None with open ( path , 'w' ) as f : json . dump ( obj , f )
Writes a serializeable object as a JSON file
41
11
227,303
def _create_training_directories ( ) : logger . info ( 'Creating a new training folder under %s .' % base_dir ) os . makedirs ( model_dir ) os . makedirs ( input_config_dir ) os . makedirs ( output_data_dir ) _write_json ( { } , hyperparameters_file_dir ) _write_json ( { } , input_data_config_file_dir ) host_name = socket . gethostname ( ) resources_dict = { "current_host" : host_name , "hosts" : [ host_name ] } _write_json ( resources_dict , resource_config_file_dir )
Creates the directory structure and files necessary for training under the base path
153
14
227,304
def num_gpus ( ) : # type: () -> int try : cmd = shlex . split ( 'nvidia-smi --list-gpus' ) output = subprocess . check_output ( cmd ) . decode ( 'utf-8' ) return sum ( [ 1 for x in output . split ( '\n' ) if x . startswith ( 'GPU ' ) ] ) except ( OSError , subprocess . CalledProcessError ) : logger . info ( 'No GPUs detected (normal if no gpus installed)' ) return 0
The number of gpus available in the current container .
120
11
227,305
def write_env_vars ( env_vars = None ) : # type: (dict) -> None env_vars = env_vars or { } env_vars [ 'PYTHONPATH' ] = ':' . join ( sys . path ) for name , value in env_vars . items ( ) : os . environ [ name ] = value
Write the dictionary env_vars in the system as environment variables .
82
14
227,306
def to_env_vars ( self ) : env = { 'hosts' : self . hosts , 'network_interface_name' : self . network_interface_name , 'hps' : self . hyperparameters , 'user_entry_point' : self . user_entry_point , 'framework_params' : self . additional_framework_parameters , 'resource_config' : self . resource_config , 'input_data_config' : self . input_data_config , 'output_data_dir' : self . output_data_dir , 'channels' : sorted ( self . channel_input_dirs . keys ( ) ) , 'current_host' : self . current_host , 'module_name' : self . module_name , 'log_level' : self . log_level , 'framework_module' : self . framework_module , 'input_dir' : self . input_dir , 'input_config_dir' : self . input_config_dir , 'output_dir' : self . output_dir , 'num_cpus' : self . num_cpus , 'num_gpus' : self . num_gpus , 'model_dir' : self . model_dir , 'module_dir' : self . module_dir , 'training_env' : dict ( self ) , 'user_args' : self . to_cmd_args ( ) , 'output_intermediate_dir' : self . output_intermediate_dir } for name , path in self . channel_input_dirs . items ( ) : env [ 'channel_%s' % name ] = path for key , value in self . hyperparameters . items ( ) : env [ 'hp_%s' % key ] = value return _mapping . to_env_vars ( env )
Environment variable representation of the training environment
404
7
227,307
def array_to_npy ( array_like ) : # type: (np.array or Iterable or int or float) -> object buffer = BytesIO ( ) np . save ( buffer , array_like ) return buffer . getvalue ( )
Convert an array like object to the NPY format .
54
12
227,308
def npy_to_numpy ( npy_array ) : # type: (object) -> np.array stream = BytesIO ( npy_array ) return np . load ( stream , allow_pickle = True )
Convert an NPY array into numpy .
50
10
227,309
def array_to_json ( array_like ) : # type: (np.array or Iterable or int or float) -> str def default ( _array_like ) : if hasattr ( _array_like , 'tolist' ) : return _array_like . tolist ( ) return json . JSONEncoder ( ) . default ( _array_like ) return json . dumps ( array_like , default = default )
Convert an array like object to JSON .
93
9
227,310
def json_to_numpy ( string_like , dtype = None ) : # type: (str) -> np.array data = json . loads ( string_like ) return np . array ( data , dtype = dtype )
Convert a JSON object to a numpy array .
51
11
227,311
def csv_to_numpy ( string_like , dtype = None ) : # type: (str) -> np.array stream = StringIO ( string_like ) return np . genfromtxt ( stream , dtype = dtype , delimiter = ',' )
Convert a CSV object to a numpy array .
59
11
227,312
def array_to_csv ( array_like ) : # type: (np.array or Iterable or int or float) -> str stream = StringIO ( ) np . savetxt ( stream , array_like , delimiter = ',' , fmt = '%s' ) return stream . getvalue ( )
Convert an array like object to CSV .
66
9
227,313
def decode ( obj , content_type ) : # type: (np.array or Iterable or int or float, str) -> np.array try : decoder = _decoders_map [ content_type ] return decoder ( obj ) except KeyError : raise _errors . UnsupportedFormatError ( content_type )
Decode an object ton a one of the default content types to a numpy array .
70
18
227,314
def encode ( array_like , content_type ) : # type: (np.array or Iterable or int or float, str) -> np.array try : encoder = _encoders_map [ content_type ] return encoder ( array_like ) except KeyError : raise _errors . UnsupportedFormatError ( content_type )
Encode an array like object in a specific content_type to a numpy array .
74
18
227,315
def tmpdir ( suffix = '' , prefix = 'tmp' , dir = None ) : # type: (str, str, str) -> None tmp = tempfile . mkdtemp ( suffix = suffix , prefix = prefix , dir = dir ) yield tmp shutil . rmtree ( tmp )
Create a temporary directory with a context manager . The file is deleted when the context exits .
63
18
227,316
def download_and_extract ( uri , name , path ) : # type: (str, str, str) -> None if not os . path . exists ( path ) : os . makedirs ( path ) if not os . listdir ( path ) : with tmpdir ( ) as tmp : if uri . startswith ( 's3://' ) : dst = os . path . join ( tmp , 'tar_file' ) s3_download ( uri , dst ) with tarfile . open ( name = dst , mode = 'r:gz' ) as t : t . extractall ( path = path ) elif os . path . isdir ( uri ) : if uri == path : return if os . path . exists ( path ) : shutil . rmtree ( path ) shutil . move ( uri , path ) else : shutil . copy2 ( uri , os . path . join ( path , name ) )
Download prepare and install a compressed tar file from S3 or local directory as an entry point .
207
19
227,317
def s3_download ( url , dst ) : # type: (str, str) -> None url = parse . urlparse ( url ) if url . scheme != 's3' : raise ValueError ( "Expecting 's3' scheme, got: %s in %s" % ( url . scheme , url ) ) bucket , key = url . netloc , url . path . lstrip ( '/' ) region = os . environ . get ( 'AWS_REGION' , os . environ . get ( _params . REGION_NAME_ENV ) ) s3 = boto3 . resource ( 's3' , region_name = region ) s3 . Bucket ( bucket ) . download_file ( key , dst )
Download a file from S3 .
161
7
227,318
def matching_args ( fn , dictionary ) : # type: (Callable, _mapping.Mapping) -> dict arg_spec = getargspec ( fn ) if arg_spec . keywords : return dictionary return _mapping . split_by_criteria ( dictionary , arg_spec . args ) . included
Given a function fn and a dict dictionary returns the function arguments that match the dict keys .
67
18
227,319
def error_wrapper ( fn , error_class ) : # type: (Callable or None, Exception) -> ... def wrapper ( * args , * * kwargs ) : try : return fn ( * args , * * kwargs ) except Exception as e : six . reraise ( error_class , error_class ( e ) , sys . exc_info ( ) [ 2 ] ) return wrapper
Wraps function fn in a try catch block that re - raises error_class .
86
17
227,320
def ceph_is_installed ( module ) : ceph_package = Ceph ( module . conn ) if not ceph_package . installed : host = module . conn . hostname raise RuntimeError ( 'ceph needs to be installed in remote host: %s' % host )
A helper callback to be executed after the connection is made to ensure that Ceph is installed .
61
19
227,321
def color_format ( ) : str_format = BASE_COLOR_FORMAT if supports_color ( ) else BASE_FORMAT color_format = color_message ( str_format ) return ColoredFormatter ( color_format )
Main entry point to get a colored formatter it will use the BASE_FORMAT by default and fall back to no colors if the system does not support it
50
32
227,322
def mon_status_check ( conn , logger , hostname , args ) : asok_path = paths . mon . asok ( args . cluster , hostname ) out , err , code = remoto . process . check ( conn , [ 'ceph' , '--cluster={cluster}' . format ( cluster = args . cluster ) , '--admin-daemon' , asok_path , 'mon_status' , ] , ) for line in err : logger . error ( line ) try : return json . loads ( b'' . join ( out ) . decode ( 'utf-8' ) ) except ValueError : return { }
A direct check for JSON output on the monitor status .
140
11
227,323
def catch_mon_errors ( conn , logger , hostname , cfg , args ) : monmap = mon_status_check ( conn , logger , hostname , args ) . get ( 'monmap' , { } ) mon_initial_members = get_mon_initial_members ( args , _cfg = cfg ) public_addr = cfg . safe_get ( 'global' , 'public_addr' ) public_network = cfg . safe_get ( 'global' , 'public_network' ) mon_in_monmap = [ mon . get ( 'name' ) for mon in monmap . get ( 'mons' , [ { } ] ) if mon . get ( 'name' ) == hostname ] if mon_initial_members is None or not hostname in mon_initial_members : logger . warning ( '%s is not defined in `mon initial members`' , hostname ) if not mon_in_monmap : logger . warning ( 'monitor %s does not exist in monmap' , hostname ) if not public_addr and not public_network : logger . warning ( 'neither `public_addr` nor `public_network` keys are defined for monitors' ) logger . warning ( 'monitors may not be able to form quorum' )
Make sure we are able to catch up common mishaps with monitors and use that state of a monitor to determine what is missing and warn apropriately about it .
280
34
227,324
def mon_status ( conn , logger , hostname , args , silent = False ) : mon = 'mon.%s' % hostname try : out = mon_status_check ( conn , logger , hostname , args ) if not out : logger . warning ( 'monitor: %s, might not be running yet' % mon ) return False if not silent : logger . debug ( '*' * 80 ) logger . debug ( 'status for monitor: %s' % mon ) for line in json . dumps ( out , indent = 2 , sort_keys = True ) . split ( '\n' ) : logger . debug ( line ) logger . debug ( '*' * 80 ) if out [ 'rank' ] >= 0 : logger . info ( 'monitor: %s is running' % mon ) return True if out [ 'rank' ] == - 1 and out [ 'state' ] : logger . info ( 'monitor: %s is currently at the state of %s' % ( mon , out [ 'state' ] ) ) return True logger . info ( 'monitor: %s is not running' % mon ) return False except RuntimeError : logger . info ( 'monitor: %s is not running' % mon ) return False
run ceph daemon mon . hostname mon_status on the remote end and provide not only the output but be able to return a boolean status of what is going on . False represents a monitor that is not doing OK even if it is up and running while True would mean the monitor is up and running correctly .
264
63
227,325
def hostname_is_compatible ( conn , logger , provided_hostname ) : logger . debug ( 'determining if provided host has same hostname in remote' ) remote_hostname = conn . remote_module . shortname ( ) if remote_hostname == provided_hostname : return logger . warning ( '*' * 80 ) logger . warning ( 'provided hostname must match remote hostname' ) logger . warning ( 'provided hostname: %s' % provided_hostname ) logger . warning ( 'remote hostname: %s' % remote_hostname ) logger . warning ( 'monitors may not reach quorum and create-keys will not complete' ) logger . warning ( '*' * 80 )
Make sure that the host that we are connecting to has the same value as the hostname in the remote host otherwise mons can fail not reaching quorum .
156
32
227,326
def make ( parser ) : parser . formatter_class = ToggleRawTextHelpFormatter mon_parser = parser . add_subparsers ( dest = 'subcommand' ) mon_parser . required = True mon_add = mon_parser . add_parser ( 'add' , help = ( 'R|Add a monitor to an existing cluster:\n' '\tceph-deploy mon add node1\n' 'Or:\n' '\tceph-deploy mon add --address 192.168.1.10 node1\n' 'If the section for the monitor exists and defines a `mon addr` that\n' 'will be used, otherwise it will fallback by resolving the hostname to an\n' 'IP. If `--address` is used it will override all other options.' ) ) mon_add . add_argument ( '--address' , nargs = '?' , ) mon_add . add_argument ( 'mon' , nargs = 1 , ) mon_create = mon_parser . add_parser ( 'create' , help = ( 'R|Deploy monitors by specifying them like:\n' '\tceph-deploy mon create node1 node2 node3\n' 'If no hosts are passed it will default to use the\n' '`mon initial members` defined in the configuration.' ) ) mon_create . add_argument ( '--keyrings' , nargs = '?' , help = 'concatenate multiple keyrings to be seeded on new monitors' , ) mon_create . add_argument ( 'mon' , nargs = '*' , ) mon_create_initial = mon_parser . add_parser ( 'create-initial' , help = ( 'Will deploy for monitors defined in `mon initial members`, ' 'wait until they form quorum and then gatherkeys, reporting ' 'the monitor status along the process. If monitors don\'t form ' 'quorum the command will eventually time out.' ) ) mon_create_initial . add_argument ( '--keyrings' , nargs = '?' , help = 'concatenate multiple keyrings to be seeded on new monitors' , ) mon_destroy = mon_parser . add_parser ( 'destroy' , help = 'Completely remove Ceph MON from remote host(s)' ) mon_destroy . add_argument ( 'mon' , nargs = '+' , ) parser . set_defaults ( func = mon , )
Ceph MON Daemon management
539
6
227,327
def get_mon_initial_members ( args , error_on_empty = False , _cfg = None ) : if _cfg : cfg = _cfg else : cfg = conf . ceph . load ( args ) mon_initial_members = cfg . safe_get ( 'global' , 'mon_initial_members' ) if not mon_initial_members : if error_on_empty : raise exc . NeedHostError ( 'could not find `mon initial members` defined in ceph.conf' ) else : mon_initial_members = re . split ( r'[,\s]+' , mon_initial_members ) return mon_initial_members
Read the Ceph config file and return the value of mon_initial_members Optionally a NeedHostError can be raised if the value is None .
145
31
227,328
def is_running ( conn , args ) : stdout , stderr , _ = remoto . process . check ( conn , args ) result_string = b' ' . join ( stdout ) for run_check in [ b': running' , b' start/running' ] : if run_check in result_string : return True return False
Run a command to check the status of a mon return a boolean .
75
14
227,329
def executable_path ( conn , executable ) : executable_path = conn . remote_module . which ( executable ) if not executable_path : raise ExecutableNotFound ( executable , conn . hostname ) return executable_path
Remote validator that accepts a connection object to ensure that a certain executable is available returning its full path if so .
47
23
227,330
def is_systemd_service_enabled ( conn , service = 'ceph' ) : _ , _ , returncode = remoto . process . check ( conn , [ 'systemctl' , 'is-enabled' , '--quiet' , '{service}' . format ( service = service ) , ] ) return returncode == 0
Detects if a systemd service is enabled or not .
73
11
227,331
def make ( parser ) : parser . add_argument ( 'repo_name' , metavar = 'REPO-NAME' , help = 'Name of repo to manage. Can match an entry in cephdeploy.conf' ) parser . add_argument ( '--repo-url' , help = 'a repo URL that mirrors/contains Ceph packages' ) parser . add_argument ( '--gpg-url' , help = 'a GPG key URL to be used with custom repos' ) parser . add_argument ( '--remove' , '--delete' , action = 'store_true' , help = 'remove repo definition on remote host' ) parser . add_argument ( 'host' , metavar = 'HOST' , nargs = '+' , help = 'host(s) to install on' ) parser . set_defaults ( func = repo )
Repo definition management
197
4
227,332
def get_list ( self , section , key ) : value = self . get_safe ( section , key , [ ] ) if value == [ ] : return value # strip comments value = re . split ( r'\s+#' , value ) [ 0 ] # split on commas value = value . split ( ',' ) # strip spaces return [ x . strip ( ) for x in value ]
Assumes that the value for a given key is going to be a list separated by commas . It gets rid of trailing comments . If just one item is present it returns a list with a single item if no key is found an empty list is returned .
86
52
227,333
def get_default_repo ( self ) : for repo in self . get_repos ( ) : if self . get_safe ( repo , 'default' ) and self . getboolean ( repo , 'default' ) : return repo return False
Go through all the repositories defined in the config file and search for a truthy value for the default key . If there isn t any return None .
54
30
227,334
def validate_host_ip ( ips , subnets ) : # Make sure we prune ``None`` arguments subnets = [ s for s in subnets if s is not None ] validate_one_subnet = len ( subnets ) == 1 def ip_in_one_subnet ( ips , subnet ) : """ ensure an ip exists in at least one subnet """ for ip in ips : if net . ip_in_subnet ( ip , subnet ) : return True return False for subnet in subnets : if ip_in_one_subnet ( ips , subnet ) : if validate_one_subnet : return else : # keep going to make sure the other subnets are ok continue else : msg = "subnet (%s) is not valid for any of the ips found %s" % ( subnet , str ( ips ) ) raise RuntimeError ( msg )
Make sure that a given host all subnets specified will have at least one IP in that range .
197
20
227,335
def get_public_network_ip ( ips , public_subnet ) : for ip in ips : if net . ip_in_subnet ( ip , public_subnet ) : return ip msg = "IPs (%s) are not valid for any of subnet specified %s" % ( str ( ips ) , str ( public_subnet ) ) raise RuntimeError ( msg )
Given a public subnet chose the one IP from the remote host that exists within the subnet range .
86
21
227,336
def make ( parser ) : parser . add_argument ( 'mon' , metavar = 'MON' , nargs = '+' , help = 'initial monitor hostname, fqdn, or hostname:fqdn pair' , type = arg_validators . Hostname ( ) , ) parser . add_argument ( '--no-ssh-copykey' , dest = 'ssh_copykey' , action = 'store_false' , default = True , help = 'do not attempt to copy SSH keys' , ) parser . add_argument ( '--fsid' , dest = 'fsid' , help = 'provide an alternate FSID for ceph.conf generation' , ) parser . add_argument ( '--cluster-network' , help = 'specify the (internal) cluster network' , type = arg_validators . Subnet ( ) , ) parser . add_argument ( '--public-network' , help = 'specify the public network for a cluster' , type = arg_validators . Subnet ( ) , ) parser . set_defaults ( func = new , )
Start deploying a new cluster and write a CLUSTER . conf and keyring for it .
244
19
227,337
def make ( parser ) : mds_parser = parser . add_subparsers ( dest = 'subcommand' ) mds_parser . required = True mds_create = mds_parser . add_parser ( 'create' , help = 'Deploy Ceph MDS on remote host(s)' ) mds_create . add_argument ( 'mds' , metavar = 'HOST[:NAME]' , nargs = '+' , type = colon_separated , help = 'host (and optionally the daemon name) to deploy on' , ) parser . set_defaults ( func = mds , )
Ceph MDS daemon management
138
6
227,338
def install_yum_priorities ( distro , _yum = None ) : yum = _yum or pkg_managers . yum package_name = 'yum-plugin-priorities' if distro . normalized_name == 'centos' : if distro . release [ 0 ] != '6' : package_name = 'yum-priorities' yum ( distro . conn , package_name )
EPEL started packaging Ceph so we need to make sure that the ceph . repo we install has a higher priority than the EPEL repo so that when installing Ceph it will come from the repo file we create .
99
45
227,339
def make_exception_message ( exc ) : if str ( exc ) : return '%s: %s\n' % ( exc . __class__ . __name__ , exc ) else : return '%s\n' % ( exc . __class__ . __name__ )
An exception is passed in and this function returns the proper string depending on the result so it is readable enough .
62
22
227,340
def platform_information ( _linux_distribution = None ) : linux_distribution = _linux_distribution or platform . linux_distribution distro , release , codename = linux_distribution ( ) if not distro : distro , release , codename = parse_os_release ( ) if not codename and 'debian' in distro . lower ( ) : # this could be an empty string in Debian debian_codenames = { '10' : 'buster' , '9' : 'stretch' , '8' : 'jessie' , '7' : 'wheezy' , '6' : 'squeeze' , } major_version = release . split ( '.' ) [ 0 ] codename = debian_codenames . get ( major_version , '' ) # In order to support newer jessie/sid or wheezy/sid strings we test this # if sid is buried in the minor, we should use sid anyway. if not codename and '/' in release : major , minor = release . split ( '/' ) if minor == 'sid' : codename = minor else : codename = major if not codename and 'oracle' in distro . lower ( ) : # this could be an empty string in Oracle linux codename = 'oracle' if not codename and 'virtuozzo linux' in distro . lower ( ) : # this could be an empty string in Virtuozzo linux codename = 'virtuozzo' if not codename and 'arch' in distro . lower ( ) : # this could be an empty string in Arch linux codename = 'arch' return ( str ( distro ) . rstrip ( ) , str ( release ) . rstrip ( ) , str ( codename ) . rstrip ( ) )
detect platform information from remote host
389
7
227,341
def write_keyring ( path , key , uid = - 1 , gid = - 1 ) : # Note that we *require* to avoid deletion of the temp file # otherwise we risk not being able to copy the contents from # one file system to the other, hence the `delete=False` tmp_file = tempfile . NamedTemporaryFile ( 'wb' , delete = False ) tmp_file . write ( key ) tmp_file . close ( ) keyring_dir = os . path . dirname ( path ) if not path_exists ( keyring_dir ) : makedir ( keyring_dir , uid , gid ) shutil . move ( tmp_file . name , path )
create a keyring file
154
5
227,342
def create_mon_path ( path , uid = - 1 , gid = - 1 ) : if not os . path . exists ( path ) : os . makedirs ( path ) os . chown ( path , uid , gid )
create the mon path if it does not exist
54
9
227,343
def create_done_path ( done_path , uid = - 1 , gid = - 1 ) : with open ( done_path , 'wb' ) : pass os . chown ( done_path , uid , gid )
create a done file to avoid re - doing the mon deployment
52
12
227,344
def create_init_path ( init_path , uid = - 1 , gid = - 1 ) : if not os . path . exists ( init_path ) : with open ( init_path , 'wb' ) : pass os . chown ( init_path , uid , gid )
create the init path if it does not exist
65
9
227,345
def write_monitor_keyring ( keyring , monitor_keyring , uid = - 1 , gid = - 1 ) : write_file ( keyring , monitor_keyring , 0o600 , None , uid , gid )
create the monitor keyring file
53
6
227,346
def which ( executable ) : locations = ( '/usr/local/bin' , '/bin' , '/usr/bin' , '/usr/local/sbin' , '/usr/sbin' , '/sbin' , ) for location in locations : executable_path = os . path . join ( location , executable ) if os . path . exists ( executable_path ) and os . path . isfile ( executable_path ) : return executable_path
find the location of an executable
96
6
227,347
def make_mon_removed_dir ( path , file_name ) : try : os . makedirs ( '/var/lib/ceph/mon-removed' ) except OSError as e : if e . errno != errno . EEXIST : raise shutil . move ( path , os . path . join ( '/var/lib/ceph/mon-removed/' , file_name ) )
move old monitor data
94
4
227,348
def safe_mkdir ( path , uid = - 1 , gid = - 1 ) : try : os . mkdir ( path ) except OSError as e : if e . errno == errno . EEXIST : pass else : raise else : os . chown ( path , uid , gid )
create path if it doesn t exist
70
7
227,349
def safe_makedirs ( path , uid = - 1 , gid = - 1 ) : try : os . makedirs ( path ) except OSError as e : if e . errno == errno . EEXIST : pass else : raise else : os . chown ( path , uid , gid )
create path recursively if it doesn t exist
72
10
227,350
def zeroing ( dev ) : # this kills the crab # # sgdisk will wipe out the main copy of the GPT partition # table (sorry), but it doesn't remove the backup copies, and # subsequent commands will continue to complain and fail when # they see those. zeroing the last few blocks of the device # appears to do the trick. lba_size = 4096 size = 33 * lba_size return True with open ( dev , 'wb' ) as f : f . seek ( - size , os . SEEK_END ) f . write ( size * b'\0' )
zeroing last few blocks of device
128
7
227,351
def enable_yum_priority_obsoletes ( path = "/etc/yum/pluginconf.d/priorities.conf" ) : config = configparser . ConfigParser ( ) config . read ( path ) config . set ( 'main' , 'check_obsoletes' , '1' ) with open ( path , 'w' ) as fout : config . write ( fout )
Configure Yum priorities to include obsoletes
88
10
227,352
def vendorize ( vendor_requirements ) : for library in vendor_requirements : if len ( library ) == 2 : name , version = library cmd = None elif len ( library ) == 3 : # a possible cmd we need to run name , version , cmd = library vendor_library ( name , version , cmd )
This is the main entry point for vendorizing requirements . It expects a list of tuples that should contain the name of the library and the version .
68
30
227,353
def _keyring_equivalent ( keyring_one , keyring_two ) : def keyring_extract_key ( file_path ) : """ Cephx keyring files may or may not have white space before some lines. They may have some values in quotes, so a safe way to compare is to extract the key. """ with open ( file_path ) as f : for line in f : content = line . strip ( ) if len ( content ) == 0 : continue split_line = content . split ( '=' ) if split_line [ 0 ] . strip ( ) == 'key' : return "=" . join ( split_line [ 1 : ] ) . strip ( ) raise RuntimeError ( "File '%s' is not a keyring" % file_path ) key_one = keyring_extract_key ( keyring_one ) key_two = keyring_extract_key ( keyring_two ) return key_one == key_two
Check two keyrings are identical
211
6
227,354
def keytype_path_to ( args , keytype ) : if keytype == "admin" : return '{cluster}.client.admin.keyring' . format ( cluster = args . cluster ) if keytype == "mon" : return '{cluster}.mon.keyring' . format ( cluster = args . cluster ) return '{cluster}.bootstrap-{what}.keyring' . format ( cluster = args . cluster , what = keytype )
Get the local filename for a keyring type
101
9
227,355
def gatherkeys_missing ( args , distro , rlogger , keypath , keytype , dest_dir ) : args_prefix = [ '/usr/bin/ceph' , '--connect-timeout=25' , '--cluster={cluster}' . format ( cluster = args . cluster ) , '--name' , 'mon.' , '--keyring={keypath}' . format ( keypath = keypath ) , ] identity = keytype_identity ( keytype ) if identity is None : raise RuntimeError ( 'Could not find identity for keytype:%s' % keytype ) capabilites = keytype_capabilities ( keytype ) if capabilites is None : raise RuntimeError ( 'Could not find capabilites for keytype:%s' % keytype ) # First try getting the key if it already exists, to handle the case where # it exists but doesn't match the caps we would pass into get-or-create. # This is the same behvaior as in newer ceph-create-keys out , err , code = remoto . process . check ( distro . conn , args_prefix + [ 'auth' , 'get' , identity ] ) if code == errno . ENOENT : out , err , code = remoto . process . check ( distro . conn , args_prefix + [ 'auth' , 'get-or-create' , identity ] + capabilites ) if code != 0 : rlogger . error ( '"ceph auth get-or-create for keytype %s returned %s' , keytype , code ) for line in err : rlogger . debug ( line ) return False keyring_name_local = keytype_path_to ( args , keytype ) keyring_path_local = os . path . join ( dest_dir , keyring_name_local ) with open ( keyring_path_local , 'wb' ) as f : for line in out : f . write ( line + b'\n' ) return True
Get or create the keyring from the mon using the mon keyring by keytype and copy to dest_dir
443
23
227,356
def gatherkeys_with_mon ( args , host , dest_dir ) : distro = hosts . get ( host , username = args . username ) remote_hostname = distro . conn . remote_module . shortname ( ) dir_keytype_mon = ceph_deploy . util . paths . mon . path ( args . cluster , remote_hostname ) path_keytype_mon = "%s/keyring" % ( dir_keytype_mon ) mon_key = distro . conn . remote_module . get_file ( path_keytype_mon ) if mon_key is None : LOG . warning ( "No mon key found in host: %s" , host ) return False mon_name_local = keytype_path_to ( args , "mon" ) mon_path_local = os . path . join ( dest_dir , mon_name_local ) with open ( mon_path_local , 'wb' ) as f : f . write ( mon_key ) rlogger = logging . getLogger ( host ) path_asok = ceph_deploy . util . paths . mon . asok ( args . cluster , remote_hostname ) out , err , code = remoto . process . check ( distro . conn , [ "/usr/bin/ceph" , "--connect-timeout=25" , "--cluster={cluster}" . format ( cluster = args . cluster ) , "--admin-daemon={asok}" . format ( asok = path_asok ) , "mon_status" ] ) if code != 0 : rlogger . error ( '"ceph mon_status %s" returned %s' , host , code ) for line in err : rlogger . debug ( line ) return False try : mon_status = json . loads ( b'' . join ( out ) . decode ( 'utf-8' ) ) except ValueError : rlogger . error ( '"ceph mon_status %s" output was not json' , host ) for line in out : rlogger . error ( line ) return False mon_number = None mon_map = mon_status . get ( 'monmap' ) if mon_map is None : rlogger . error ( "could not find mon map for mons on '%s'" , host ) return False mon_quorum = mon_status . get ( 'quorum' ) if mon_quorum is None : rlogger . error ( "could not find quorum for mons on '%s'" , host ) return False mon_map_mons = mon_map . get ( 'mons' ) if mon_map_mons is None : rlogger . error ( "could not find mons in monmap on '%s'" , host ) return False for mon in mon_map_mons : if mon . get ( 'name' ) == remote_hostname : mon_number = mon . get ( 'rank' ) break if mon_number is None : rlogger . error ( "could not find '%s' in monmap" , remote_hostname ) return False if not mon_number in mon_quorum : rlogger . error ( "Not yet quorum for '%s'" , host ) return False for keytype in [ "admin" , "mds" , "mgr" , "osd" , "rgw" ] : if not gatherkeys_missing ( args , distro , rlogger , path_keytype_mon , keytype , dest_dir ) : # We will return failure if we fail to gather any key rlogger . error ( "Failed to return '%s' key from host %s" , keytype , host ) return False return True
Connect to mon and gather keys if mon is in quorum .
815
13
227,357
def gatherkeys ( args ) : oldmask = os . umask ( 0o77 ) try : try : tmpd = tempfile . mkdtemp ( ) LOG . info ( "Storing keys in temp directory %s" , tmpd ) sucess = False for host in args . mon : sucess = gatherkeys_with_mon ( args , host , tmpd ) if sucess : break if not sucess : LOG . error ( "Failed to connect to host:%s" , ', ' . join ( args . mon ) ) raise RuntimeError ( 'Failed to connect any mon' ) had_error = False date_string = time . strftime ( "%Y%m%d%H%M%S" ) for keytype in [ "admin" , "mds" , "mgr" , "mon" , "osd" , "rgw" ] : filename = keytype_path_to ( args , keytype ) tmp_path = os . path . join ( tmpd , filename ) if not os . path . exists ( tmp_path ) : LOG . error ( "No key retrived for '%s'" , keytype ) had_error = True continue if not os . path . exists ( filename ) : LOG . info ( "Storing %s" % ( filename ) ) shutil . move ( tmp_path , filename ) continue if _keyring_equivalent ( tmp_path , filename ) : LOG . info ( "keyring '%s' already exists" , filename ) continue backup_keyring = "%s-%s" % ( filename , date_string ) LOG . info ( "Replacing '%s' and backing up old key as '%s'" , filename , backup_keyring ) shutil . copy ( filename , backup_keyring ) shutil . move ( tmp_path , filename ) if had_error : raise RuntimeError ( 'Failed to get all key types' ) finally : LOG . info ( "Destroy temp directory %s" % ( tmpd ) ) shutil . rmtree ( tmpd ) finally : os . umask ( oldmask )
Gather keys from any mon and store in current working directory .
457
13
227,358
def make ( parser ) : parser . add_argument ( 'mon' , metavar = 'HOST' , nargs = '+' , help = 'monitor host to pull keys from' , ) parser . set_defaults ( func = gatherkeys , )
Gather authentication keys for provisioning new nodes .
57
10
227,359
def get ( hostname , username = None , fallback = None , detect_sudo = True , use_rhceph = False , callbacks = None ) : conn = get_connection ( hostname , username = username , logger = logging . getLogger ( hostname ) , detect_sudo = detect_sudo ) try : conn . import_module ( remotes ) except IOError as error : if 'already closed' in getattr ( error , 'message' , '' ) : raise RuntimeError ( 'remote connection got closed, ensure ``requiretty`` is disabled for %s' % hostname ) distro_name , release , codename = conn . remote_module . platform_information ( ) if not codename or not _get_distro ( distro_name ) : raise exc . UnsupportedPlatform ( distro = distro_name , codename = codename , release = release ) machine_type = conn . remote_module . machine_type ( ) module = _get_distro ( distro_name , use_rhceph = use_rhceph ) module . name = distro_name module . normalized_name = _normalized_distro_name ( distro_name ) module . normalized_release = _normalized_release ( release ) module . distro = module . normalized_name module . is_el = module . normalized_name in [ 'redhat' , 'centos' , 'fedora' , 'scientific' , 'oracle' , 'virtuozzo' ] module . is_rpm = module . normalized_name in [ 'redhat' , 'centos' , 'fedora' , 'scientific' , 'suse' , 'oracle' , 'virtuozzo' , 'alt' ] module . is_deb = module . normalized_name in [ 'debian' , 'ubuntu' ] module . is_pkgtarxz = module . normalized_name in [ 'arch' ] module . release = release module . codename = codename module . conn = conn module . machine_type = machine_type module . init = module . choose_init ( module ) module . packager = module . get_packager ( module ) # execute each callback if any if callbacks : for c in callbacks : c ( module ) return module
Retrieve the module that matches the distribution of a hostname . This function will connect to that host and retrieve the distribution information then return the appropriate module and slap a few attributes to that module defining the information it found from the hostname .
495
48
227,360
def get_connection ( hostname , username , logger , threads = 5 , use_sudo = None , detect_sudo = True ) : if username : hostname = "%s@%s" % ( username , hostname ) try : conn = remoto . Connection ( hostname , logger = logger , threads = threads , detect_sudo = detect_sudo , ) # Set a timeout value in seconds to disconnect and move on # if no data is sent back. conn . global_timeout = 300 logger . debug ( "connected to host: %s " % hostname ) return conn except Exception as error : msg = "connecting to host: %s " % hostname errors = "resulted in errors: %s %s" % ( error . __class__ . __name__ , error ) raise RuntimeError ( msg + errors )
A very simple helper meant to return a connection that will know about the need to use sudo .
176
19
227,361
def get_local_connection ( logger , use_sudo = False ) : return get_connection ( socket . gethostname ( ) , # cannot rely on 'localhost' here None , logger = logger , threads = 1 , use_sudo = use_sudo , detect_sudo = False )
Helper for local connections that are sometimes needed to operate on local hosts
61
13
227,362
def make ( parser ) : mgr_parser = parser . add_subparsers ( dest = 'subcommand' ) mgr_parser . required = True mgr_create = mgr_parser . add_parser ( 'create' , help = 'Deploy Ceph MGR on remote host(s)' ) mgr_create . add_argument ( 'mgr' , metavar = 'HOST[:NAME]' , nargs = '+' , type = colon_separated , help = 'host (and optionally the daemon name) to deploy on' , ) parser . set_defaults ( func = mgr , )
Ceph MGR daemon management
138
6
227,363
def make ( parser ) : action = parser . add_mutually_exclusive_group ( ) action . add_argument ( '--install' , metavar = 'PKG(s)' , help = 'Comma-separated package(s) to install' , ) action . add_argument ( '--remove' , metavar = 'PKG(s)' , help = 'Comma-separated package(s) to remove' , ) parser . add_argument ( 'hosts' , nargs = '+' , ) parser . set_defaults ( func = pkg , )
Manage packages on remote hosts .
130
7
227,364
def get_bootstrap_osd_key ( cluster ) : path = '{cluster}.bootstrap-osd.keyring' . format ( cluster = cluster ) try : with open ( path , 'rb' ) as f : return f . read ( ) except IOError : raise RuntimeError ( 'bootstrap-osd keyring not found; run \'gatherkeys\'' )
Read the bootstrap - osd key for cluster .
84
11
227,365
def create_osd_keyring ( conn , cluster , key ) : logger = conn . logger path = '/var/lib/ceph/bootstrap-osd/{cluster}.keyring' . format ( cluster = cluster , ) if not conn . remote_module . path_exists ( path ) : logger . warning ( 'osd keyring does not exist yet, creating one' ) conn . remote_module . write_keyring ( path , key )
Run on osd node writes the bootstrap key if not there yet .
101
15
227,366
def osd_tree ( conn , cluster ) : ceph_executable = system . executable_path ( conn , 'ceph' ) command = [ ceph_executable , '--cluster={cluster}' . format ( cluster = cluster ) , 'osd' , 'tree' , '--format=json' , ] out , err , code = remoto . process . check ( conn , command , ) try : loaded_json = json . loads ( b'' . join ( out ) . decode ( 'utf-8' ) ) # convert boolean strings to actual booleans because # --format=json fails to do this properly for k , v in loaded_json . items ( ) : if v == 'true' : loaded_json [ k ] = True elif v == 'false' : loaded_json [ k ] = False return loaded_json except ValueError : return { }
Check the status of an OSD . Make sure all are up and in
193
15
227,367
def catch_osd_errors ( conn , logger , args ) : logger . info ( 'checking OSD status...' ) status = osd_status_check ( conn , args . cluster ) osds = int ( status . get ( 'num_osds' , 0 ) ) up_osds = int ( status . get ( 'num_up_osds' , 0 ) ) in_osds = int ( status . get ( 'num_in_osds' , 0 ) ) full = status . get ( 'full' , False ) nearfull = status . get ( 'nearfull' , False ) if osds > up_osds : difference = osds - up_osds logger . warning ( 'there %s %d OSD%s down' % ( [ 'is' , 'are' ] [ difference != 1 ] , difference , "s" [ difference == 1 : ] ) ) if osds > in_osds : difference = osds - in_osds logger . warning ( 'there %s %d OSD%s out' % ( [ 'is' , 'are' ] [ difference != 1 ] , difference , "s" [ difference == 1 : ] ) ) if full : logger . warning ( 'OSDs are full!' ) if nearfull : logger . warning ( 'OSDs are near full!' )
Look for possible issues when checking the status of an OSD and report them back to the user .
291
20
227,368
def create_osd ( conn , cluster , data , journal , zap , fs_type , dmcrypt , dmcrypt_dir , storetype , block_wal , block_db , * * kw ) : ceph_volume_executable = system . executable_path ( conn , 'ceph-volume' ) args = [ ceph_volume_executable , '--cluster' , cluster , 'lvm' , 'create' , '--%s' % storetype , '--data' , data ] if zap : LOG . warning ( 'zapping is no longer supported when preparing' ) if dmcrypt : args . append ( '--dmcrypt' ) # TODO: re-enable dmcrypt support once ceph-volume grows it LOG . warning ( 'dmcrypt is currently not supported' ) if storetype == 'bluestore' : if block_wal : args . append ( '--block.wal' ) args . append ( block_wal ) if block_db : args . append ( '--block.db' ) args . append ( block_db ) elif storetype == 'filestore' : if not journal : raise RuntimeError ( 'A journal lv or GPT partition must be specified when using filestore' ) args . append ( '--journal' ) args . append ( journal ) if kw . get ( 'debug' ) : remoto . process . run ( conn , args , extend_env = { 'CEPH_VOLUME_DEBUG' : '1' } ) else : remoto . process . run ( conn , args )
Run on osd node creates an OSD from a data disk .
349
14
227,369
def make_disk ( parser ) : disk_parser = parser . add_subparsers ( dest = 'subcommand' ) disk_parser . required = True disk_zap = disk_parser . add_parser ( 'zap' , help = 'destroy existing data and filesystem on LV or partition' , ) disk_zap . add_argument ( 'host' , nargs = '?' , metavar = 'HOST' , help = 'Remote HOST(s) to connect' ) disk_zap . add_argument ( 'disk' , nargs = '+' , metavar = 'DISK' , help = 'Disk(s) to zap' ) disk_zap . add_argument ( '--debug' , action = 'store_true' , help = 'Enable debug mode on remote ceph-volume calls' , ) disk_list = disk_parser . add_parser ( 'list' , help = 'List disk info from remote host(s)' ) disk_list . add_argument ( 'host' , nargs = '+' , metavar = 'HOST' , help = 'Remote HOST(s) to list OSDs from' ) disk_list . add_argument ( '--debug' , action = 'store_true' , help = 'Enable debug mode on remote ceph-volume calls' , ) parser . set_defaults ( func = disk , )
Manage disks on a remote host .
310
8
227,370
def repository_url_part ( distro ) : if distro . normalized_release . int_major >= 6 : if distro . normalized_name == 'redhat' : return 'rhel' + distro . normalized_release . major if distro . normalized_name in [ 'centos' , 'scientific' , 'oracle' , 'virtuozzo' ] : return 'el' + distro . normalized_release . major return 'el6'
Historically everything CentOS RHEL and Scientific has been mapped to el6 urls but as we are adding repositories for rhel the URLs should map correctly to say rhel6 or rhel7 .
100
40
227,371
def sanitize_args ( args ) : if args . release is None : args . release = 'nautilus' args . default_release = True # XXX This whole dance is because --stable is getting deprecated if args . stable is not None : LOG . warning ( 'the --stable flag is deprecated, use --release instead' ) args . release = args . stable # XXX Tango ends here. return args
args may need a bunch of logic to set proper defaults that argparse is not well suited for .
87
20
227,372
def should_use_custom_repo ( args , cd_conf , repo_url ) : if repo_url : # repo_url signals a CLI override, return False immediately return False if cd_conf : if cd_conf . has_repos : has_valid_release = args . release in cd_conf . get_repos ( ) has_default_repo = cd_conf . get_default_repo ( ) if has_valid_release or has_default_repo : return True return False
A boolean to determine the logic needed to proceed with a custom repo installation instead of cramming everything nect to the logic operator .
112
26
227,373
def make_uninstall ( parser ) : parser . add_argument ( 'host' , metavar = 'HOST' , nargs = '+' , help = 'hosts to uninstall Ceph from' , ) parser . set_defaults ( func = uninstall , )
Remove Ceph packages from remote hosts .
60
8
227,374
def make_purge ( parser ) : parser . add_argument ( 'host' , metavar = 'HOST' , nargs = '+' , help = 'hosts to purge Ceph from' , ) parser . set_defaults ( func = purge , )
Remove Ceph packages from remote hosts and purge all data .
60
12
227,375
def make ( parser ) : rgw_parser = parser . add_subparsers ( dest = 'subcommand' ) rgw_parser . required = True rgw_create = rgw_parser . add_parser ( 'create' , help = 'Create an RGW instance' ) rgw_create . add_argument ( 'rgw' , metavar = 'HOST[:NAME]' , nargs = '+' , type = colon_separated , help = 'host (and optionally the daemon name) to deploy on. \ NAME is automatically prefixed with \'rgw.\'' , ) parser . set_defaults ( func = rgw , )
Ceph RGW daemon management
146
6
227,376
def can_connect_passwordless ( hostname ) : # Ensure we are not doing this for local hosts if not remoto . backends . needs_ssh ( hostname ) : return True logger = logging . getLogger ( hostname ) with get_local_connection ( logger ) as conn : # Check to see if we can login, disabling password prompts command = [ 'ssh' , '-CT' , '-o' , 'BatchMode=yes' , hostname , 'true' ] out , err , retval = remoto . process . check ( conn , command , stop_on_error = False ) permission_denied_error = 'Permission denied ' host_key_verify_error = 'Host key verification failed.' has_key_error = False for line in err : if permission_denied_error in line or host_key_verify_error in line : has_key_error = True if retval == 255 and has_key_error : return False return True
Ensure that current host can SSH remotely to the remote host using the BatchMode option to prevent a password prompt .
216
24
227,377
def ip_in_subnet ( ip , subnet ) : ipaddr = int ( '' . join ( [ '%02x' % int ( x ) for x in ip . split ( '.' ) ] ) , 16 ) netstr , bits = subnet . split ( '/' ) netaddr = int ( '' . join ( [ '%02x' % int ( x ) for x in netstr . split ( '.' ) ] ) , 16 ) mask = ( 0xffffffff << ( 32 - int ( bits ) ) ) & 0xffffffff return ( ipaddr & mask ) == ( netaddr & mask )
Does IP exists in a given subnet utility . Returns a boolean
134
13
227,378
def in_subnet ( cidr , addrs = None ) : for address in addrs : if ip_in_subnet ( address , cidr ) : return True return False
Returns True if host is within specified subnet otherwise False
41
11
227,379
def get_chacra_repo ( shaman_url ) : shaman_response = get_request ( shaman_url ) chacra_url = shaman_response . geturl ( ) chacra_response = get_request ( chacra_url ) return chacra_response . read ( )
From a Shaman URL get the chacra url for a repository read the contents that point to the repo and return it as a string .
67
28
227,380
def map_components ( notsplit_packages , components ) : packages = set ( ) for c in components : if c in notsplit_packages : packages . add ( 'ceph' ) else : packages . add ( c ) return list ( packages )
Returns a list of packages to install based on component names
54
11
227,381
def start_mon_service ( distro , cluster , hostname ) : if distro . init == 'sysvinit' : service = distro . conn . remote_module . which_service ( ) remoto . process . run ( distro . conn , [ service , 'ceph' , '-c' , '/etc/ceph/{cluster}.conf' . format ( cluster = cluster ) , 'start' , 'mon.{hostname}' . format ( hostname = hostname ) ] , timeout = 7 , ) system . enable_service ( distro . conn ) elif distro . init == 'upstart' : remoto . process . run ( distro . conn , [ 'initctl' , 'emit' , 'ceph-mon' , 'cluster={cluster}' . format ( cluster = cluster ) , 'id={hostname}' . format ( hostname = hostname ) , ] , timeout = 7 , ) elif distro . init == 'systemd' : # enable ceph target for this host (in case it isn't already enabled) remoto . process . run ( distro . conn , [ 'systemctl' , 'enable' , 'ceph.target' ] , timeout = 7 , ) # enable and start this mon instance remoto . process . run ( distro . conn , [ 'systemctl' , 'enable' , 'ceph-mon@{hostname}' . format ( hostname = hostname ) , ] , timeout = 7 , ) remoto . process . run ( distro . conn , [ 'systemctl' , 'start' , 'ceph-mon@{hostname}' . format ( hostname = hostname ) , ] , timeout = 7 , )
start mon service depending on distro init
381
8
227,382
def __voronoi_finite_polygons_2d ( vor , radius = None ) : if vor . points . shape [ 1 ] != 2 : raise ValueError ( "Requires 2D input" ) new_regions = [ ] new_vertices = vor . vertices . tolist ( ) center = vor . points . mean ( axis = 0 ) if radius is None : radius = vor . points . ptp ( ) . max ( ) # Construct a map containing all ridges for a given point all_ridges = { } for ( p1 , p2 ) , ( v1 , v2 ) in zip ( vor . ridge_points , vor . ridge_vertices ) : all_ridges . setdefault ( p1 , [ ] ) . append ( ( p2 , v1 , v2 ) ) all_ridges . setdefault ( p2 , [ ] ) . append ( ( p1 , v1 , v2 ) ) # Reconstruct infinite regions for p1 , region in enumerate ( vor . point_region ) : vertices = vor . regions [ region ] if all ( v >= 0 for v in vertices ) : # finite region new_regions . append ( vertices ) continue # reconstruct a non-finite region if p1 not in all_ridges : continue ridges = all_ridges [ p1 ] new_region = [ v for v in vertices if v >= 0 ] for p2 , v1 , v2 in ridges : if v2 < 0 : v1 , v2 = v2 , v1 if v1 >= 0 : # finite ridge: already in the region continue # Compute the missing endpoint of an infinite ridge t = vor . points [ p2 ] - vor . points [ p1 ] # tangent t /= np . linalg . norm ( t ) n = np . array ( [ - t [ 1 ] , t [ 0 ] ] ) # normal midpoint = vor . points [ [ p1 , p2 ] ] . mean ( axis = 0 ) direction = np . sign ( np . dot ( midpoint - center , n ) ) * n far_point = vor . vertices [ v2 ] + direction * radius new_region . append ( len ( new_vertices ) ) new_vertices . append ( far_point . tolist ( ) ) # sort region counterclockwise vs = np . asarray ( [ new_vertices [ v ] for v in new_region ] ) c = vs . mean ( axis = 0 ) angles = np . arctan2 ( vs [ : , 1 ] - c [ 1 ] , vs [ : , 0 ] - c [ 0 ] ) new_region = np . array ( new_region ) [ np . argsort ( angles ) ] # finish new_regions . append ( new_region . tolist ( ) ) return new_regions , np . asarray ( new_vertices )
Reconstruct infinite voronoi regions in a 2D diagram to finite regions .
641
18
227,383
def inline ( width = 900 ) : from IPython . display import Image , HTML , display , clear_output import random import string import urllib import os while True : fname = '' . join ( random . choice ( string . ascii_uppercase + string . digits ) for _ in range ( 32 ) ) if not os . path . isfile ( fname + '.png' ) : break savefig ( fname ) if os . path . isfile ( fname + '.png' ) : with open ( fname + '.png' , 'rb' ) as fin : encoded = base64 . b64encode ( fin . read ( ) ) b64 = urllib . parse . quote ( encoded ) image_html = "<img style='width: %dpx; margin: 0px; float: left; border: 1px solid black;' src='data:image/png;base64,%s' />" % ( width , b64 ) display ( HTML ( image_html ) ) os . remove ( fname + '.png' )
display the map inline in ipython
229
7
227,384
def dot ( data , color = None , point_size = 2 , f_tooltip = None ) : from geoplotlib . layers import DotDensityLayer _global_config . layers . append ( DotDensityLayer ( data , color = color , point_size = point_size , f_tooltip = f_tooltip ) )
Create a dot density map
74
5
227,385
def hist ( data , cmap = 'hot' , alpha = 220 , colorscale = 'sqrt' , binsize = 16 , show_tooltip = False , scalemin = 0 , scalemax = None , f_group = None , show_colorbar = True ) : from geoplotlib . layers import HistogramLayer _global_config . layers . append ( HistogramLayer ( data , cmap = cmap , alpha = alpha , colorscale = colorscale , binsize = binsize , show_tooltip = show_tooltip , scalemin = scalemin , scalemax = scalemax , f_group = f_group , show_colorbar = show_colorbar ) )
Create a 2D histogram
152
6
227,386
def shapefiles ( fname , f_tooltip = None , color = None , linewidth = 3 , shape_type = 'full' ) : from geoplotlib . layers import ShapefileLayer _global_config . layers . append ( ShapefileLayer ( fname , f_tooltip , color , linewidth , shape_type ) )
Load and draws shapefiles
76
5
227,387
def voronoi ( data , line_color = None , line_width = 2 , f_tooltip = None , cmap = None , max_area = 1e4 , alpha = 220 ) : from geoplotlib . layers import VoronoiLayer _global_config . layers . append ( VoronoiLayer ( data , line_color , line_width , f_tooltip , cmap , max_area , alpha ) )
Draw the voronoi tesselation of the points
96
12
227,388
def delaunay ( data , line_color = None , line_width = 2 , cmap = None , max_lenght = 100 ) : from geoplotlib . layers import DelaunayLayer _global_config . layers . append ( DelaunayLayer ( data , line_color , line_width , cmap , max_lenght ) )
Draw a delaunay triangulation of the points
79
11
227,389
def convexhull ( data , col , fill = True , point_size = 4 ) : from geoplotlib . layers import ConvexHullLayer _global_config . layers . append ( ConvexHullLayer ( data , col , fill , point_size ) )
Convex hull for a set of points
62
9
227,390
def kde ( data , bw , cmap = 'hot' , method = 'hist' , scaling = 'sqrt' , alpha = 220 , cut_below = None , clip_above = None , binsize = 1 , cmap_levels = 10 , show_colorbar = False ) : from geoplotlib . layers import KDELayer _global_config . layers . append ( KDELayer ( data , bw , cmap , method , scaling , alpha , cut_below , clip_above , binsize , cmap_levels , show_colorbar ) )
Kernel density estimation visualization
125
5
227,391
def labels ( data , label_column , color = None , font_name = FONT_NAME , font_size = 14 , anchor_x = 'left' , anchor_y = 'top' ) : from geoplotlib . layers import LabelsLayer _global_config . layers . append ( LabelsLayer ( data , label_column , color , font_name , font_size , anchor_x , anchor_y ) )
Draw a text label for each sample
94
7
227,392
def set_map_alpha ( alpha ) : if alpha < 0 or alpha > 255 : raise Exception ( 'invalid alpha ' + str ( alpha ) ) _global_config . map_alpha = alpha
Alpha color of the map tiles
43
6
227,393
def read_csv ( fname ) : values = defaultdict ( list ) with open ( fname ) as f : reader = csv . DictReader ( f ) for row in reader : for ( k , v ) in row . items ( ) : values [ k ] . append ( v ) npvalues = { k : np . array ( values [ k ] ) for k in values . keys ( ) } for k in npvalues . keys ( ) : for datatype in [ np . int , np . float ] : try : npvalues [ k ] [ : 1 ] . astype ( datatype ) npvalues [ k ] = npvalues [ k ] . astype ( datatype ) break except : pass dao = DataAccessObject ( npvalues ) return dao
Read a csv file into a DataAccessObject
167
10
227,394
def head ( self , n ) : return DataAccessObject ( { k : self . dict [ k ] [ : n ] for k in self . dict } )
Return a DataAccessObject containing the first n rows
34
10
227,395
def from_points ( lons , lats ) : north , west = max ( lats ) , min ( lons ) south , east = min ( lats ) , max ( lons ) return BoundingBox ( north = north , west = west , south = south , east = east )
Compute the BoundingBox from a set of latitudes and longitudes
63
15
227,396
def from_bboxes ( bboxes ) : north = max ( [ b . north for b in bboxes ] ) south = min ( [ b . south for b in bboxes ] ) west = min ( [ b . west for b in bboxes ] ) east = max ( [ b . east for b in bboxes ] ) return BoundingBox ( north = north , west = west , south = south , east = east )
Compute a BoundingBox enclosing all specified bboxes
91
12
227,397
def get_pydoc_completions ( modulename ) : modulename = compat . ensure_not_unicode ( modulename ) modulename = modulename . rstrip ( "." ) if modulename == "" : return sorted ( get_modules ( ) ) candidates = get_completions ( modulename ) if candidates : return sorted ( candidates ) needle = modulename if "." in needle : modulename , part = needle . rsplit ( "." , 1 ) candidates = get_completions ( modulename ) else : candidates = get_modules ( ) return sorted ( candidate for candidate in candidates if candidate . startswith ( needle ) )
Get possible completions for modulename for pydoc .
149
13
227,398
def get_modules ( modulename = None ) : modulename = compat . ensure_not_unicode ( modulename ) if not modulename : try : return ( [ modname for ( importer , modname , ispkg ) in iter_modules ( ) if not modname . startswith ( "_" ) ] + list ( sys . builtin_module_names ) ) except OSError : # Bug in Python 2.6, see #275 return list ( sys . builtin_module_names ) try : module = safeimport ( modulename ) except ErrorDuringImport : return [ ] if module is None : return [ ] if hasattr ( module , "__path__" ) : return [ modname for ( importer , modname , ispkg ) in iter_modules ( module . __path__ ) if not modname . startswith ( "_" ) ] return [ ]
Return a list of modules and packages under modulename .
196
12
227,399
def read_json ( self ) : line = self . stdin . readline ( ) if line == '' : raise EOFError ( ) return json . loads ( line )
Read a single line and decode it as JSON .
37
10