idx
int64
0
63k
question
stringlengths
61
4.03k
target
stringlengths
6
1.23k
16,400
def _get_candidates(self):
    """Find the pipelines that have not yet been tried.

    Returns an array of indices into `dpp_vector`, or ``None`` if
    every pipeline has already been tried.
    """
    untried = np.where(self.dpp_vector == 0)[0]
    if len(untried) == 0:
        return None
    return untried
Finds the pipelines that are not yet tried .
16,401
def propose(self):
    """Use the trained model to propose a new pipeline.

    Returns ``None`` when no untried candidates remain.
    """
    candidates = self._get_candidates()
    if candidates is None:
        return None
    # Score the untried candidates and pick one via the acquisition function.
    best = self._acquire(self.predict(candidates))
    return candidates[best]
Use the trained model to propose a new pipeline .
16,402
def add(self, X):
    """Add data about known pipelines and their scores.

    `X` maps pipeline index -> score; the model is refit on the
    updated vector.
    """
    for pipeline, score in X.items():
        self.dpp_vector[pipeline] = score
    self.fit(self.dpp_vector.reshape(1, -1))
Add data about known pipeline and scores .
16,403
def _init_az_api(self):
    """Initialise client objects for talking to the Azure API."""
    with self.__lock:
        # Only the first caller performs the (expensive) initialization;
        # `_resource_client` doubles as the "already initialized" flag.
        if self._resource_client is None:
            log.debug("Making Azure `ServicePrincipalcredentials` object"
                      " with tenant=%r, client_id=%r, secret=%r ...",
                      self.tenant_id, self.client_id,
                      ('<redacted>' if self.secret else None))
            credentials = ServicePrincipalCredentials(
                tenant=self.tenant_id,
                client_id=self.client_id,
                secret=self.secret,
            )
            log.debug("Initializing Azure `ComputeManagementclient` ...")
            self._compute_client = ComputeManagementClient(
                credentials, self.subscription_id)
            log.debug("Initializing Azure `NetworkManagementclient` ...")
            self._network_client = NetworkManagementClient(
                credentials, self.subscription_id)
            log.debug("Initializing Azure `ResourceManagementclient` ...")
            self._resource_client = ResourceManagementClient(
                credentials, self.subscription_id)
            log.info("Azure API clients initialized.")
Initialise client objects for talking to Azure API .
16,404
def start_instance(self, key_name, public_key_path, private_key_path,
                   security_group, flavor, image_id, image_userdata,
                   username='root', node_name=None, boot_disk_size=30,
                   storage_account_type='Standard_LRS', **extra):
    """Start a new VM using the given properties.

    Returns ``[cluster_name, node_name]`` as the instance identifier.
    """
    self._init_az_api()
    # Warn about parameters that the Azure backend does not honor.
    if security_group and security_group != 'default':
        warn("Setting `security_group` is currently not supported"
             " in the Azure cloud; VMs will all be attached to"
             " a network security group named after the cluster name.")
    if image_userdata:
        warn("Parameter `image_userdata` is currently not supported"
             " in the Azure cloud and will be ignored.")
    # Node names are `<cluster>-<something>`; the cluster name doubles
    # as Azure resource-group name.
    cluster_name, _ = node_name.split('-', 1)
    with self.__lock:
        if cluster_name not in self._resource_groups_created:
            self._resource_client.resource_groups.create_or_update(
                cluster_name, {'location': self.location})
            self._resource_groups_created.add(cluster_name)
    with open(public_key_path, 'r') as public_key_file:
        public_key = public_key_file.read()
    image_publisher, image_offer, image_sku, image_version = \
        self._split_image_id(image_id)
    if not security_group:
        security_group = (cluster_name + '-secgroup')
    net_parameters = {
        'networkSecurityGroupName': {'value': security_group},
        'subnetName': {'value': cluster_name},
    }
    net_name = net_parameters['subnetName']['value']
    with self.__lock:
        # Lazily deploy the cluster network the first time it is needed.
        if net_name not in self._networks_created:
            log.debug("Creating network `%s` in Azure ...", net_name)
            oper = self._resource_client.deployments.create_or_update(
                cluster_name, net_name, {
                    'mode': DeploymentMode.incremental,
                    'template': self.net_deployment_template,
                    'parameters': net_parameters,
                })
            oper.wait()
            self._networks_created.add(net_name)
    boot_disk_size_gb = int(boot_disk_size)
    vm_parameters = {
        'adminUserName': {'value': username},
        'imagePublisher': {'value': image_publisher},
        'imageOffer': {'value': image_offer},
        'imageSku': {'value': image_sku},
        'imageVersion': {'value': image_version},
        'networkSecurityGroupName': {'value': security_group},
        'sshKeyData': {'value': public_key},
        'storageAccountName': {
            'value': self._make_storage_account_name(cluster_name, node_name)},
        'storageAccountType': {'value': storage_account_type},
        'subnetName': {'value': cluster_name},
        'vmName': {'value': node_name},
        'vmSize': {'value': flavor},
        'bootDiskSize': {'value': boot_disk_size_gb},
    }
    log.debug("Deploying `%s` VM template to Azure ...",
              vm_parameters['vmName']['value'])
    oper = self._resource_client.deployments.create_or_update(
        cluster_name, node_name, {
            'mode': DeploymentMode.incremental,
            'template': self.vm_deployment_template,
            'parameters': vm_parameters,
        })
    oper.wait()
    return [cluster_name, node_name]
Start a new VM using the given properties .
16,405
def is_instance_running(self, instance_id):
    """Check whether the given instance is up and running."""
    self._init_az_api()
    vm = self._get_vm(instance_id, force_reload=True)
    # Azure reports a fully started VM with provisioning state "Succeeded".
    return u'Succeeded' == vm.provisioning_state
Check if the instance is up and running .
16,406
def _get_vm(self, instance_id, force_reload=True):
    """Return details on the VM with the given name.

    :raises InstanceNotFoundError: if no such VM exists.
    """
    self._init_az_api()
    if force_reload:
        # Drop the cached inventory so we get fresh data from Azure.
        self._inventory = {}
    cluster_name, node_name = instance_id
    self._init_inventory(cluster_name)
    if node_name not in self._vm_details:
        vm_info = self._compute_client.virtual_machines.get(
            cluster_name, node_name, 'instanceView')
        self._vm_details[node_name] = vm_info
    try:
        return self._vm_details[node_name]
    except KeyError:
        raise InstanceNotFoundError(
            "Instance `{instance_id}` not found"
            .format(instance_id=instance_id))
Return details on the VM with the given name .
16,407
def inspect_node(node):
    """Connect to a node and discover its batch system and architecture.

    Accepts an ``elasticluster.cluster.Node``; returns a dict of
    findings, or ``None`` if no SSH connection could be made.
    """
    node_information = {}
    ssh = node.connect()
    if not ssh:
        log.error("Unable to connect to node %s", node.name)
        return
    # Probe for a known batch-system command on the node.
    (_in, _out, _err) = ssh.exec_command(
        "(type >& /dev/null -a srun && echo slurm) "
        "|| (type >& /dev/null -a qconf && echo sge) "
        "|| (type >& /dev/null -a pbsnodes && echo pbs) "
        "|| echo UNKNOWN")
    node_information['type'] = _out.read().strip()
    (_in, _out, _err) = ssh.exec_command("arch")
    node_information['architecture'] = _out.read().strip()
    # Gather batch-system-specific details where we know how.
    if node_information['type'] == 'slurm':
        inspect_slurm_cluster(ssh, node_information)
    elif node_information['type'] == 'sge':
        inspect_sge_cluster(ssh, node_information)
    ssh.close()
    return node_information
This function accepts an elasticluster . cluster . Node instance , connects to the node , and tries to discover the kind of batch system installed , along with some other information .
16,408
def create_gc3pie_config_snippet(cluster):
    """Create a configuration file snippet to be used with GC3Pie.

    Inspects the cluster's frontend node over SSH; if inspection
    fails, falls back to placeholder values.
    """
    auth_section = 'auth/elasticluster_%s' % cluster.name
    resource_section = 'resource/elasticluster_%s' % cluster.name
    cfg = RawConfigParser()
    cfg.add_section(auth_section)
    frontend_node = cluster.get_ssh_to_node()
    cfg.set(auth_section, 'type', 'ssh')
    cfg.set(auth_section, 'username', frontend_node.image_user)
    cluster_info = inspect_node(frontend_node)
    cfg.add_section(resource_section)
    cfg.set(resource_section, 'enabled', 'yes')
    cfg.set(resource_section, 'transport', 'ssh')
    cfg.set(resource_section, 'frontend', frontend_node.preferred_ip)
    if not cluster_info:
        # BUG FIX: message previously read "Following informatino are
        # only partial!" (typo + grammar).
        log.error("Unable to gather enough information from the cluster. "
                  "The following information is only partial!")
        cluster_info = {'architecture': 'unknown',
                        'type': 'unknown',
                        'max_cores': -1,
                        'max_cores_per_job': -1,
                        'max_memory_per_core': -1,
                        'max_walltime': '672hours'}
    cfg.set(resource_section, 'type', cluster_info['type'])
    cfg.set(resource_section, 'architecture', cluster_info['architecture'])
    cfg.set(resource_section, 'max_cores', cluster_info.get('max_cores', 1))
    cfg.set(resource_section, 'max_cores_per_job',
            cluster_info.get('max_cores_per_job', 1))
    cfg.set(resource_section, 'max_memory_per_core',
            cluster_info.get('max_memory_per_core', '2GB'))
    cfg.set(resource_section, 'max_walltime',
            cluster_info.get('max_walltime', '672hours'))
    cfgstring = StringIO()
    cfg.write(cfgstring)
    return cfgstring.getvalue()
Create a configuration file snippet to be used with GC3Pie .
16,409
def _execute_request(self, request):
    """Execute `request` while holding the provider-wide lock.

    A single class-level lock serializes requests so that multiple
    API calls are never fired at the same time.
    """
    with GoogleCloudProvider.__gce_lock:
        return request.execute(http=self._auth_http)
Helper method to execute a request since a lock should be used to not fire up multiple requests at the same time .
16,410
def _wait_until_done(self, response, wait=30):
    """Block until the given GCE operation reports status ``DONE``.

    Polls the zone- or global-operations endpoint, sleeping a
    randomized interval (up to `wait` seconds) between polls.
    """
    gce = self._connect()
    status = response['status']
    while status != 'DONE' and response:
        if wait:
            # Randomized back-off so parallel waiters do not poll in lock-step.
            time.sleep(1 + random.randrange(wait))
        operation_id = response['name']
        # Zone-scoped operations must be polled with the zone name.
        if 'zone' in response:
            zone_name = response['zone'].split('/')[-1]
            request = gce.zoneOperations().get(
                project=self._project_id, operation=operation_id,
                zone=zone_name)
        else:
            request = gce.globalOperations().get(
                project=self._project_id, operation=operation_id)
        response = self._execute_request(request)
        if response:
            status = response['status']
    return response
Blocks until the operation status is done for the given operation .
16,411
def pause_instance(self, instance_id):
    """Pause the instance, retaining disk and config.

    :return: dict with key ``instance_id`` on success, or ``None``
        if `instance_id` is empty.
    :raises InstanceError: if stopping the instance failed.
    """
    if not instance_id:
        log.info("Instance to pause has no instance id.")
        return
    gce = self._connect()
    try:
        request = gce.instances().stop(
            project=self._project_id, instance=instance_id,
            zone=self._zone)
        operation = self._execute_request(request)
        response = self._wait_until_done(operation)
        self._check_response(response)
        return {"instance_id": instance_id}
    except HttpError as e:
        log.error("Error stopping instance: `%s`", e)
        # BUG FIX: `e` was previously passed as a second constructor
        # argument, so the message was never %-formatted.
        raise InstanceError("Error stopping instance `%s`" % e)
Pauses the instance retaining disk and config .
16,412
def resume_instance(self, paused_info):
    """Restart a paused instance, retaining disk and config.

    :param paused_info: dict with key ``instance_id``, as returned by
        `pause_instance`.
    :raises InstanceError: if restarting the instance failed.
    """
    if not paused_info.get("instance_id"):
        # BUG FIX: message previously said "stop" in the resume path.
        log.info("Instance to resume has no instance id.")
        return
    gce = self._connect()
    try:
        request = gce.instances().start(
            project=self._project_id,
            instance=paused_info["instance_id"],
            zone=self._zone)
        operation = self._execute_request(request)
        response = self._wait_until_done(operation)
        self._check_response(response)
        return
    except HttpError as e:
        log.error("Error restarting instance: `%s`", e)
        # BUG FIX: %-format the message instead of passing `e` as a
        # second constructor argument.
        raise InstanceError("Error restarting instance `%s`" % e)
Restarts a paused instance retaining disk and config .
16,413
def list_instances(self, filter=None):
    """List instances on GCE, optionally filtering the results.

    :param filter: GCE API filter expression (parameter name shadows
        the builtin `filter`; kept for backward compatibility).
    :return: list of instance dicts (possibly empty).
    :raises InstanceError: if the API request failed.
    """
    gce = self._connect()
    try:
        request = gce.instances().list(
            project=self._project_id, filter=filter, zone=self._zone)
        response = self._execute_request(request)
        self._check_response(response)
    except (HttpError, CloudProviderError) as e:
        # BUG FIX: the original message `"...: ``" % e` had no %s
        # placeholder, so the `%` raised `TypeError: not all arguments
        # converted` instead of the intended InstanceError.
        raise InstanceError("could not retrieve all instances on the "
                            "cloud: `%s`" % e)
    if response and 'items' in response:
        return response['items']
    else:
        return list()
List instances on GCE optionally filtering the results .
16,414
def is_instance_running(self, instance_id):
    """Check whether the named instance is up and running."""
    matching = self.list_instances(filter=('name eq "%s"' % instance_id))
    # Running if at least one matching instance reports RUNNING.
    return any(item['status'] == 'RUNNING' for item in matching)
Check whether the instance is up and running .
16,415
def __init_keystone_session(self):
    """Create and return a Keystone session object.

    Tries API v3 first, then v2, honoring an explicitly configured
    identity API version if one is set (``None`` = auto-detect).

    :raises RuntimeError: if no session could be established.
    """
    api = self._identity_api_version
    tried = []
    if api in ['3', None]:
        sess = self.__init_keystone_session_v3(check=(api is None))
        tried.append('v3')
        if sess:
            return sess
    if api in ['2', None]:
        sess = self.__init_keystone_session_v2(check=(api is None))
        tried.append('v2')
        if sess:
            return sess
    raise RuntimeError(
        "Cannot establish Keystone session (tried: {0})."
        .format(', '.join(tried)))
Create and return a Keystone session object .
16,416
def __init_keystone_session_v2(self, check=False):
    """Create and return a session object using Keystone API v2.

    When `check` is true, verify the session by issuing a cheap Nova
    call; return ``None`` if the check fails.
    """
    from keystoneauth1 import loading as keystone_v2
    loader = keystone_v2.get_plugin_loader('password')
    auth = loader.load_from_options(
        auth_url=self._os_auth_url,
        username=self._os_username,
        password=self._os_password,
        project_name=self._os_tenant_name,
    )
    sess = keystoneauth1.session.Session(auth=auth, verify=self._os_cacert)
    if check:
        log.debug("Checking that Keystone API v2 session works...")
        try:
            # Any Nova operation would do; listing flavors is cheap.
            nova = nova_client.Client(
                self._compute_api_version, session=sess,
                cacert=self._os_cacert)
            nova.flavors.list()
        except keystoneauth1.exceptions.NotFound as err:
            log.warning("Creating Keystone v2 session failed: %s", err)
            return None
        except keystoneauth1.exceptions.ClientException as err:
            log.error("OpenStack server rejected request (likely configuration error?): %s", err)
            return None
    log.info("Using Keystone API v2 session to authenticate to OpenStack")
    return sess
Create and return a session object using Keystone API v2 .
16,417
def __init_keystone_session_v3(self, check=False):
    """Return a new session object created using Keystone API v3.

    When `check` is true, verify the session by issuing a cheap Nova
    call; return ``None`` if the v3 library is unavailable or the
    check fails.
    """
    try:
        from keystoneauth1.identity import v3 as keystone_v3
    except ImportError:
        log.warning("Cannot load Keystone API v3 library.")
        return None
    auth = keystone_v3.Password(
        auth_url=self._os_auth_url,
        username=self._os_username,
        password=self._os_password,
        user_domain_name=self._os_user_domain_name,
        project_domain_name=self._os_project_domain_name,
        project_name=self._os_tenant_name,
    )
    sess = keystoneauth1.session.Session(auth=auth, verify=self._os_cacert)
    if check:
        log.debug("Checking that Keystone API v3 session works...")
        try:
            # Any Nova operation would do; listing flavors is cheap.
            nova = nova_client.Client(self._compute_api_version, session=sess)
            nova.flavors.list()
        except keystoneauth1.exceptions.NotFound as err:
            log.warning("Creating Keystone v3 session failed: %s", err)
            return None
        except keystoneauth1.exceptions.ClientException as err:
            log.error("OpenStack server rejected request (likely configuration error?): %s", err)
            return None
    log.info("Using Keystone API v3 session to authenticate to OpenStack")
    return sess
Return a new session object created using Keystone API v3 .
16,418
def _check_security_groups(self, names):
    """Raise an exception if any named security group does not exist.

    :raises SecurityGroupError: listing the missing group names.
    :return: ``True`` when all groups exist.
    """
    self._init_os_api()
    log.debug("Checking existence of security group(s) %s ...", names)
    try:
        # Nova security-group API (older installations).
        existing = set(
            sg.name for sg in self.nova_client.security_groups.list())
    except AttributeError:
        # Newer installations expose security groups via Neutron.
        groups = self.neutron_client.list_security_groups()['security_groups']
        existing = set(sg[u'name'] for sg in groups)
    nonexisting = set(names) - existing
    if nonexisting:
        raise SecurityGroupError(
            "Security group(s) `{0}` do not exist"
            .format(', '.join(nonexisting)))
    return True
Raise an exception if any of the named security groups does not exist .
16,419
def _get_images(self):
    """Get available images.

    Uses the Nova images API when present, falling back to Glance.
    """
    self._init_os_api()
    try:
        return self.nova_client.images.list()
    except AttributeError:
        # `nova_client.images` was removed in recent API versions.
        return list(self.glance_client.images.list())
Get available images . We cache the results in order to reduce network usage .
16,420
def main(self):
    """Main entry point of the ElastiCluster CLI.

    Dispatches to the selected subcommand; on error, logs, optionally
    prints a traceback, and exits with status 1.
    """
    assert self.params.func, "No subcommand defined in `ElastiCluster.main()"
    try:
        return self.params.func()
    except Exception as err:
        log.error("Error: %s", err)
        if self.params.verbose > 2:
            import traceback
            traceback.print_exc()
        print("Aborting because of errors: {err}.".format(err=err))
        sys.exit(1)
This is the main entry point of the ElastiCluster CLI .
16,421
def confirm_or_abort(prompt, exitcode=os.EX_TEMPFAIL, msg=None, **extra_args):
    """Prompt the user for confirmation; exit on negative reply.

    Extra keyword arguments are forwarded to `click.confirm`.
    """
    if click.confirm(prompt, **extra_args):
        return True
    # Negative answer: print the optional message and abort the process.
    if msg:
        sys.stderr.write(msg)
        sys.stderr.write('\n')
    sys.exit(exitcode)
Prompt user for confirmation and exit on negative reply .
16,422
def environment(**kv):
    """Context manager: run code with a modified process environment.

    Variables in `kv` are set on entry; on exit, previously-unset
    variables are removed and previously-set ones are restored.
    """
    newly_set = []
    previous = {}
    for key, value in kv.items():
        if key in os.environ:
            previous[key] = os.environ[key]
        else:
            newly_set.append(key)
        os.environ[key] = value
    yield
    # Undo all our changes.
    for key in newly_set:
        del os.environ[key]
    for key, old_value in previous.items():
        os.environ[key] = old_value
Context manager to run Python code with a modified UNIX process environment .
16,423
def expand_ssh_proxy_command(command, user, addr, port=22):
    """Expand the special digraphs ``%h``, ``%p``, ``%r`` (and ``%%``).

    Mirrors OpenSSH's `ProxyCommand` token expansion: ``%h`` -> host,
    ``%p`` -> port, ``%r`` -> remote user, ``%%`` -> literal ``%``.

    :raises ValueError: on an unknown ``%X`` digraph.
    """
    subst = {
        'h': str(addr),
        'p': str(port),
        'r': str(user),
        '%': '%',
    }
    translated = []
    escaped = False
    for char in command:
        if escaped:
            # BUG FIX: the original tested `char == '%'` *before* the
            # `escaped` flag, so "%%" re-armed the escape and never
            # produced a literal '%'.
            if char not in subst:
                raise ValueError("Unknown digraph `%{0}`"
                                 " in proxy command string `{1}`"
                                 .format(char, command))
            translated.append(subst[char])
            escaped = False
        elif char == '%':
            escaped = True
        else:
            translated.append(char)
    return ''.join(translated)
Expand the special digraphs %h %p and %r .
16,424
def get_num_processors():
    """Return the number of online processor cores.

    Tries, in order: `os.cpu_count()`, `multiprocessing.cpu_count()`,
    and the external ``nproc`` command.

    :raises RuntimeError: if the CPU count cannot be determined.
    """
    try:
        ncpus = os.cpu_count()
        # BUG FIX: `os.cpu_count()` is documented to return None when
        # the count is undetermined; fall through to the next method
        # instead of returning None.
        if ncpus:
            return ncpus
    except AttributeError:
        pass  # Python 2.x has no `os.cpu_count()`
    try:
        import multiprocessing
        return multiprocessing.cpu_count()
    except (ImportError, NotImplementedError):
        pass
    # Last resort: ask the external `nproc` command
    # (prefer the backported `subprocess32` when available).
    try:
        from subprocess32 import check_output
        return int(check_output('nproc'))
    except (ImportError, CalledProcessError, ValueError, TypeError):
        pass
    try:
        from subprocess import check_output
        return int(check_output('nproc'))
    except (ImportError, CalledProcessError, ValueError, TypeError):
        pass
    raise RuntimeError("Cannot determine number of processors")
Return number of online processor cores .
16,425
def sighandler(signum, handler):
    """Context manager: bind UNIX signal `signum` to `handler`.

    The previous handler is restored when the context exits.
    """
    saved_handler = signal.getsignal(signum)
    signal.signal(signum, handler)
    yield
    signal.signal(signum, saved_handler)
Context manager to run code with UNIX signal signum bound to handler .
16,426
def temporary_dir(delete=True, dir=None, prefix='elasticluster.', suffix='.d'):
    """Context manager: create a temp dir and make it the cwd.

    On exit, the previous working directory is restored and (unless
    `delete` is false) the temporary directory is removed.
    """
    previous_cwd = os.getcwd()
    tmpdir = tempfile.mkdtemp(suffix, prefix, dir)
    os.chdir(tmpdir)
    yield
    os.chdir(previous_cwd)
    if delete:
        shutil.rmtree(tmpdir, ignore_errors=True)
Make a temporary directory and make it current for the code in this context .
16,427
def timeout(delay, handler=None):
    """Context manager: deliver SIGALRM after `delay` seconds.

    If no `handler` is given, a default handler raising
    `RuntimeError` is installed for the duration of the context.
    """
    delay = int(delay)
    if handler is None:
        def handler(signum, frame):
            # Default behavior: abort the timed-out code with an error.
            raise RuntimeError(
                "{:d} seconds timeout expired".format(delay))
    saved_sigalrm_handler = signal.getsignal(signal.SIGALRM)
    signal.signal(signal.SIGALRM, handler)
    signal.alarm(delay)
    yield
    # Cancel any pending alarm and restore the previous handler.
    signal.alarm(0)
    signal.signal(signal.SIGALRM, saved_sigalrm_handler)
Context manager to run code and deliver a SIGALRM signal after delay seconds .
16,428
def format_warning_oneline(message, category, filename, lineno,
                           file=None, line=None):
    """Format a warning as a single ``Category: message`` line for logging.

    Signature matches `warnings.formatwarning`; the location arguments
    are accepted but not shown.
    """
    return '{category}: {message}'.format(
        category=category.__name__, message=message)
Format a warning for logging .
16,429
def redirect_warnings(capture=True, logger='py.warnings'):
    """Route Python warnings into the `logging` package, or back out.

    With `capture` true, install a logging-based warnings hook; with
    `capture` false, restore the original destination.
    """
    global _warnings_showwarning
    if capture:
        assert _warnings_showwarning is None
        # Remember the original hook so it can be restored later.
        _warnings_showwarning = warnings.showwarning
        warnings.showwarning = \
            _WarningsLogger(logger, format_warning_oneline).__call__
    else:
        assert _warnings_showwarning is not None
        warnings.showwarning = _warnings_showwarning
        _warnings_showwarning = None
If capture is true redirect all warnings to the logging package . If capture is False ensure that warnings are not redirected to logging but to their original destinations .
16,430
def start_instance(self, key_name, public_key_path, private_key_path,
                   security_group, flavor, image_id, image_userdata,
                   username=None, node_name=None):
    """Start a new instance on the cloud using the given properties.

    Abstract: concrete cloud providers must override this. Multiple
    instances may be started from different threads at the same time,
    so implementations must handle concurrency themselves.
    """
    pass
Starts a new instance on the cloud using the given properties . Multiple instances might be started in different threads at the same time . The implementation should handle any problems regarding this itself .
16,431
def __get_name_or_id(values, known):
    """Return objects from `known` whose `.name` or `.id` matches any
    comma-separated element of `values`."""
    wanted = [part.strip() for part in values.split(',')]
    matches = []
    for target in wanted:
        matches.extend(
            obj for obj in known
            if obj.name == target or obj.id == target)
    return matches
Return list of values that match attribute . id or . name of any object in list known .
16,432
def _assemble_linux_cmdline(kv):
    """Assemble a Linux boot command line from a key/value mapping.

    A value of ``None`` produces a bare flag (no ``=``).
    """
    return ' '.join(
        str(key) if value is None else '%s=%s' % (key, value)
        for key, value in kv.items())
Given a dictionary assemble a Linux boot command line .
16,433
def _edit_linux_cmdline(cmdline, state, name, value=None):
    """Return a new Linux command line with parameter `name` added,
    replaced, or removed (depending on `state`)."""
    kv = _parse_linux_cmdline(cmdline)
    if state == 'absent':
        kv.pop(name, None)
    elif state == 'present':
        kv[name] = value
    return _assemble_linux_cmdline(kv)
Return a new Linux command line with parameter name added replaced or removed .
16,434
def execute(self):
    """Start a new cluster."""
    cluster_template = self.params.cluster
    if self.params.cluster_name:
        cluster_name = self.params.cluster_name
    else:
        cluster_name = self.params.cluster
    creator = make_creator(self.params.config,
                           storage_path=self.params.storage)
    if cluster_template not in creator.cluster_conf:
        raise ClusterNotFound(
            "No cluster template named `{0}`".format(cluster_template))
    # Apply any per-node-group count overrides from the command line.
    cluster_nodes_conf = creator.cluster_conf[cluster_template]['nodes']
    for kind, num in self.params.nodes_override.items():
        if kind not in cluster_nodes_conf:
            raise ConfigurationError(
                "No node group `{kind}` defined"
                " in cluster template `{template}`"
                .format(kind=kind, template=cluster_template))
        cluster_nodes_conf[kind]['num'] = num
    # Re-use a saved cluster if one exists, otherwise create it anew.
    try:
        cluster = creator.load_cluster(cluster_name)
    except ClusterNotFound:
        try:
            cluster = creator.create_cluster(cluster_template, cluster_name)
        except ConfigurationError as err:
            log.error("Starting cluster %s: %s", cluster_template, err)
            return
    try:
        print("Starting cluster `{0}` with:".format(cluster.name))
        for cls in cluster.nodes:
            print("* {0:d} {1} nodes.".format(len(cluster.nodes[cls]), cls))
        print("(This may take a while...)")
        min_nodes = dict(
            (kind, cluster_nodes_conf[kind]['min_num'])
            for kind in cluster_nodes_conf)
        cluster.start(min_nodes, self.params.max_concurrent_requests)
        if self.params.no_setup:
            print("NOT configuring the cluster as requested.")
        else:
            print("Configuring the cluster ...")
            print("(this too may take a while)")
            ok = cluster.setup()
            if ok:
                print("\nYour cluster `{0}` is ready!".format(cluster.name))
            else:
                print("\nWARNING: YOUR CLUSTER `{0}` IS NOT READY YET!"
                      .format(cluster.name))
        print(cluster_summary(cluster))
    except (KeyError, ImageError, SecurityGroupError, ClusterError) as err:
        log.error("Could not start cluster `%s`: %s", cluster.name, err)
        raise
Starts a new cluster .
16,435
def execute(self):
    """Stop the cluster, if it's running."""
    cluster_name = self.params.cluster
    creator = make_creator(self.params.config,
                           storage_path=self.params.storage)
    try:
        cluster = creator.load_cluster(cluster_name)
    except (ClusterNotFound, ConfigurationError) as err:
        log.error("Cannot stop cluster `%s`: %s", cluster_name, err)
        return os.EX_NOINPUT
    # Interactive confirmation unless `--yes` was given.
    if not self.params.yes:
        confirm_or_abort(
            "Do you want really want to stop cluster `{cluster_name}`?"
            .format(cluster_name=cluster_name),
            msg="Aborting upon user request.")
    print("Destroying cluster `%s` ..." % cluster_name)
    cluster.stop(force=self.params.force, wait=self.params.wait)
Stops the cluster if it's running .
16,436
def execute(self):
    """Pause the cluster, if it is running."""
    cluster_name = self.params.cluster
    creator = make_creator(self.params.config,
                           storage_path=self.params.storage)
    try:
        cluster = creator.load_cluster(cluster_name)
    except (ClusterNotFound, ConfigurationError) as e:
        log.error("Cannot load cluster `%s`: %s", cluster_name, e)
        return os.EX_NOINPUT
    # Interactive confirmation unless `--yes` was given.
    if not self.params.yes:
        confirm_or_abort(
            "Do you want really want to pause cluster `{cluster_name}`?"
            .format(cluster_name=cluster_name),
            msg="Aborting upon user request.")
    print("Pausing cluster `%s` ..." % cluster_name)
    cluster.pause()
Pause the cluster if it is running .
16,437
def execute(self):
    """Resume the cluster, if it is paused."""
    cluster_name = self.params.cluster
    creator = make_creator(self.params.config,
                           storage_path=self.params.storage)
    try:
        cluster = creator.load_cluster(cluster_name)
    except (ClusterNotFound, ConfigurationError) as e:
        log.error("Cannot load cluster `%s`: %s", cluster_name, e)
        return os.EX_NOINPUT
    print("Resuming cluster `%s` ..." % cluster_name)
    cluster.resume()
Resume the cluster if it is paused .
16,438
def execute(self):
    """List all nodes within the cluster, with id, IP and other details."""
    creator = make_creator(self.params.config,
                           storage_path=self.params.storage)
    cluster_name = self.params.cluster
    try:
        cluster = creator.load_cluster(cluster_name)
        if self.params.update:
            cluster.update()
    except (ClusterNotFound, ConfigurationError) as ex:
        log.error("Listing nodes from cluster %s: %s", cluster_name, ex)
        return
    if self.params.pretty_json:
        print(json.dumps(cluster, default=dict, indent=4))
    elif self.params.json:
        print(json.dumps(cluster, default=dict))
    else:
        print(cluster_summary(cluster))
        for cls in cluster.nodes:
            print("%s nodes:" % cls)
            print("")
            for node in cluster.nodes[cls]:
                txt = ["    " + i for i in node.pprint().splitlines()]
                print('  - ' + "\n".join(txt)[4:])
                print("")
Lists all nodes within the specified cluster with certain information like id and ip .
16,439
def keys(self):
    """Only expose some of the attributes when used as a dictionary."""
    keys = Struct.keys(self)
    # Hide implementation-detail attributes from dict-like views.
    for hidden in ('_cloud_provider', '_naming_policy', '_setup_provider',
                   'known_hosts_file', 'repository'):
        if hidden in keys:
            keys.remove(hidden)
    return keys
Only expose some of the attributes when using as a dictionary
16,440
def add_node(self, kind, image_id, image_user, flavor, security_group,
             image_userdata='', name=None, **extra):
    """Add a new node to the cluster.

    The node is neither started nor set up automatically; that has to
    be done manually afterwards.  Returns the new `Node`.

    :raises ValueError: if `kind` is not a valid node-kind name.
    """
    if not self._NODE_KIND_RE.match(kind):
        raise ValueError(
            "Invalid name `{kind}`. The `kind` argument may only contain"
            " alphanumeric characters, and must not end with a digit."
            .format(kind=kind))
    if kind not in self.nodes:
        self.nodes[kind] = []
    # Collect parameters for the `Node` constructor; anything not
    # given explicitly falls back to the cluster-wide defaults.
    extra.update(
        cloud_provider=self._cloud_provider,
        cluster_name=self.name,
        flavor=flavor,
        image_id=image_id,
        image_user=image_user,
        image_userdata=image_userdata,
        kind=kind,
        security_group=security_group,
    )
    for attr in ('flavor', 'image_id', 'image_user', 'image_userdata',
                 'security_group', 'user_key_name', 'user_key_private',
                 'user_key_public'):
        if attr not in extra:
            extra[attr] = getattr(self, attr)
    if not name:
        # No name given: let the naming policy pick one.
        name = self._naming_policy.new(**extra)
    else:
        self._naming_policy.use(kind, name)
    node = Node(name=name, **extra)
    self.nodes[kind].append(node)
    return node
Adds a new node to the cluster . This factory method provides an easy way to add a new node to the cluster by specifying all relevant parameters . The node does not get started nor setup automatically this has to be done manually afterwards .
16,441
def add_nodes(self, kind, num, image_id, image_user, flavor,
              security_group, image_userdata='', **extra):
    """Add `num` nodes of the same kind to the cluster."""
    for _ in range(num):
        self.add_node(kind, image_id, image_user, flavor,
                      security_group, image_userdata=image_userdata,
                      **extra)
Helper method to add multiple nodes of the same kind to a cluster .
16,442
def _check_starting_nodes(self, nodes, lapse):
    """Wait (up to `lapse` seconds) until all given nodes are alive.

    Returns the set of nodes that still failed to come up.
    """
    with timeout(lapse, raise_timeout_error):
        try:
            while nodes:
                nodes = set(node for node in nodes if not node.is_alive())
                if nodes:
                    log.debug("Waiting for %d more nodes to come up ...",
                              len(nodes))
                    time.sleep(self.polling_interval)
        except TimeoutError:
            log.error("Some nodes did not start correctly"
                      " within the given %d-seconds timeout: %s",
                      lapse, ', '.join(node.name for node in nodes))
    return nodes
Wait until all given nodes are alive for max lapse seconds .
16,443
def _gather_node_ip_addresses(self, nodes, lapse, ssh_timeout, remake=False):
    """Connect via SSH to each node, recording host keys as we go.

    Returns the set of nodes that could not be reached within `lapse`
    seconds.
    """
    # Start from a clean "known hosts" file if requested.
    known_hosts_path = self.known_hosts_file
    if remake and os.path.isfile(known_hosts_path):
        os.remove(known_hosts_path)
    # Ensure the file exists and is writable; otherwise disable
    # host-key persistence for this run.
    try:
        fd = open(known_hosts_path, 'a')
        fd.close()
    except IOError as err:
        log.warning("Error opening SSH 'known hosts' file `%s`: %s",
                    known_hosts_path, err)
        known_hosts_path = None
    keys = paramiko.hostkeys.HostKeys(known_hosts_path)
    with timeout(lapse, raise_timeout_error):
        try:
            while nodes:
                # Iterate over a copy since we remove reachable nodes.
                for node in copy(nodes):
                    ssh = node.connect(keyfile=known_hosts_path,
                                       timeout=ssh_timeout)
                    if ssh:
                        log.info("Connection to node `%s` successful,"
                                 " using IP address %s to connect.",
                                 node.name, node.connection_ip())
                        # Remember the host keys the node presented.
                        for host, key in ssh.get_host_keys().items():
                            for keytype, keydata in key.items():
                                keys.add(host, keytype, keydata)
                        self._save_keys_to_known_hosts_file(keys)
                        nodes.remove(node)
                if nodes:
                    time.sleep(self.polling_interval)
        except TimeoutError:
            log.error("Some nodes of the cluster were unreachable"
                      " within the given %d-seconds timeout: %s",
                      lapse, ', '.join(node.name for node in nodes))
    return nodes
Connect via SSH to each node .
16,444
def stop(self, force=False, wait=False):
    """Terminate all VMs in this cluster and delete its repository.

    With `force`, local cluster data is removed even if some nodes
    could not be terminated.
    """
    log.debug("Stopping cluster `%s` ...", self.name)
    failed = self._stop_all_nodes(wait)
    if not failed:
        self._delete_saved_data()
        return
    if force:
        # Some nodes survived, but the caller asked us to forget
        # about the cluster locally anyway.
        self._delete_saved_data()
        log.warning("Not all cluster nodes have been terminated."
                    " However, as requested, data about the cluster"
                    " has been removed from local storage.")
    else:
        self.repository.save_or_update(self)
        log.warning("Not all cluster nodes have been terminated."
                    " Fix errors above and re-run `elasticluster stop %s`",
                    self.name)
Terminate all VMs in this cluster and delete its repository .
16,445
def pause(self):
    """Pause all VMs in this cluster and store data so that they can
    be restarted later."""
    log.info("Pausing cluster `%s` ...", self.name)
    failed = self._pause_all_nodes()
    # Stored host keys become stale once the VMs change state.
    if os.path.exists(self.known_hosts_file):
        os.remove(self.known_hosts_file)
    self.repository.save_or_update(self)
    if failed:
        log.warning("Not all cluster nodes have been successfully "
                    "stopped. Some nodes may still be running - "
                    "check error messages above and consider "
                    "re-running `elasticluster pause %s` if "
                    "necessary.", self.name)
Pause all VMs in this cluster and store data so that they can be restarted later .
16,446
def resume(self):
    """Resume all paused VMs in this cluster."""
    log.info("Resuming cluster `%s` ...", self.name)
    failed = self._resume_all_nodes()
    # IP addresses may have changed across the pause; refresh them
    # and re-collect SSH host keys.
    for node in self.get_all_nodes():
        node.update_ips()
    self._gather_node_ip_addresses(
        self.get_all_nodes(), self.start_timeout, self.ssh_probe_timeout)
    self.repository.save_or_update(self)
    if failed:
        log.warning("Not all cluster nodes have been successfully "
                    "restarted. Check error messages above and consider "
                    "re-running `elasticluster resume %s` if "
                    "necessary.", self.name)
        return
    if not self._setup_provider.resume_cluster(self):
        log.warning("Elasticluster was not able to guarantee that the "
                    "cluster restarted correctly - check the errors "
                    "above and check your config.")
Resume all paused VMs in this cluster .
16,447
def _stop_all_nodes(self, wait=False):
    """Terminate all cluster nodes.

    :param bool wait: wait for each VM to actually terminate.
    :return: number of nodes that could not be stopped.
    """
    failures = 0
    for node in self.get_all_nodes():
        if not node.instance_id:
            # nothing to terminate; just drop it from the roster
            log.warning("Node `%s` has no instance ID."
                        " Assuming it did not start correctly,"
                        " so removing it anyway from the cluster.", node.name)
            self.nodes[node.kind].remove(node)
            continue
        try:
            node.stop(wait)
            self.nodes[node.kind].remove(node)
            log.debug("Removed node `%s` from cluster `%s`",
                      node.name, self.name)
        except InstanceNotFoundError:
            # already gone on the cloud side; not counted as a failure
            log.info("Node `%s` (instance ID `%s`) was not found;"
                     " assuming it has already been terminated.",
                     node.name, node.instance_id)
        except Exception as err:
            failures += 1
            log.error("Could not stop node `%s` (instance ID `%s`): %s %s",
                      node.name, node.instance_id, err, err.__class__)
    return failures
Terminate all cluster nodes . Return number of failures .
16,448
def _pause_all_nodes(self, max_thread_pool_size=0):
    """Pause all cluster nodes in parallel.

    Stores per-node restart data into ``self.paused_nodes`` so the
    cluster can be resumed later.

    :param int max_thread_pool_size: upper bound for the worker pool;
        ``0`` lets ``_make_thread_pool`` pick a default.
    :return: number of nodes that failed to pause.
    """
    failed = 0

    def _pause_specific_node(node):
        # Worker run in the thread pool; returns the provider-specific
        # restart info, or None on any failure.
        if not node.instance_id:
            log.warning("Node `%s` has no instance id."
                        " It is either already stopped, or"
                        " never created properly. Not attempting"
                        " to stop it again.", node.name)
            return None
        try:
            return node.pause()
        except Exception as err:
            log.error("Could not stop node `%s` (instance ID `%s`): %s %s",
                      node.name, node.instance_id, err, err.__class__)
            # refresh addresses so stale IPs are not saved for this node
            node.update_ips()
            return None

    nodes = self.get_all_nodes()
    thread_pool = self._make_thread_pool(max_thread_pool_size)
    # `map` preserves input order, so zip pairs each node with its result
    for node, state in zip(nodes, thread_pool.map(_pause_specific_node, nodes)):
        if state is None:
            failed += 1
        else:
            self.paused_nodes[node.name] = state
    return failed
Pause all cluster nodes - ensure that we store data so that in the future the nodes can be restarted .
16,449
def setup(self, extra_args=tuple()):
    """Configure the cluster nodes via the setup provider.

    :param extra_args: additional arguments forwarded to the setup provider.
    :return: ``True`` if the configuration succeeded, ``False`` otherwise.
    """
    try:
        ok = self._setup_provider.setup_cluster(self, extra_args)
    except Exception as err:
        log.error("The cluster hosts are up and running,"
                  " but %s failed to set the cluster up: %s",
                  self._setup_provider.HUMAN_READABLE_NAME, err)
        ok = False
    if not ok:
        log.warning("Cluster `%s` not yet configured. Please, re-run "
                    "`elasticluster setup %s` and/or check your configuration",
                    self.name, self.name)
    return ok
Configure the cluster nodes .
16,450
def update(self):
    """Refresh connection information of all nodes and persist the cluster."""
    for node in self.get_all_nodes():
        try:
            node.update_ips()
            # re-probe SSH unless the previously working IP is still valid
            has_valid_preferred = (node.preferred_ip
                                   and node.preferred_ip in node.ips)
            if node.ips and not has_valid_preferred:
                node.connect()
        except InstanceError as ex:
            log.warning("Ignoring error updating information on node %s: %s",
                        node, ex)
    self.repository.save_or_update(self)
Update connection information of all nodes in this cluster .
16,451
def start(self):
    """Start the node on the cloud using the configured instance properties."""
    log.info("Starting node `%s` from image `%s` with flavor %s ...",
             self.name, self.image_id, self.flavor)
    instance_name = "%s-%s" % (self.cluster_name, self.name)
    self.instance_id = self._cloud_provider.start_instance(
        self.user_key_name,
        self.user_key_public,
        self.user_key_private,
        self.security_group,
        self.flavor,
        self.image_id,
        self.image_userdata,
        username=self.image_user,
        node_name=instance_name,
        **self.extra)
    log.debug("Node `%s` has instance ID `%s`", self.name, self.instance_id)
Start the node on the cloud using the given instance properties .
16,452
def stop(self, wait=False):
    """Terminate the VM instance launched on the cloud for this node.

    No-op when the node was never started (no instance ID).

    :param bool wait: block until the instance is actually gone.
    """
    if self.instance_id is None:
        return
    log.info("Shutting down node `%s` (VM instance `%s`) ...",
             self.name, self.instance_id)
    self._cloud_provider.stop_instance(self.instance_id)
    if wait:
        while self.is_alive():
            time.sleep(1)
    self.instance_id = None
Terminate the VM instance launched on the cloud for this specific node .
16,453
def pause(self):
    """Pause the VM instance and return the info needed to restart it.

    :return: provider-specific state needed by ``resume``.
    :raises ValueError: if the node was never started.
    """
    if self.instance_id is None:
        # Fixed error message: this method pauses, it does not stop.
        raise ValueError("Trying to pause unstarted node.")
    resp = self._cloud_provider.pause_instance(self.instance_id)
    # the IP will likely change on resume, so forget the cached one
    self.preferred_ip = None
    return resp
Pause the VM instance and return the info needed to restart it .
16,454
def connect(self, keyfile=None, timeout=5):
    """Connect to the node via SSH.

    Tries the previously working IP address (``preferred_ip``) first,
    then every other known address, returning the first client that
    connects successfully.

    :param str keyfile: optional known-hosts file to preload host keys from.
    :param int timeout: per-address connection timeout in seconds.
    :return: a connected ``paramiko.SSHClient``, or ``None`` if no
        address was reachable.
    """
    ssh = paramiko.SSHClient()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    if keyfile and os.path.exists(keyfile):
        ssh.load_host_keys(keyfile)
    # Work on a copy so `self.ips` is not reordered.
    ips = self.ips[:]
    if self.preferred_ip:
        if self.preferred_ip in ips:
            # avoid trying the preferred address twice
            ips.remove(self.preferred_ip)
        else:
            log.debug("IP address %s does not seem to belong to %s anymore."
                      " Ignoring it.", self.preferred_ip, self.name)
            self.preferred_ip = ips[0]
    for ip in itertools.chain([self.preferred_ip], ips):
        if not ip:
            continue
        log.debug("Trying to connect to host %s using IP address %s ...",
                  self.name, ip)
        try:
            addr, port = parse_ip_address_and_port(ip, SSH_PORT)
            extra = {
                'allow_agent': True,
                'key_filename': self.user_key_private,
                'look_for_keys': False,
                'timeout': timeout,
                'username': self.image_user,
            }
            if self.ssh_proxy_command:
                proxy_command = expand_ssh_proxy_command(
                    self.ssh_proxy_command, self.image_user, addr, port)
                # imported lazily so paramiko's proxy support is only
                # required when a proxy command is actually configured
                from paramiko.proxy import ProxyCommand
                extra['sock'] = ProxyCommand(proxy_command)
                log.debug("Using proxy command `%s`.", proxy_command)
            ssh.connect(str(addr), port=port, **extra)
            log.debug("Connection to %s succeeded on port %d,"
                      " will use this IP address for future connections.",
                      ip, port)
            # remember the address that worked
            if ip != self.preferred_ip:
                self.preferred_ip = ip
            return ssh
        except socket.error as ex:
            log.debug("Host %s (%s) not reachable within %d seconds: %s -- %r",
                      self.name, ip, timeout, ex, type(ex))
        except paramiko.BadHostKeyException as ex:
            log.error("Invalid SSH host key for %s (%s): %s.",
                      self.name, ip, ex)
        except paramiko.SSHException as ex:
            log.debug("Ignoring error connecting to %s: %s -- %r",
                      self.name, ex, type(ex))
    # every candidate address failed
    return None
Connect to the node via SSH .
16,455
def _connect(self):
    """Connect to the EC2 cloud provider (lazily, with caching).

    :return: an open ``boto`` EC2 connection.
    :raises CloudProviderError: if the endpoint cannot be reached.
    """
    # reuse an already-established connection
    if self._ec2_connection:
        return self._ec2_connection
    try:
        log.debug("Connecting to EC2 endpoint %s", self._ec2host)
        ec2_connection = boto.ec2.connect_to_region(
            self._region_name,
            aws_access_key_id=self._access_key,
            aws_secret_access_key=self._secret_key,
            is_secure=self._secure,
            host=self._ec2host,
            port=self._ec2port,
            path=self._ec2path,
        )
        # `host` being set is used as the "connection OK" indicator
        if ec2_connection.host:
            log.debug("EC2 connection has been successful.")
        else:
            raise CloudProviderError(
                "Cannot establish connection to EC2 region {0}"
                .format(self._region_name))
        if not self._vpc:
            vpc_connection = None
            self._vpc_id = None
        else:
            vpc_connection, self._vpc_id = self._find_vpc_by_name(self._vpc)
    except Exception as err:
        log.error("Error connecting to EC2: %s", err)
        raise
    # cache both connections for subsequent calls
    self._ec2_connection, self._vpc_connection = (
        ec2_connection, vpc_connection)
    return self._ec2_connection
Connect to the EC2 cloud provider .
16,456
def get_ips(self, instance_id):
    """Retrieve the private and public IP addresses for a given instance.

    :param str instance_id: cloud instance identifier.
    :return: list of unique IP addresses (order not guaranteed).
    """
    # Fixed: `_load_instance` used to be called twice in a row with the
    # first result discarded; one call suffices.
    instance = self._load_instance(instance_id)
    IPs = [ip for ip in (instance.private_ip_address, instance.ip_address)
           if ip]
    # allocate a floating/elastic IP if requested (EC2-classic only)
    if self.request_floating_ip and not self._vpc:
        floating_ips = [ip for ip in self._ec2_connection.get_all_addresses()
                        if ip.instance_id == instance.id]
        if not floating_ips:
            log.debug("Public ip address has to be assigned through "
                      "elasticluster.")
            ip = self._allocate_address(instance)
            IPs.insert(0, ip)
        else:
            IPs = [ip.public_ip for ip in floating_ips] + IPs
    return list(set(IPs))
Retrieves the private and public ip addresses for a given instance .
16,457
def _allocate_address ( self , instance ) : connection = self . _connect ( ) free_addresses = [ ip for ip in connection . get_all_addresses ( ) if not ip . instance_id ] if not free_addresses : try : address = connection . allocate_address ( ) except Exception as ex : log . error ( "Unable to allocate a public IP address to instance `%s`" , instance . id ) return None try : address = free_addresses . pop ( ) instance . use_ip ( address ) return address . public_ip except Exception as ex : log . error ( "Unable to associate IP address %s to instance `%s`" , address , instance . id ) return None
Allocates a free public ip address to the given instance
16,458
def _build_cached_instances ( self ) : connection = self . _connect ( ) reservations = connection . get_all_reservations ( ) cached_instances = { } for rs in reservations : for vm in rs . instances : cached_instances [ vm . id ] = vm return cached_instances
Build lookup table of VM instances known to the cloud provider .
16,459
def _check_security_group ( self , name ) : connection = self . _connect ( ) filters = { } if self . _vpc : filters = { 'vpc-id' : self . _vpc_id } security_groups = connection . get_all_security_groups ( filters = filters ) matching_groups = [ group for group in security_groups if name in [ group . name , group . id ] ] if len ( matching_groups ) == 0 : raise SecurityGroupError ( "the specified security group %s does not exist" % name ) elif len ( matching_groups ) == 1 : return matching_groups [ 0 ] . id elif self . _vpc and len ( matching_groups ) > 1 : raise SecurityGroupError ( "the specified security group name %s matches " "more than one security group" % name )
Checks if the security group exists .
16,460
def _check_subnet ( self , name ) : subnets = self . _vpc_connection . get_all_subnets ( filters = { 'vpcId' : self . _vpc_id } ) matching_subnets = [ subnet for subnet in subnets if name in [ subnet . tags . get ( 'Name' ) , subnet . id ] ] if len ( matching_subnets ) == 0 : raise SubnetError ( "the specified subnet %s does not exist" % name ) elif len ( matching_subnets ) == 1 : return matching_subnets [ 0 ] . id else : raise SubnetError ( "the specified subnet name %s matches more than " "one subnet" % name )
Checks if the subnet exists .
16,461
def _find_image_id ( self , image_id ) : if not self . _images : connection = self . _connect ( ) self . _images = connection . get_all_images ( ) image_id_cloud = None for i in self . _images : if i . id == image_id or i . name == image_id : image_id_cloud = i . id break if image_id_cloud : return image_id_cloud else : raise ImageError ( "Could not find given image id `%s`" % image_id )
Finds an image id to a given id or name .
16,462
def migrate_cluster(cluster):
    """Upgrade a cluster object saved by an older elasticluster version.

    Renames legacy private attributes, converts node ``image`` to
    ``image_id``, and supplies a default thread-pool size.

    :return: the (mutated) cluster object.
    """
    renames = (
        ('_user_key_public', 'user_key_public'),
        ('_user_key_private', 'user_key_private'),
        ('_user_key_name', 'user_key_name'),
    )
    for old_attr, new_attr in renames:
        if hasattr(cluster, old_attr):
            setattr(cluster, new_attr, getattr(cluster, old_attr))
            delattr(cluster, old_attr)
    for nodes in cluster.nodes.values():
        for node in nodes:
            if hasattr(node, 'image'):
                # prefer an existing `image_id`, else migrate `image`
                resolved = getattr(node, 'image_id', None) or node.image
                setattr(node, 'image_id', resolved)
                delattr(node, 'image')
    if not hasattr(cluster, 'thread_pool_max_size'):
        cluster.thread_pool_max_size = 10
    return cluster
Called when loading a cluster when it comes from an older version of elasticluster
16,463
def get(self, name):
    """Retrieve the cluster stored under *name*.

    :raises ClusterNotFound: if no such cluster exists.
    """
    try:
        return self.clusters[name]
    except KeyError:
        raise ClusterNotFound("Cluster %s not found." % name)
Retrieves the cluster by the given name .
16,464
def delete(self, cluster):
    """Delete the cluster from the in-memory store.

    :raises ClusterNotFound: if the cluster is not stored here.
    """
    try:
        del self.clusters[cluster.name]
    except KeyError:
        raise ClusterNotFound(
            "Unable to delete non-existent cluster %s" % cluster.name)
Deletes the cluster from memory .
16,465
def get_all(self):
    """Retrieve all clusters from the persistent state directory.

    Clusters that fail to load are skipped with an error message.

    :return: list of cluster objects.
    """
    clusters = []
    cluster_files = glob.glob(
        "%s/*.%s" % (self.storage_path, self.file_ending))
    for fname in cluster_files:
        try:
            # strip the trailing ".<file_ending>" to get the cluster name
            name = fname[:-len(self.file_ending) - 1]
            clusters.append(self.get(name))
        except (ImportError, AttributeError) as ex:
            # Fixed: the second log line referenced an undefined name
            # `cluster_file` (NameError); it now uses `fname`.
            log.error("Unable to load cluster %s: `%s`", fname, ex)
            log.error("If cluster %s was created with a previous version "
                      "of elasticluster, you may need to run "
                      "`elasticluster migrate %s %s` to update it.",
                      fname, self.storage_path, fname)
    return clusters
Retrieves all clusters from the persistent state .
16,466
def delete(self, cluster):
    """Delete the cluster's file from persistent storage, if present."""
    storage_file = self._get_cluster_storage_path(cluster.name)
    # silently ignore clusters that were never saved
    if os.path.exists(storage_file):
        os.unlink(storage_file)
Deletes the cluster from persistent state .
16,467
def load(self, fp):
    """Unpickle a cluster from the open file object *fp*.

    The loaded cluster's ``repository`` attribute is re-bound to this
    repository instance.
    """
    loaded = pickle.load(fp)
    loaded.repository = self
    return loaded
Load cluster from file descriptor fp
16,468
def initdb():
    """Initialize the database and seed it with 30 user/address pairs."""
    click.echo('Init the db...')
    db.create_all()
    for i in range(30):
        click.echo("Creating user/address combo #{}...".format(i))
        # zero-pad the counter so names sort lexicographically
        address = Address(description='Address#2' + str(i).rjust(2, "0"))
        db.session.add(address)
        user = User(name='User#1' + str(i).rjust(2, "0"))
        user.address = address
        db.session.add(user)
        # presumably spaces out creation timestamps by one second
        # -- TODO confirm; this makes seeding take ~30s
        sleep(1)
    db.session.commit()
Initialize the database .
16,469
def clean_regex(regex):
    """Escape regex special characters, keeping '|' as alternation.

    Backslashes in the input are removed, every special character
    except '|' is escaped, runs of '|' are collapsed, and trailing
    '|' characters are stripped.
    """
    # drop any backslashes from the raw input before re-escaping
    cleaned = regex.replace('\\', '')
    for special in '[^$.?*+(){}':
        cleaned = cleaned.replace(special, '\\' + special)
    # collapse empty alternatives ("a||b" -> "a|b") until stable
    while '||' in cleaned:
        cleaned = cleaned.replace('||', '|')
    # a trailing '|' would match the empty string
    return cleaned.rstrip('|')
Escape any regex special characters other than alternation .
16,470
def output_result(self):
    """Build the response dict in the format expected by DataTables."""
    output = {
        'draw': str(int(self.params.get('draw', 1))),
        'recordsTotal': str(self.cardinality),
        'recordsFiltered': str(self.cardinality_filtered),
    }
    if self.error:
        # on error DataTables expects only the error message
        output['error'] = self.error
        return output
    output['data'] = self.results
    # merge in any yadcf filter metadata (iterable of (key, value) pairs)
    output.update(self.yadcf_params)
    return output
Output results in the format needed by DataTables .
16,471
def run(self):
    """Launch filtering, sorting and paging; store rows in ``self.results``."""
    query = self.query
    # total row count before any filtering is applied
    self.cardinality = query.add_columns(self.columns[0].sqla_expr).count()
    self._set_column_filter_expressions()
    self._set_global_filter_expression()
    self._set_sort_expressions()
    self._set_yadcf_data(query)
    # apply per-column and global filters
    query = query.filter(
        *[e for e in self.filter_expressions if e is not None])
    self.cardinality_filtered = query.add_columns(
        self.columns[0].sqla_expr).count()
    # apply sorting
    query = query.order_by(
        *[e for e in self.sort_expressions if e is not None])
    # apply paging; length == -1 means "no limit" per the DataTables API
    length = int(self.params.get('length'))
    if length >= 0:
        query = query.limit(length)
    elif length == -1:
        pass
    else:
        raise (ValueError(
            'Length should be a positive integer or -1 to disable'))
    query = query.offset(int(self.params.get('start')))
    # select the requested columns and fetch the rows
    query = query.add_columns(*[c.sqla_expr for c in self.columns])
    # map each row tuple onto its column name (mData, or index as string)
    column_names = [col.mData if col.mData else str(i)
                    for i, col in enumerate(self.columns)]
    self.results = [{k: v for k, v in zip(column_names, row)}
                    for row in query.all()]
Launch filtering sorting and paging to output results .
16,472
def parse_query_value(combined_value):
    """Parse a value like '>10' into an operator function and a value.

    A missing operator prefix defaults to '='.

    :raises ValueError: if the prefix is not a known operator.
    """
    stripped = combined_value.lstrip('<>=')
    op_len = len(combined_value) - len(stripped)
    operator = combined_value[:op_len] or '='
    try:
        operator_func = search_operators[operator]
    except KeyError:
        raise ValueError(
            'Numeric query should start with operator, choose from %s'
            % ', '.join(search_operators.keys()))
    return operator_func, stripped.strip()
Parse value in form of > value to a lambda and a value .
16,473
def home(request):
    """Probe database connectivity, then render the example index."""
    try:
        DBSession.query(User).first()
    except DBAPIError:
        # database unreachable: return a plain-text 500 with help text
        return Response(conn_err_msg,
                        content_type="text/plain",
                        status_int=500)
    return {"project": "pyramid_tut"}
Try to connect to database and list available examples .
16,474
def main(global_config, **settings):
    """Return a Pyramid WSGI application configured from *settings*."""
    # bind SQLAlchemy to the engine described in the .ini settings
    engine = engine_from_config(settings, "sqlalchemy.")
    DBSession.configure(bind=engine)
    Base.metadata.bind = engine
    config = Configurator(settings=settings)
    config.include("pyramid_jinja2")
    config.include("pyramid_debugtoolbar")
    # routes for the demo pages and their JSON data endpoints
    config.add_route("home", "/")
    config.add_route("data", "/data")
    config.add_route("data_advanced", "/data_advanced")
    config.add_route("data_yadcf", "/data_yadcf")
    config.add_route("dt_110x", "/dt_110x")
    config.add_route("dt_110x_custom_column", "/dt_110x_custom_column")
    config.add_route("dt_110x_basic_column_search",
                     "/dt_110x_basic_column_search")
    config.add_route("dt_110x_advanced_column_search",
                     "/dt_110x_advanced_column_search")
    config.add_route("dt_110x_yadcf", "/dt_110x_yadcf")
    config.scan()
    # JSON renderer that knows how to serialize datetime.date values
    json_renderer = JSON()
    json_renderer.add_adapter(date, date_adapter)
    config.add_renderer("json_with_dates", json_renderer)
    config.add_jinja2_renderer('.html')
    return config.make_wsgi_app()
Return a Pyramid WSGI application .
16,475
def main(argv=sys.argv):
    """Create the schema and populate the database with 30 users.

    NOTE(review): ``argv=sys.argv`` is bound at import time; callers
    normally pass an explicit argv, so this is only a cosmetic smell.
    """
    if len(argv) < 2:
        usage(argv)
    config_uri = argv[1]
    setup_logging(config_uri)
    settings = get_appsettings(config_uri)
    engine = engine_from_config(settings, "sqlalchemy.")
    DBSession.configure(bind=engine)
    Base.metadata.create_all(engine)
    for i in range(30):
        # one transaction per user/address pair; the context manager commits
        with transaction.manager:
            address = Address(
                description="Address#2" + str(i).rjust(2, "0"))
            DBSession.add(address)
            # birthdays are spread deterministically over 1980-1987
            user = User(name="User#1" + str(i).rjust(2, "0"),
                        birthday=date(1980 + i % 8, i % 12 + 1, i % 10 + 1))
            user.address = address
            DBSession.add(user)
Populate database with 30 users .
16,476
def _parse_logline_timestamp ( t ) : date , time = t . split ( ' ' ) month , day = date . split ( '-' ) h , m , s = time . split ( ':' ) s , ms = s . split ( '.' ) return ( month , day , h , m , s , ms )
Parses a logline timestamp into a tuple .
16,477
def logline_timestamp_comparator(t1, t2):
    """Compare two logline-format timestamps.

    :return: -1 if ``t1 < t2``, 1 if ``t1 > t2``, 0 if equal.
    """
    # components are fixed-width strings, so string comparison is correct
    for u1, u2 in zip(_parse_logline_timestamp(t1),
                      _parse_logline_timestamp(t2)):
        if u1 != u2:
            return -1 if u1 < u2 else 1
    return 0
Comparator for timestamps in logline format .
16,478
def epoch_to_log_line_timestamp(epoch_time, time_zone=None):
    """Convert an epoch timestamp in ms to logline timestamp format.

    :param int epoch_time: milliseconds since the Unix epoch.
    :param time_zone: optional ``datetime.tzinfo``; local time if None.
    :return: 'MM-DD HH:MM:SS.mmm' string.
    """
    s, ms = divmod(epoch_time, 1000)
    d = datetime.datetime.fromtimestamp(s, tz=time_zone)
    # Fixed: zero-pad milliseconds to three digits so timestamps have a
    # fixed width and compare/parse correctly ('.005', not '.5').
    return d.strftime('%m-%d %H:%M:%S.') + '%03d' % ms
Converts an epoch timestamp in ms to log line timestamp format, which is readable for humans.
16,479
def run_suite(test_classes, argv=None):
    """Execute multiple test classes as a suite.

    :param test_classes: iterable of ``base_test.BaseTestClass`` subclasses.
    :param argv: CLI args; defaults to ``sys.argv[1:]``.

    Exits with status 1 on invalid classes or any test failure.
    """
    parser = argparse.ArgumentParser(description='Mobly Suite Executable.')
    parser.add_argument(
        '-c', '--config', nargs=1, type=str, required=True,
        metavar='<PATH>', help='Path to the test configuration file.')
    parser.add_argument(
        '--tests', '--test_case', nargs='+', type=str,
        metavar='[ClassA[.test_a] ClassB[.test_b] ...]',
        help='A list of test classes and optional tests to execute.')
    if not argv:
        argv = sys.argv[1:]
    args = parser.parse_args(argv)
    test_configs = config_parser.load_test_config_file(args.config[0])
    # validate every class before running anything
    for test_class in test_classes:
        if not issubclass(test_class, base_test.BaseTestClass):
            logging.error('Test class %s does not extend '
                          'mobly.base_test.BaseTestClass', test_class)
            sys.exit(1)
    selected_tests = compute_selected_tests(test_classes, args.tests)
    ok = True
    # run the selected tests once per configured test bed
    for config in test_configs:
        runner = test_runner.TestRunner(config.log_path,
                                        config.test_bed_name)
        for (test_class, tests) in selected_tests.items():
            runner.add_test_class(config, test_class, tests)
        try:
            runner.run()
            ok = runner.results.is_all_pass and ok
        except signals.TestAbortAll:
            # deliberate abort: not a failure of this test bed
            pass
        except:
            # NOTE(review): broad bare except -- also catches
            # KeyboardInterrupt; kept as-is to preserve behavior.
            logging.exception('Exception when executing %s.',
                              config.test_bed_name)
            ok = False
    if not ok:
        sys.exit(1)
Executes multiple test classes as a suite .
16,480
def load_device(self, serial=None):
    """Create an AndroidDevice for the given serial number.

    With no serial, falls back to ``$ANDROID_SERIAL``, then to the single
    attached device; raises otherwise.

    :raises Error: if no device, an ambiguous choice, or an unknown serial.
    """
    connected = android_device.list_adb_devices()
    if not connected:
        raise Error('No adb device found!')
    if not serial:
        env_serial = os.environ.get('ANDROID_SERIAL', None)
        if env_serial is not None:
            serial = env_serial
        elif len(connected) == 1:
            serial = connected[0]
        else:
            raise Error('Expected one phone, but %d found. Use the -s flag or '
                        'specify ANDROID_SERIAL.' % len(connected))
    if serial not in connected:
        raise Error('Device "%s" is not found by adb.' % serial)
    devices = android_device.get_instances([serial])
    assert len(devices) == 1
    self._ad = devices[0]
Creates an AndroidDevice for the given serial number .
16,481
def open(self, host, port=23):
    """Open a telnet connection to the attenuator and query its info.

    Parses the 'MN?' reply (model-maxfreq-maxatten) into
    ``self.properties`` and ``self.max_atten``.
    """
    self._telnet_client.open(host, port)
    config_str = self._telnet_client.cmd("MN?")
    # some firmwares echo the command name back as a prefix
    prefix = "MN="
    if config_str.startswith(prefix):
        config_str = config_str[len(prefix):]
    fields = config_str.split("-", 2)
    self.properties = dict(zip(['model', 'max_freq', 'max_atten'], fields))
    self.max_atten = float(self.properties['max_atten'])
Opens a telnet connection to the desired AttenuatorDevice and queries basic information .
16,482
def set_atten(self, idx, value):
    """Set the attenuation value for a particular signal path.

    :param int idx: zero-based path index.
    :param value: attenuation to apply, within ``[0, max_atten]``.
    :raises attenuator.Error: if the connection is not open.
    :raises IndexError: if *idx* exceeds the path count.
    :raises ValueError: if *value* exceeds the maximum attenuation.
    """
    if not self.is_open:
        raise attenuator.Error(
            "Connection to attenuator at %s is not open!"
            % self._telnet_client.host)
    channel = idx + 1  # instrument channels are 1-based
    if channel > self.path_count:
        raise IndexError("Attenuator index out of range!",
                         self.path_count, idx)
    if value > self.max_atten:
        raise ValueError("Attenuator value out of range!",
                         self.max_atten, value)
    self._telnet_client.cmd("CHAN:%s:SETATT:%s" % (channel, value))
Sets the attenuation value for a particular signal path .
16,483
def get_atten(self, idx=0):
    """Return the current attenuation of the signal path at *idx*.

    :raises attenuator.Error: if the connection is not open.
    :raises IndexError: if *idx* is out of range.
    """
    if not self.is_open:
        raise attenuator.Error(
            "Connection to attenuator at %s is not open!"
            % self._telnet_client.host)
    if idx < 0 or idx + 1 > self.path_count:
        raise IndexError("Attenuator index out of range!",
                         self.path_count, idx)
    # instrument channels are 1-based; reply is a numeric string
    return float(self._telnet_client.cmd("CHAN:%s:ATT?" % (idx + 1)))
This function returns the current attenuation from an attenuator at a given index in the instrument .
16,484
def restore_app_connection(self, port=None):
    """Restore the sl4a connection after the device got disconnected.

    :param int port: host port to use; picks a free one when omitted.
    """
    if port:
        self.host_port = port
    else:
        self.host_port = utils.get_available_host_port()
    self._retry_connect()
    # recreate the event client on the new connection
    self.ed = self._start_event_client()
Restores the sl4a after device got disconnected .
16,485
def restore_app_connection(self, port=None):
    """Restore the snippet app connection after the device reconnected.

    Re-establishes the adb port forward, reconnects the RPC channel and
    the event client.

    :param int port: host port to use; picks a free one when omitted.
    :raises jsonrpc_client_base.AppRestoreConnectionError: if the app
        cannot be reached again.
    """
    self.host_port = port or utils.get_available_host_port()
    self._adb.forward(
        ['tcp:%d' % self.host_port, 'tcp:%d' % self.device_port])
    try:
        self.connect()
    except Exception:
        # Fixed: was a bare `except:` which also converted
        # KeyboardInterrupt/SystemExit into AppRestoreConnectionError.
        self.log.exception('Failed to re-connect to app.')
        raise jsonrpc_client_base.AppRestoreConnectionError(
            self._ad,
            ('Failed to restore app connection for %s at host port %s, '
             'device port %s') % (self.package, self.host_port,
                                  self.device_port))
    # The app was started by a previous process; this one does not own it.
    self._proc = None
    self._restore_event_client()
Restores the app after device got reconnected .
16,486
def _restore_event_client ( self ) : if not self . _event_client : self . _event_client = self . _start_event_client ( ) return self . _event_client . host_port = self . host_port self . _event_client . device_port = self . device_port self . _event_client . connect ( )
Restores previously created event client .
16,487
def _read_protocol_line ( self ) : while True : line = self . _proc . stdout . readline ( ) . decode ( 'utf-8' ) if not line : raise jsonrpc_client_base . AppStartError ( self . _ad , 'Unexpected EOF waiting for app to start' ) line = line . strip ( ) if ( line . startswith ( 'INSTRUMENTATION_RESULT:' ) or line . startswith ( 'SNIPPET ' ) ) : self . log . debug ( 'Accepted line from instrumentation output: "%s"' , line ) return line self . log . debug ( 'Discarded line from instrumentation output: "%s"' , line )
Reads the next line of instrumentation output relevant to snippets .
16,488
def _get_persist_command(self):
    """Return the path of an available persistence command, or ''.

    Probes `setsid` then `nohup` on the device; an empty string means
    neither is available.
    """
    for command in [_SETSID_COMMAND, _NOHUP_COMMAND]:
        try:
            which_output = self._adb.shell(
                ['which', command]).decode('utf-8')
        except adb.AdbError:
            continue
        if command in which_output:
            return command
    self.log.warning('No %s and %s commands available to launch instrument '
                     'persistently, tests that depend on UiAutomator and '
                     'at the same time performs USB disconnection may fail',
                     _SETSID_COMMAND, _NOHUP_COMMAND)
    return ''
Check availability and return path of command if available .
16,489
def help(self, print_output=True):
    """Call the 'help' RPC, which lists the available RPC calls.

    :param bool print_output: print the text (default) instead of
        returning it.
    :return: the help text when *print_output* is False, else None.
    """
    help_text = self._rpc('help')
    if not print_output:
        return help_text
    print(help_text)
Calls the help RPC which returns the list of RPC calls available .
16,490
def is_any_alive(self):
    """Return True if any registered service is alive, False otherwise."""
    return any(service.is_alive
               for service in self._service_objects.values())
True if any service is alive ; False otherwise .
16,491
def unregister(self, alias):
    """Unregister the service instance registered under *alias*.

    A still-running service is stopped first (failures are recorded,
    not raised).

    :raises Error: if no service is registered under *alias*.
    """
    if alias not in self._service_objects:
        raise Error(self._device,
                    'No service is registered with alias "%s".' % alias)
    service_obj = self._service_objects.pop(alias)
    if not service_obj.is_alive:
        return
    with expects.expect_no_raises(
            'Failed to stop service instance "%s".' % alias):
        service_obj.stop()
Unregisters a service instance .
16,492
def unregister_all(self):
    """Safely unregister every active service instance."""
    # snapshot the aliases: unregister() mutates the dict while we loop
    for alias in list(self._service_objects):
        self.unregister(alias)
Safely unregisters all active instances .
16,493
def start_all(self):
    """Start every registered service that is not already alive."""
    for alias, svc in self._service_objects.items():
        if svc.is_alive:
            continue
        with expects.expect_no_raises(
                'Failed to start service "%s".' % alias):
            svc.start()
Starts all inactive service instances .
16,494
def stop_all(self):
    """Stop every registered service that is currently alive."""
    for alias, svc in self._service_objects.items():
        if not svc.is_alive:
            continue
        with expects.expect_no_raises(
                'Failed to stop service "%s".' % alias):
            svc.stop()
Stops all active service instances .
16,495
def pause_all(self):
    """Pause every registered service instance (failures are recorded)."""
    for alias, svc in self._service_objects.items():
        with expects.expect_no_raises(
                'Failed to pause service "%s".' % alias):
            svc.pause()
Pauses all service instances .
16,496
def resume_all(self):
    """Resume every registered service instance (failures are recorded)."""
    for alias, service in self._service_objects.items():
        # Fixed copy/paste error: the failure message used to say
        # 'Failed to pause service' in this resume method.
        with expects.expect_no_raises(
                'Failed to resume service "%s".' % alias):
            service.resume()
Resumes all service instances .
16,497
def create(configs):
    """Create AndroidDevice controller objects from *configs*.

    *configs* may be the pick-all token, a list of serial strings, or a
    list of per-device config dicts.

    :return: list of AndroidDevice objects with services started.
    :raises Error: on an empty or malformed config.
    :raises DeviceError: if a configured device is not attached.
    """
    if not configs:
        raise Error(ANDROID_DEVICE_EMPTY_CONFIG_MSG)
    elif configs == ANDROID_DEVICE_PICK_ALL_TOKEN:
        ads = get_all_instances()
    elif not isinstance(configs, list):
        raise Error(ANDROID_DEVICE_NOT_LIST_CONFIG_MSG)
    elif isinstance(configs[0], dict):
        # list of dicts: each carries a serial plus extra attributes
        ads = get_instances_with_configs(configs)
    elif isinstance(configs[0], basestring):
        # list of plain serial numbers
        ads = get_instances(configs)
    else:
        raise Error('No valid config found in: %s' % configs)
    # make sure every configured device is actually visible to adb
    valid_ad_identifiers = (
        list_adb_devices() + list_adb_devices_by_usb_id())
    for ad in ads:
        if ad.serial not in valid_ad_identifiers:
            raise DeviceError(ad, 'Android device is specified in config but'
                              ' is not attached.')
    _start_services_on_ads(ads)
    return ads
Creates AndroidDevice controller objects .
16,498
def destroy(ads):
    """Clean up AndroidDevice objects by stopping all their services.

    Errors during cleanup are logged per device but never raised.

    :param ads: list of AndroidDevice objects to clean up.
    """
    for ad in ads:
        try:
            ad.services.stop_all()
        except Exception:
            # Fixed: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit during cleanup.
            ad.log.exception('Failed to clean up properly.')
Cleans up AndroidDevice objects .
16,499
def _start_services_on_ads(ads):
    """Start long-running services on multiple AndroidDevice objects.

    Registers the logcat service on each device. If a *required* device
    fails, every device started so far is destroyed and the error is
    re-raised; optional devices are skipped with a logged exception.
    """
    running_ads = []
    for ad in ads:
        running_ads.append(ad)
        # per-device config may opt out of logcat collection
        start_logcat = not getattr(ad, KEY_SKIP_LOGCAT,
                                   DEFAULT_VALUE_SKIP_LOGCAT)
        try:
            ad.services.register(SERVICE_NAME_LOGCAT, logcat.Logcat,
                                 start_service=start_logcat)
        except Exception:
            is_required = getattr(ad, KEY_DEVICE_REQUIRED,
                                  DEFAULT_VALUE_DEVICE_REQUIRED)
            if is_required:
                ad.log.exception('Failed to start some services, abort!')
                # tear down everything that already started
                destroy(running_ads)
                raise
            else:
                ad.log.exception('Skipping this optional device because some '
                                 'services failed to start.')
Starts long running services on multiple AndroidDevice objects .