idx
int64
0
251k
question
stringlengths
53
3.53k
target
stringlengths
5
1.23k
len_question
int64
20
893
len_target
int64
3
238
244,400
def get_default_keystone_session ( self , keystone_sentry , openstack_release = None , api_version = 2 ) : self . log . debug ( 'Authenticating keystone admin...' ) # 11 => xenial_queens if api_version == 3 or ( openstack_release and openstack_release >= 11 ) : client_class = keystone_client_v3 . Client api_version = 3 else : client_class = keystone_client . Client keystone_ip = keystone_sentry . info [ 'public-address' ] session , auth = self . get_keystone_session ( keystone_ip , api_version = api_version , username = 'admin' , password = 'openstack' , project_name = 'admin' , user_domain_name = 'admin_domain' , project_domain_name = 'admin_domain' ) client = client_class ( session = session ) # This populates the client.service_catalog client . auth_ref = auth . get_access ( session ) return session , client
Return a keystone session object and client object assuming standard default settings
235
13
244,401
def authenticate_keystone_admin ( self , keystone_sentry , user , password , tenant = None , api_version = None , keystone_ip = None , user_domain_name = None , project_domain_name = None , project_name = None ) : self . log . debug ( 'Authenticating keystone admin...' ) if not keystone_ip : keystone_ip = keystone_sentry . info [ 'public-address' ] # To support backward compatibility usage of this function if not project_name : project_name = tenant if api_version == 3 and not user_domain_name : user_domain_name = 'admin_domain' if api_version == 3 and not project_domain_name : project_domain_name = 'admin_domain' if api_version == 3 and not project_name : project_name = 'admin' return self . authenticate_keystone ( keystone_ip , user , password , api_version = api_version , user_domain_name = user_domain_name , project_domain_name = project_domain_name , project_name = project_name , admin_port = True )
Authenticates admin user with the keystone admin endpoint .
255
11
244,402
def authenticate_keystone_user ( self , keystone , user , password , tenant ) : self . log . debug ( 'Authenticating keystone user ({})...' . format ( user ) ) ep = keystone . service_catalog . url_for ( service_type = 'identity' , interface = 'publicURL' ) keystone_ip = urlparse . urlparse ( ep ) . hostname return self . authenticate_keystone ( keystone_ip , user , password , project_name = tenant )
Authenticates a regular user with the keystone public endpoint .
113
12
244,403
def authenticate_glance_admin ( self , keystone , force_v1_client = False ) : self . log . debug ( 'Authenticating glance admin...' ) ep = keystone . service_catalog . url_for ( service_type = 'image' , interface = 'adminURL' ) if not force_v1_client and keystone . session : return glance_clientv2 . Client ( "2" , session = keystone . session ) else : return glance_client . Client ( ep , token = keystone . auth_token )
Authenticates admin user with glance .
121
7
244,404
def authenticate_heat_admin ( self , keystone ) : self . log . debug ( 'Authenticating heat admin...' ) ep = keystone . service_catalog . url_for ( service_type = 'orchestration' , interface = 'publicURL' ) if keystone . session : return heat_client . Client ( endpoint = ep , session = keystone . session ) else : return heat_client . Client ( endpoint = ep , token = keystone . auth_token )
Authenticates the admin user with heat .
105
8
244,405
def authenticate_nova_user ( self , keystone , user , password , tenant ) : self . log . debug ( 'Authenticating nova user ({})...' . format ( user ) ) ep = keystone . service_catalog . url_for ( service_type = 'identity' , interface = 'publicURL' ) if keystone . session : return nova_client . Client ( NOVA_CLIENT_VERSION , session = keystone . session , auth_url = ep ) elif novaclient . __version__ [ 0 ] >= "7" : return nova_client . Client ( NOVA_CLIENT_VERSION , username = user , password = password , project_name = tenant , auth_url = ep ) else : return nova_client . Client ( NOVA_CLIENT_VERSION , username = user , api_key = password , project_id = tenant , auth_url = ep )
Authenticates a regular user with nova - api .
201
11
244,406
def authenticate_swift_user ( self , keystone , user , password , tenant ) : self . log . debug ( 'Authenticating swift user ({})...' . format ( user ) ) ep = keystone . service_catalog . url_for ( service_type = 'identity' , interface = 'publicURL' ) if keystone . session : return swiftclient . Connection ( session = keystone . session ) else : return swiftclient . Connection ( authurl = ep , user = user , key = password , tenant_name = tenant , auth_version = '2.0' )
Authenticates a regular user with swift api .
128
9
244,407
def create_flavor ( self , nova , name , ram , vcpus , disk , flavorid = "auto" , ephemeral = 0 , swap = 0 , rxtx_factor = 1.0 , is_public = True ) : try : nova . flavors . find ( name = name ) except ( exceptions . NotFound , exceptions . NoUniqueMatch ) : self . log . debug ( 'Creating flavor ({})' . format ( name ) ) nova . flavors . create ( name , ram , vcpus , disk , flavorid , ephemeral , swap , rxtx_factor , is_public )
Create the specified flavor .
136
5
244,408
def glance_create_image ( self , glance , image_name , image_url , download_dir = 'tests' , hypervisor_type = None , disk_format = 'qcow2' , architecture = 'x86_64' , container_format = 'bare' ) : self . log . debug ( 'Creating glance image ({}) from ' '{}...' . format ( image_name , image_url ) ) # Download image http_proxy = os . getenv ( 'AMULET_HTTP_PROXY' ) self . log . debug ( 'AMULET_HTTP_PROXY: {}' . format ( http_proxy ) ) if http_proxy : proxies = { 'http' : http_proxy } opener = urllib . FancyURLopener ( proxies ) else : opener = urllib . FancyURLopener ( ) abs_file_name = os . path . join ( download_dir , image_name ) if not os . path . exists ( abs_file_name ) : opener . retrieve ( image_url , abs_file_name ) # Create glance image glance_properties = { 'architecture' : architecture , } if hypervisor_type : glance_properties [ 'hypervisor_type' ] = hypervisor_type # Create glance image if float ( glance . version ) < 2.0 : with open ( abs_file_name ) as f : image = glance . images . create ( name = image_name , is_public = True , disk_format = disk_format , container_format = container_format , properties = glance_properties , data = f ) else : image = glance . images . create ( name = image_name , visibility = "public" , disk_format = disk_format , container_format = container_format ) glance . images . upload ( image . id , open ( abs_file_name , 'rb' ) ) glance . images . update ( image . id , * * glance_properties ) # Wait for image to reach active status img_id = image . id ret = self . resource_reaches_status ( glance . images , img_id , expected_stat = 'active' , msg = 'Image status wait' ) if not ret : msg = 'Glance image failed to reach expected state.' amulet . raise_status ( amulet . FAIL , msg = msg ) # Re-validate new image self . log . debug ( 'Validating image attributes...' ) val_img_name = glance . images . get ( img_id ) . name val_img_stat = glance . images . get ( img_id ) . status val_img_cfmt = glance . images . get ( img_id ) . container_format val_img_dfmt = glance . images . get ( img_id ) . disk_format if float ( glance . version ) < 2.0 : val_img_pub = glance . images . get ( img_id ) . is_public else : val_img_pub = glance . images . get ( img_id ) . visibility == "public" msg_attr = ( 'Image attributes - name:{} public:{} id:{} stat:{} ' 'container fmt:{} disk fmt:{}' . format ( val_img_name , val_img_pub , img_id , val_img_stat , val_img_cfmt , val_img_dfmt ) ) if val_img_name == image_name and val_img_stat == 'active' and val_img_pub is True and val_img_cfmt == container_format and val_img_dfmt == disk_format : self . log . debug ( msg_attr ) else : msg = ( 'Image validation failed, {}' . format ( msg_attr ) ) amulet . raise_status ( amulet . FAIL , msg = msg ) return image
Download an image and upload it to glance validate its status and return an image object pointer . KVM defaults can override for LXD .
831
27
244,409
def create_cirros_image ( self , glance , image_name , hypervisor_type = None ) : # /!\ DEPRECATION WARNING self . log . warn ( '/!\\ DEPRECATION WARNING: use ' 'glance_create_image instead of ' 'create_cirros_image.' ) self . log . debug ( 'Creating glance cirros image ' '({})...' . format ( image_name ) ) # Get cirros image URL http_proxy = os . getenv ( 'AMULET_HTTP_PROXY' ) self . log . debug ( 'AMULET_HTTP_PROXY: {}' . format ( http_proxy ) ) if http_proxy : proxies = { 'http' : http_proxy } opener = urllib . FancyURLopener ( proxies ) else : opener = urllib . FancyURLopener ( ) f = opener . open ( 'http://download.cirros-cloud.net/version/released' ) version = f . read ( ) . strip ( ) cirros_img = 'cirros-{}-x86_64-disk.img' . format ( version ) cirros_url = 'http://{}/{}/{}' . format ( 'download.cirros-cloud.net' , version , cirros_img ) f . close ( ) return self . glance_create_image ( glance , image_name , cirros_url , hypervisor_type = hypervisor_type )
Download the latest cirros image and upload it to glance validate and return a resource pointer .
328
18
244,410
def delete_image ( self , glance , image ) : # /!\ DEPRECATION WARNING self . log . warn ( '/!\\ DEPRECATION WARNING: use ' 'delete_resource instead of delete_image.' ) self . log . debug ( 'Deleting glance image ({})...' . format ( image ) ) return self . delete_resource ( glance . images , image , msg = 'glance image' )
Delete the specified image .
92
5
244,411
def create_instance ( self , nova , image_name , instance_name , flavor ) : self . log . debug ( 'Creating instance ' '({}|{}|{})' . format ( instance_name , image_name , flavor ) ) image = nova . glance . find_image ( image_name ) flavor = nova . flavors . find ( name = flavor ) instance = nova . servers . create ( name = instance_name , image = image , flavor = flavor ) count = 1 status = instance . status while status != 'ACTIVE' and count < 60 : time . sleep ( 3 ) instance = nova . servers . get ( instance . id ) status = instance . status self . log . debug ( 'instance status: {}' . format ( status ) ) count += 1 if status != 'ACTIVE' : self . log . error ( 'instance creation timed out' ) return None return instance
Create the specified instance .
196
5
244,412
def delete_instance ( self , nova , instance ) : # /!\ DEPRECATION WARNING self . log . warn ( '/!\\ DEPRECATION WARNING: use ' 'delete_resource instead of delete_instance.' ) self . log . debug ( 'Deleting instance ({})...' . format ( instance ) ) return self . delete_resource ( nova . servers , instance , msg = 'nova instance' )
Delete the specified instance .
92
5
244,413
def create_or_get_keypair ( self , nova , keypair_name = "testkey" ) : try : _keypair = nova . keypairs . get ( keypair_name ) self . log . debug ( 'Keypair ({}) already exists, ' 'using it.' . format ( keypair_name ) ) return _keypair except Exception : self . log . debug ( 'Keypair ({}) does not exist, ' 'creating it.' . format ( keypair_name ) ) _keypair = nova . keypairs . create ( name = keypair_name ) return _keypair
Create a new keypair or return pointer if it already exists .
135
13
244,414
def delete_resource ( self , resource , resource_id , msg = "resource" , max_wait = 120 ) : self . log . debug ( 'Deleting OpenStack resource ' '{} ({})' . format ( resource_id , msg ) ) num_before = len ( list ( resource . list ( ) ) ) resource . delete ( resource_id ) tries = 0 num_after = len ( list ( resource . list ( ) ) ) while num_after != ( num_before - 1 ) and tries < ( max_wait / 4 ) : self . log . debug ( '{} delete check: ' '{} [{}:{}] {}' . format ( msg , tries , num_before , num_after , resource_id ) ) time . sleep ( 4 ) num_after = len ( list ( resource . list ( ) ) ) tries += 1 self . log . debug ( '{}: expected, actual count = {}, ' '{}' . format ( msg , num_before - 1 , num_after ) ) if num_after == ( num_before - 1 ) : return True else : self . log . error ( '{} delete timed out' . format ( msg ) ) return False
Delete one openstack resource such as one instance keypair image volume stack etc . and confirm deletion within max wait time .
263
24
244,415
def resource_reaches_status ( self , resource , resource_id , expected_stat = 'available' , msg = 'resource' , max_wait = 120 ) : tries = 0 resource_stat = resource . get ( resource_id ) . status while resource_stat != expected_stat and tries < ( max_wait / 4 ) : self . log . debug ( '{} status check: ' '{} [{}:{}] {}' . format ( msg , tries , resource_stat , expected_stat , resource_id ) ) time . sleep ( 4 ) resource_stat = resource . get ( resource_id ) . status tries += 1 self . log . debug ( '{}: expected, actual status = {}, ' '{}' . format ( msg , resource_stat , expected_stat ) ) if resource_stat == expected_stat : return True else : self . log . debug ( '{} never reached expected status: ' '{}' . format ( resource_id , expected_stat ) ) return False
Wait for an openstack resources status to reach an expected status within a specified time . Useful to confirm that nova instances cinder vols snapshots glance images heat stacks and other resources eventually reach the expected status .
221
42
244,416
def get_ceph_pools ( self , sentry_unit ) : pools = { } cmd = 'sudo ceph osd lspools' output , code = sentry_unit . run ( cmd ) if code != 0 : msg = ( '{} `{}` returned {} ' '{}' . format ( sentry_unit . info [ 'unit_name' ] , cmd , code , output ) ) amulet . raise_status ( amulet . FAIL , msg = msg ) # For mimic ceph osd lspools output output = output . replace ( "\n" , "," ) # Example output: 0 data,1 metadata,2 rbd,3 cinder,4 glance, for pool in str ( output ) . split ( ',' ) : pool_id_name = pool . split ( ' ' ) if len ( pool_id_name ) == 2 : pool_id = pool_id_name [ 0 ] pool_name = pool_id_name [ 1 ] pools [ pool_name ] = int ( pool_id ) self . log . debug ( 'Pools on {}: {}' . format ( sentry_unit . info [ 'unit_name' ] , pools ) ) return pools
Return a dict of ceph pools from a single ceph unit with pool name as keys pool id as vals .
264
24
244,417
def get_ceph_df ( self , sentry_unit ) : cmd = 'sudo ceph df --format=json' output , code = sentry_unit . run ( cmd ) if code != 0 : msg = ( '{} `{}` returned {} ' '{}' . format ( sentry_unit . info [ 'unit_name' ] , cmd , code , output ) ) amulet . raise_status ( amulet . FAIL , msg = msg ) return json . loads ( output )
Return dict of ceph df json output including ceph pool state .
109
14
244,418
def get_ceph_pool_sample ( self , sentry_unit , pool_id = 0 ) : df = self . get_ceph_df ( sentry_unit ) for pool in df [ 'pools' ] : if pool [ 'id' ] == pool_id : pool_name = pool [ 'name' ] obj_count = pool [ 'stats' ] [ 'objects' ] kb_used = pool [ 'stats' ] [ 'kb_used' ] self . log . debug ( 'Ceph {} pool (ID {}): {} objects, ' '{} kb used' . format ( pool_name , pool_id , obj_count , kb_used ) ) return pool_name , obj_count , kb_used
Take a sample of attributes of a ceph pool returning ceph pool name object count and disk space used for the specified pool ID number .
162
28
244,419
def validate_ceph_pool_samples ( self , samples , sample_type = "resource pool" ) : original , created , deleted = range ( 3 ) if samples [ created ] <= samples [ original ] or samples [ deleted ] >= samples [ created ] : return ( 'Ceph {} samples ({}) ' 'unexpected.' . format ( sample_type , samples ) ) else : self . log . debug ( 'Ceph {} samples (OK): ' '{}' . format ( sample_type , samples ) ) return None
Validate ceph pool samples taken over time such as pool object counts or pool kb used before adding after adding and after deleting items which affect those pool attributes . The 2nd element is expected to be greater than the 1st ; 3rd is expected to be less than the 2nd .
113
58
244,420
def rmq_wait_for_cluster ( self , deployment , init_sleep = 15 , timeout = 1200 ) : if init_sleep : time . sleep ( init_sleep ) message = re . compile ( '^Unit is ready and clustered$' ) deployment . _auto_wait_for_status ( message = message , timeout = timeout , include_only = [ 'rabbitmq-server' ] )
Wait for rmq units extended status to show cluster readiness after an optional initial sleep period . Initial sleep is likely necessary to be effective following a config change as status message may not instantly update to non - ready .
89
42
244,421
def get_rmq_cluster_status ( self , sentry_unit ) : cmd = 'rabbitmqctl cluster_status' output , _ = self . run_cmd_unit ( sentry_unit , cmd ) self . log . debug ( '{} cluster_status:\n{}' . format ( sentry_unit . info [ 'unit_name' ] , output ) ) return str ( output )
Execute rabbitmq cluster status command on a unit and return the full output .
92
17
244,422
def get_rmq_cluster_running_nodes ( self , sentry_unit ) : # NOTE(beisner): rabbitmqctl cluster_status output is not # json-parsable, do string chop foo, then json.loads that. str_stat = self . get_rmq_cluster_status ( sentry_unit ) if 'running_nodes' in str_stat : pos_start = str_stat . find ( "{running_nodes," ) + 15 pos_end = str_stat . find ( "]}," , pos_start ) + 1 str_run_nodes = str_stat [ pos_start : pos_end ] . replace ( "'" , '"' ) run_nodes = json . loads ( str_run_nodes ) return run_nodes else : return [ ]
Parse rabbitmqctl cluster_status output string return list of running rabbitmq cluster nodes .
182
21
244,423
def validate_rmq_cluster_running_nodes ( self , sentry_units ) : host_names = self . get_unit_hostnames ( sentry_units ) errors = [ ] # Query every unit for cluster_status running nodes for query_unit in sentry_units : query_unit_name = query_unit . info [ 'unit_name' ] running_nodes = self . get_rmq_cluster_running_nodes ( query_unit ) # Confirm that every unit is represented in the queried unit's # cluster_status running nodes output. for validate_unit in sentry_units : val_host_name = host_names [ validate_unit . info [ 'unit_name' ] ] val_node_name = 'rabbit@{}' . format ( val_host_name ) if val_node_name not in running_nodes : errors . append ( 'Cluster member check failed on {}: {} not ' 'in {}\n' . format ( query_unit_name , val_node_name , running_nodes ) ) if errors : return '' . join ( errors )
Check that all rmq unit hostnames are represented in the cluster_status output of all units .
249
20
244,424
def rmq_ssl_is_enabled_on_unit ( self , sentry_unit , port = None ) : host = sentry_unit . info [ 'public-address' ] unit_name = sentry_unit . info [ 'unit_name' ] conf_file = '/etc/rabbitmq/rabbitmq.config' conf_contents = str ( self . file_contents_safe ( sentry_unit , conf_file , max_wait = 16 ) ) # Checks conf_ssl = 'ssl' in conf_contents conf_port = str ( port ) in conf_contents # Port explicitly checked in config if port and conf_port and conf_ssl : self . log . debug ( 'SSL is enabled @{}:{} ' '({})' . format ( host , port , unit_name ) ) return True elif port and not conf_port and conf_ssl : self . log . debug ( 'SSL is enabled @{} but not on port {} ' '({})' . format ( host , port , unit_name ) ) return False # Port not checked (useful when checking that ssl is disabled) elif not port and conf_ssl : self . log . debug ( 'SSL is enabled @{}:{} ' '({})' . format ( host , port , unit_name ) ) return True elif not conf_ssl : self . log . debug ( 'SSL not enabled @{}:{} ' '({})' . format ( host , port , unit_name ) ) return False else : msg = ( 'Unknown condition when checking SSL status @{}:{} ' '({})' . format ( host , port , unit_name ) ) amulet . raise_status ( amulet . FAIL , msg )
Check a single juju rmq unit for ssl and port in the config file .
383
18
244,425
def validate_rmq_ssl_enabled_units ( self , sentry_units , port = None ) : for sentry_unit in sentry_units : if not self . rmq_ssl_is_enabled_on_unit ( sentry_unit , port = port ) : return ( 'Unexpected condition: ssl is disabled on unit ' '({})' . format ( sentry_unit . info [ 'unit_name' ] ) ) return None
Check that ssl is enabled on rmq juju sentry units .
100
15
244,426
def validate_rmq_ssl_disabled_units ( self , sentry_units ) : for sentry_unit in sentry_units : if self . rmq_ssl_is_enabled_on_unit ( sentry_unit ) : return ( 'Unexpected condition: ssl is enabled on unit ' '({})' . format ( sentry_unit . info [ 'unit_name' ] ) ) return None
Check that ssl is enabled on listed rmq juju sentry units .
91
16
244,427
def configure_rmq_ssl_on ( self , sentry_units , deployment , port = None , max_wait = 60 ) : self . log . debug ( 'Setting ssl charm config option: on' ) # Enable RMQ SSL config = { 'ssl' : 'on' } if port : config [ 'ssl_port' ] = port deployment . d . configure ( 'rabbitmq-server' , config ) # Wait for unit status self . rmq_wait_for_cluster ( deployment ) # Confirm tries = 0 ret = self . validate_rmq_ssl_enabled_units ( sentry_units , port = port ) while ret and tries < ( max_wait / 4 ) : time . sleep ( 4 ) self . log . debug ( 'Attempt {}: {}' . format ( tries , ret ) ) ret = self . validate_rmq_ssl_enabled_units ( sentry_units , port = port ) tries += 1 if ret : amulet . raise_status ( amulet . FAIL , ret )
Turn ssl charm config option on with optional non - default ssl port specification . Confirm that it is enabled on every unit .
224
27
244,428
def configure_rmq_ssl_off ( self , sentry_units , deployment , max_wait = 60 ) : self . log . debug ( 'Setting ssl charm config option: off' ) # Disable RMQ SSL config = { 'ssl' : 'off' } deployment . d . configure ( 'rabbitmq-server' , config ) # Wait for unit status self . rmq_wait_for_cluster ( deployment ) # Confirm tries = 0 ret = self . validate_rmq_ssl_disabled_units ( sentry_units ) while ret and tries < ( max_wait / 4 ) : time . sleep ( 4 ) self . log . debug ( 'Attempt {}: {}' . format ( tries , ret ) ) ret = self . validate_rmq_ssl_disabled_units ( sentry_units ) tries += 1 if ret : amulet . raise_status ( amulet . FAIL , ret )
Turn ssl charm config option off confirm that it is disabled on every unit .
199
16
244,429
def connect_amqp_by_unit ( self , sentry_unit , ssl = False , port = None , fatal = True , username = "testuser1" , password = "changeme" ) : host = sentry_unit . info [ 'public-address' ] unit_name = sentry_unit . info [ 'unit_name' ] # Default port logic if port is not specified if ssl and not port : port = 5671 elif not ssl and not port : port = 5672 self . log . debug ( 'Connecting to amqp on {}:{} ({}) as ' '{}...' . format ( host , port , unit_name , username ) ) try : credentials = pika . PlainCredentials ( username , password ) parameters = pika . ConnectionParameters ( host = host , port = port , credentials = credentials , ssl = ssl , connection_attempts = 3 , retry_delay = 5 , socket_timeout = 1 ) connection = pika . BlockingConnection ( parameters ) assert connection . is_open is True assert connection . is_closing is False self . log . debug ( 'Connect OK' ) return connection except Exception as e : msg = ( 'amqp connection failed to {}:{} as ' '{} ({})' . format ( host , port , username , str ( e ) ) ) if fatal : amulet . raise_status ( amulet . FAIL , msg ) else : self . log . warn ( msg ) return None
Establish and return a pika amqp connection to the rabbitmq service running on a rmq juju unit .
326
26
244,430
def publish_amqp_message_by_unit ( self , sentry_unit , message , queue = "test" , ssl = False , username = "testuser1" , password = "changeme" , port = None ) : self . log . debug ( 'Publishing message to {} queue:\n{}' . format ( queue , message ) ) connection = self . connect_amqp_by_unit ( sentry_unit , ssl = ssl , port = port , username = username , password = password ) # NOTE(beisner): extra debug here re: pika hang potential: # https://github.com/pika/pika/issues/297 # https://groups.google.com/forum/#!topic/rabbitmq-users/Ja0iyfF0Szw self . log . debug ( 'Defining channel...' ) channel = connection . channel ( ) self . log . debug ( 'Declaring queue...' ) channel . queue_declare ( queue = queue , auto_delete = False , durable = True ) self . log . debug ( 'Publishing message...' ) channel . basic_publish ( exchange = '' , routing_key = queue , body = message ) self . log . debug ( 'Closing channel...' ) channel . close ( ) self . log . debug ( 'Closing connection...' ) connection . close ( )
Publish an amqp message to a rmq juju unit .
303
15
244,431
def get_amqp_message_by_unit ( self , sentry_unit , queue = "test" , username = "testuser1" , password = "changeme" , ssl = False , port = None ) : connection = self . connect_amqp_by_unit ( sentry_unit , ssl = ssl , port = port , username = username , password = password ) channel = connection . channel ( ) method_frame , _ , body = channel . basic_get ( queue ) if method_frame : self . log . debug ( 'Retreived message from {} queue:\n{}' . format ( queue , body ) ) channel . basic_ack ( method_frame . delivery_tag ) channel . close ( ) connection . close ( ) return body else : msg = 'No message retrieved.' amulet . raise_status ( amulet . FAIL , msg )
Get an amqp message from a rmq juju unit .
192
14
244,432
def validate_memcache ( self , sentry_unit , conf , os_release , earliest_release = 5 , section = 'keystone_authtoken' , check_kvs = None ) : if os_release < earliest_release : self . log . debug ( 'Skipping memcache checks for deployment. {} <' 'mitaka' . format ( os_release ) ) return _kvs = check_kvs or { 'memcached_servers' : 'inet6:[::1]:11211' } self . log . debug ( 'Checking memcached is running' ) ret = self . validate_services_by_name ( { sentry_unit : [ 'memcached' ] } ) if ret : amulet . raise_status ( amulet . FAIL , msg = 'Memcache running check' 'failed {}' . format ( ret ) ) else : self . log . debug ( 'OK' ) self . log . debug ( 'Checking memcache url is configured in {}' . format ( conf ) ) if self . validate_config_data ( sentry_unit , conf , section , _kvs ) : message = "Memcache config error in: {}" . format ( conf ) amulet . raise_status ( amulet . FAIL , msg = message ) else : self . log . debug ( 'OK' ) self . log . debug ( 'Checking memcache configuration in ' '/etc/memcached.conf' ) contents = self . file_contents_safe ( sentry_unit , '/etc/memcached.conf' , fatal = True ) ubuntu_release , _ = self . run_cmd_unit ( sentry_unit , 'lsb_release -cs' ) if CompareHostReleases ( ubuntu_release ) <= 'trusty' : memcache_listen_addr = 'ip6-localhost' else : memcache_listen_addr = '::1' expected = { '-p' : '11211' , '-l' : memcache_listen_addr } found = [ ] for key , value in expected . items ( ) : for line in contents . split ( '\n' ) : if line . startswith ( key ) : self . log . debug ( 'Checking {} is set to {}' . format ( key , value ) ) assert value == line . split ( ) [ - 1 ] self . log . debug ( line . split ( ) [ - 1 ] ) found . append ( key ) if sorted ( found ) == sorted ( expected . keys ( ) ) : self . log . debug ( 'OK' ) else : message = "Memcache config error in: /etc/memcached.conf" amulet . raise_status ( amulet . FAIL , msg = message )
Check Memcache is running and is configured to be used
599
11
244,433
def acquire ( self , lock ) : unit = hookenv . local_unit ( ) ts = self . requests [ unit ] . get ( lock ) if not ts : # If there is no outstanding request on the peers relation, # create one. self . requests . setdefault ( lock , { } ) self . requests [ unit ] [ lock ] = _timestamp ( ) self . msg ( 'Requested {}' . format ( lock ) ) # If the leader has granted the lock, yay. if self . granted ( lock ) : self . msg ( 'Acquired {}' . format ( lock ) ) return True # If the unit making the request also happens to be the # leader, it must handle the request now. Even though the # request has been stored on the peers relation, the peers # relation-changed hook will not be triggered. if hookenv . is_leader ( ) : return self . grant ( lock , unit ) return False
Acquire the named lock non - blocking .
199
9
244,434
def granted ( self , lock ) : unit = hookenv . local_unit ( ) ts = self . requests [ unit ] . get ( lock ) if ts and self . grants . get ( unit , { } ) . get ( lock ) == ts : return True return False
Return True if a previously requested lock has been granted
57
10
244,435
def request_timestamp ( self , lock ) : ts = self . requests [ hookenv . local_unit ( ) ] . get ( lock , None ) if ts is not None : return datetime . strptime ( ts , _timestamp_format )
Return the timestamp of our outstanding request for lock or None .
55
12
244,436
def grant ( self , lock , unit ) : if not hookenv . is_leader ( ) : return False # Not the leader, so we cannot grant. # Set of units already granted the lock. granted = set ( ) for u in self . grants : if lock in self . grants [ u ] : granted . add ( u ) if unit in granted : return True # Already granted. # Ordered list of units waiting for the lock. reqs = set ( ) for u in self . requests : if u in granted : continue # In the granted set. Not wanted in the req list. for _lock , ts in self . requests [ u ] . items ( ) : if _lock == lock : reqs . add ( ( ts , u ) ) queue = [ t [ 1 ] for t in sorted ( reqs ) ] if unit not in queue : return False # Unit has not requested the lock. # Locate custom logic, or fallback to the default. grant_func = getattr ( self , 'grant_{}' . format ( lock ) , self . default_grant ) if grant_func ( lock , unit , granted , queue ) : # Grant the lock. self . msg ( 'Leader grants {} to {}' . format ( lock , unit ) ) self . grants . setdefault ( unit , { } ) [ lock ] = self . requests [ unit ] [ lock ] return True return False
Maybe grant the lock to a unit .
298
8
244,437
def released ( self , unit , lock , timestamp ) : interval = _utcnow ( ) - timestamp self . msg ( 'Leader released {} from {}, held {}' . format ( lock , unit , interval ) )
Called on the leader when it has released a lock .
46
12
244,438
def require ( self , lock , guard_func , * guard_args , * * guard_kw ) : def decorator ( f ) : @ wraps ( f ) def wrapper ( * args , * * kw ) : if self . granted ( lock ) : self . msg ( 'Granted {}' . format ( lock ) ) return f ( * args , * * kw ) if guard_func ( * guard_args , * * guard_kw ) and self . acquire ( lock ) : return f ( * args , * * kw ) return None return wrapper return decorator
Decorate a function to be run only when a lock is acquired .
123
14
244,439
def msg ( self , msg ) : hookenv . log ( 'coordinator.{} {}' . format ( self . _name ( ) , msg ) , level = hookenv . INFO )
Emit a message . Override to customize log spam .
41
12
244,440
def deprecate ( warning , date = None , log = None ) : def wrap ( f ) : @ functools . wraps ( f ) def wrapped_f ( * args , * * kwargs ) : try : module = inspect . getmodule ( f ) file = inspect . getsourcefile ( f ) lines = inspect . getsourcelines ( f ) f_name = "{}-{}-{}..{}-{}" . format ( module . __name__ , file , lines [ 0 ] , lines [ - 1 ] , f . __name__ ) except ( IOError , TypeError ) : # assume it was local, so just use the name of the function f_name = f . __name__ if f_name not in __deprecated_functions : __deprecated_functions [ f_name ] = True s = "DEPRECATION WARNING: Function {} is being removed" . format ( f . __name__ ) if date : s = "{} on/around {}" . format ( s , date ) if warning : s = "{} : {}" . format ( s , warning ) if log : log ( s ) else : print ( s ) return f ( * args , * * kwargs ) return wrapped_f return wrap
Add a deprecation warning the first time the function is used . The date which is a string in semi - ISO8660 format indicate the year - month that the function is officially going to be removed .
271
42
244,441
def download ( self , source , dest ) : # propagate all exceptions # URLError, OSError, etc proto , netloc , path , params , query , fragment = urlparse ( source ) if proto in ( 'http' , 'https' ) : auth , barehost = splituser ( netloc ) if auth is not None : source = urlunparse ( ( proto , barehost , path , params , query , fragment ) ) username , password = splitpasswd ( auth ) passman = HTTPPasswordMgrWithDefaultRealm ( ) # Realm is set to None in add_password to force the username and password # to be used whatever the realm passman . add_password ( None , source , username , password ) authhandler = HTTPBasicAuthHandler ( passman ) opener = build_opener ( authhandler ) install_opener ( opener ) response = urlopen ( source ) try : with open ( dest , 'wb' ) as dest_file : dest_file . write ( response . read ( ) ) except Exception as e : if os . path . isfile ( dest ) : os . unlink ( dest ) raise e
Download an archive file .
244
5
244,442
def install ( self , source , dest = None , checksum = None , hash_type = 'sha1' ) : url_parts = self . parse_url ( source ) dest_dir = os . path . join ( os . environ . get ( 'CHARM_DIR' ) , 'fetched' ) if not os . path . exists ( dest_dir ) : mkdir ( dest_dir , perms = 0o755 ) dld_file = os . path . join ( dest_dir , os . path . basename ( url_parts . path ) ) try : self . download ( source , dld_file ) except URLError as e : raise UnhandledSource ( e . reason ) except OSError as e : raise UnhandledSource ( e . strerror ) options = parse_qs ( url_parts . fragment ) for key , value in options . items ( ) : if not six . PY3 : algorithms = hashlib . algorithms else : algorithms = hashlib . algorithms_available if key in algorithms : if len ( value ) != 1 : raise TypeError ( "Expected 1 hash value, not %d" % len ( value ) ) expected = value [ 0 ] check_hash ( dld_file , expected , key ) if checksum : check_hash ( dld_file , checksum , hash_type ) return extract ( dld_file , dest )
Download and install an archive file with optional checksum validation .
302
12
244,443
def set_trace ( addr = DEFAULT_ADDR , port = DEFAULT_PORT ) : atexit . register ( close_port , port ) try : log ( "Starting a remote python debugger session on %s:%s" % ( addr , port ) ) open_port ( port ) debugger = Rpdb ( addr = addr , port = port ) debugger . set_trace ( sys . _getframe ( ) . f_back ) except Exception : _error ( "Cannot start a remote debug session on %s:%s" % ( addr , port ) )
Set a trace point using the remote debugger
123
8
244,444
def device_info ( device ) : status = subprocess . check_output ( [ 'ibstat' , device , '-s' ] ) . splitlines ( ) regexes = { "CA type: (.*)" : "device_type" , "Number of ports: (.*)" : "num_ports" , "Firmware version: (.*)" : "fw_ver" , "Hardware version: (.*)" : "hw_ver" , "Node GUID: (.*)" : "node_guid" , "System image GUID: (.*)" : "sys_guid" , } device = DeviceInfo ( ) for line in status : for expression , key in regexes . items ( ) : matches = re . search ( expression , line ) if matches : setattr ( device , key , matches . group ( 1 ) ) return device
Returns a DeviceInfo object with the current device settings
185
10
244,445
def ipoib_interfaces ( ) : interfaces = [ ] for interface in network_interfaces ( ) : try : driver = re . search ( '^driver: (.+)$' , subprocess . check_output ( [ 'ethtool' , '-i' , interface ] ) , re . M ) . group ( 1 ) if driver in IPOIB_DRIVERS : interfaces . append ( interface ) except Exception : log ( "Skipping interface %s" % interface , level = INFO ) continue return interfaces
Return a list of IPOIB capable ethernet interfaces
110
10
244,446
def get_audits ( ) : audits = [ TemplatedFile ( '/etc/login.defs' , LoginContext ( ) , template_dir = TEMPLATES_DIR , user = 'root' , group = 'root' , mode = 0o0444 ) ] return audits
Get OS hardening login . defs audits .
62
10
244,447
def _get_defaults ( modules ) : default = os . path . join ( os . path . dirname ( __file__ ) , 'defaults/%s.yaml' % ( modules ) ) return yaml . safe_load ( open ( default ) )
Load the default config for the provided modules .
58
9
244,448
def _get_schema ( modules ) : schema = os . path . join ( os . path . dirname ( __file__ ) , 'defaults/%s.yaml.schema' % ( modules ) ) return yaml . safe_load ( open ( schema ) )
Load the config schema for the provided modules .
61
9
244,449
def _get_user_provided_overrides ( modules ) : overrides = os . path . join ( os . environ [ 'JUJU_CHARM_DIR' ] , 'hardening.yaml' ) if os . path . exists ( overrides ) : log ( "Found user-provided config overrides file '%s'" % ( overrides ) , level = DEBUG ) settings = yaml . safe_load ( open ( overrides ) ) if settings and settings . get ( modules ) : log ( "Applying '%s' overrides" % ( modules ) , level = DEBUG ) return settings . get ( modules ) log ( "No overrides found for '%s'" % ( modules ) , level = DEBUG ) else : log ( "No hardening config overrides file '%s' found in charm " "root dir" % ( overrides ) , level = DEBUG ) return { }
Load user - provided config overrides .
197
8
244,450
def _apply_overrides ( settings , overrides , schema ) : if overrides : for k , v in six . iteritems ( overrides ) : if k in schema : if schema [ k ] is None : settings [ k ] = v elif type ( schema [ k ] ) is dict : settings [ k ] = _apply_overrides ( settings [ k ] , overrides [ k ] , schema [ k ] ) else : raise Exception ( "Unexpected type found in schema '%s'" % type ( schema [ k ] ) , level = ERROR ) else : log ( "Unknown override key '%s' - ignoring" % ( k ) , level = INFO ) return settings
Get overrides config overlayed onto modules defaults .
148
10
244,451
def ensure_permissions ( path , user , group , permissions , maxdepth = - 1 ) : if not os . path . exists ( path ) : log ( "File '%s' does not exist - cannot set permissions" % ( path ) , level = WARNING ) return _user = pwd . getpwnam ( user ) os . chown ( path , _user . pw_uid , grp . getgrnam ( group ) . gr_gid ) os . chmod ( path , permissions ) if maxdepth == 0 : log ( "Max recursion depth reached - skipping further recursion" , level = DEBUG ) return elif maxdepth > 0 : maxdepth -= 1 if os . path . isdir ( path ) : contents = glob . glob ( "%s/*" % ( path ) ) for c in contents : ensure_permissions ( c , user = user , group = group , permissions = permissions , maxdepth = maxdepth )
Ensure permissions for path .
203
6
244,452
def create ( sysctl_dict , sysctl_file , ignore = False ) : if type ( sysctl_dict ) is not dict : try : sysctl_dict_parsed = yaml . safe_load ( sysctl_dict ) except yaml . YAMLError : log ( "Error parsing YAML sysctl_dict: {}" . format ( sysctl_dict ) , level = ERROR ) return else : sysctl_dict_parsed = sysctl_dict with open ( sysctl_file , "w" ) as fd : for key , value in sysctl_dict_parsed . items ( ) : fd . write ( "{}={}\n" . format ( key , value ) ) log ( "Updating sysctl_file: {} values: {}" . format ( sysctl_file , sysctl_dict_parsed ) , level = DEBUG ) call = [ "sysctl" , "-p" , sysctl_file ] if ignore : call . append ( "-e" ) check_call ( call )
Creates a sysctl . conf file from a YAML associative array
231
16
244,453
def canonical_url ( configs , endpoint_type = PUBLIC ) : scheme = _get_scheme ( configs ) address = resolve_address ( endpoint_type ) if is_ipv6 ( address ) : address = "[{}]" . format ( address ) return '%s://%s' % ( scheme , address )
Returns the correct HTTP URL to this host given the state of HTTPS configuration hacluster and charm configuration .
71
21
244,454
def _get_address_override ( endpoint_type = PUBLIC ) : override_key = ADDRESS_MAP [ endpoint_type ] [ 'override' ] addr_override = config ( override_key ) if not addr_override : return None else : return addr_override . format ( service_name = service_name ( ) )
Returns any address overrides that the user has defined based on the endpoint type .
76
16
244,455
def resolve_address ( endpoint_type = PUBLIC , override = True ) : resolved_address = None if override : resolved_address = _get_address_override ( endpoint_type ) if resolved_address : return resolved_address vips = config ( 'vip' ) if vips : vips = vips . split ( ) net_type = ADDRESS_MAP [ endpoint_type ] [ 'config' ] net_addr = config ( net_type ) net_fallback = ADDRESS_MAP [ endpoint_type ] [ 'fallback' ] binding = ADDRESS_MAP [ endpoint_type ] [ 'binding' ] clustered = is_clustered ( ) if clustered and vips : if net_addr : for vip in vips : if is_address_in_network ( net_addr , vip ) : resolved_address = vip break else : # NOTE: endeavour to check vips against network space # bindings try : bound_cidr = resolve_network_cidr ( network_get_primary_address ( binding ) ) for vip in vips : if is_address_in_network ( bound_cidr , vip ) : resolved_address = vip break except ( NotImplementedError , NoNetworkBinding ) : # If no net-splits configured and no support for extra # bindings/network spaces so we expect a single vip resolved_address = vips [ 0 ] else : if config ( 'prefer-ipv6' ) : fallback_addr = get_ipv6_addr ( exc_list = vips ) [ 0 ] else : fallback_addr = unit_get ( net_fallback ) if net_addr : resolved_address = get_address_in_network ( net_addr , fallback_addr ) else : # NOTE: only try to use extra bindings if legacy network # configuration is not in use try : resolved_address = network_get_primary_address ( binding ) except ( NotImplementedError , NoNetworkBinding ) : resolved_address = fallback_addr if resolved_address is None : raise ValueError ( "Unable to resolve a suitable IP address based on " "charm state and configuration. (net_type=%s, " "clustered=%s)" % ( net_type , clustered ) ) return resolved_address
Return unit address depending on net config .
511
8
244,456
def hugepage_support ( user , group = 'hugetlb' , nr_hugepages = 256 , max_map_count = 65536 , mnt_point = '/run/hugepages/kvm' , pagesize = '2MB' , mount = True , set_shmmax = False ) : group_info = add_group ( group ) gid = group_info . gr_gid add_user_to_group ( user , group ) if max_map_count < 2 * nr_hugepages : max_map_count = 2 * nr_hugepages sysctl_settings = { 'vm.nr_hugepages' : nr_hugepages , 'vm.max_map_count' : max_map_count , 'vm.hugetlb_shm_group' : gid , } if set_shmmax : shmmax_current = int ( check_output ( [ 'sysctl' , '-n' , 'kernel.shmmax' ] ) ) shmmax_minsize = bytes_from_string ( pagesize ) * nr_hugepages if shmmax_minsize > shmmax_current : sysctl_settings [ 'kernel.shmmax' ] = shmmax_minsize sysctl . create ( yaml . dump ( sysctl_settings ) , '/etc/sysctl.d/10-hugepage.conf' ) mkdir ( mnt_point , owner = 'root' , group = 'root' , perms = 0o755 , force = False ) lfstab = fstab . Fstab ( ) fstab_entry = lfstab . get_entry_by_attr ( 'mountpoint' , mnt_point ) if fstab_entry : lfstab . remove_entry ( fstab_entry ) entry = lfstab . Entry ( 'nodev' , mnt_point , 'hugetlbfs' , 'mode=1770,gid={},pagesize={}' . format ( gid , pagesize ) , 0 , 0 ) lfstab . add_entry ( entry ) if mount : fstab_mount ( mnt_point )
Enable hugepages on system .
480
6
244,457
def ensure_compliance ( self ) : if not self . modules : return try : loaded_modules = self . _get_loaded_modules ( ) non_compliant_modules = [ ] for module in self . modules : if module in loaded_modules : log ( "Module '%s' is enabled but should not be." % ( module ) , level = INFO ) non_compliant_modules . append ( module ) if len ( non_compliant_modules ) == 0 : return for module in non_compliant_modules : self . _disable_module ( module ) self . _restart_apache ( ) except subprocess . CalledProcessError as e : log ( 'Error occurred auditing apache module compliance. ' 'This may have been already reported. ' 'Output is: %s' % e . output , level = ERROR )
Ensures that the modules are not loaded .
179
10
244,458
def _get_loaded_modules ( ) : output = subprocess . check_output ( [ 'apache2ctl' , '-M' ] ) if six . PY3 : output = output . decode ( 'utf-8' ) modules = [ ] for line in output . splitlines ( ) : # Each line of the enabled module output looks like: # module_name (static|shared) # Plus a header line at the top of the output which is stripped # out by the regex. matcher = re . search ( r'^ (\S*)_module (\S*)' , line ) if matcher : modules . append ( matcher . group ( 1 ) ) return modules
Returns the modules which are enabled in Apache .
147
9
244,459
def _disable_module ( module ) : try : subprocess . check_call ( [ 'a2dismod' , module ] ) except subprocess . CalledProcessError as e : # Note: catch error here to allow the attempt of disabling # multiple modules in one go rather than failing after the # first module fails. log ( 'Error occurred disabling module %s. ' 'Output is: %s' % ( module , e . output ) , level = ERROR )
Disables the specified module in Apache .
99
8
244,460
def get_template_path ( template_dir , path ) : return os . path . join ( template_dir , os . path . basename ( path ) )
Returns the template file which would be used to render the path .
35
13
244,461
def render_and_write ( template_dir , path , context ) : env = Environment ( loader = FileSystemLoader ( template_dir ) ) template_file = os . path . basename ( path ) template = env . get_template ( template_file ) log ( 'Rendering from template: %s' % template . name , level = DEBUG ) rendered_content = template . render ( context ) if not rendered_content : log ( "Render returned None - skipping '%s'" % path , level = WARNING ) return write ( path , rendered_content . encode ( 'utf-8' ) . strip ( ) ) log ( 'Wrote template %s' % path , level = DEBUG )
Renders the specified template into the file .
150
9
244,462
def get_audits ( ) : audits = [ AptConfig ( [ { 'key' : 'APT::Get::AllowUnauthenticated' , 'expected' : 'false' } ] ) ] settings = get_settings ( 'os' ) clean_packages = settings [ 'security' ] [ 'packages_clean' ] if clean_packages : security_packages = settings [ 'security' ] [ 'packages_list' ] if security_packages : audits . append ( RestrictedPackages ( security_packages ) ) return audits
Get OS hardening apt audits .
114
7
244,463
def get_audits ( ) : audits = [ ] settings = utils . get_settings ( 'os' ) if settings [ 'auth' ] [ 'pam_passwdqc_enable' ] : audits . append ( PasswdqcPAM ( '/etc/passwdqc.conf' ) ) if settings [ 'auth' ] [ 'retries' ] : audits . append ( Tally2PAM ( '/usr/share/pam-configs/tally2' ) ) else : audits . append ( DeletedFile ( '/usr/share/pam-configs/tally2' ) ) return audits
Get OS hardening PAM authentication audits .
139
9
244,464
def install_ansible_support ( from_ppa = True , ppa_location = 'ppa:rquillo/ansible' ) : if from_ppa : charmhelpers . fetch . add_source ( ppa_location ) charmhelpers . fetch . apt_update ( fatal = True ) charmhelpers . fetch . apt_install ( 'ansible' ) with open ( ansible_hosts_path , 'w+' ) as hosts_file : hosts_file . write ( 'localhost ansible_connection=local ansible_remote_tmp=/root/.ansible/tmp' )
Installs the ansible package .
130
7
244,465
def execute ( self , args ) : hook_name = os . path . basename ( args [ 0 ] ) extra_vars = None if hook_name in self . _actions : extra_vars = self . _actions [ hook_name ] ( args [ 1 : ] ) else : super ( AnsibleHooks , self ) . execute ( args ) charmhelpers . contrib . ansible . apply_playbook ( self . playbook_path , tags = [ hook_name ] , extra_vars = extra_vars )
Execute the hook followed by the playbook using the hook as tag .
116
14
244,466
def action ( self , * action_names ) : def action_wrapper ( decorated ) : @ functools . wraps ( decorated ) def wrapper ( argv ) : kwargs = dict ( arg . split ( '=' ) for arg in argv ) try : return decorated ( * * kwargs ) except TypeError as e : if decorated . __doc__ : e . args += ( decorated . __doc__ , ) raise self . register_action ( decorated . __name__ , wrapper ) if '_' in decorated . __name__ : self . register_action ( decorated . __name__ . replace ( '_' , '-' ) , wrapper ) return wrapper return action_wrapper
Decorator registering them as actions
146
7
244,467
def get_logger ( self , name = "deployment-logger" , level = logging . DEBUG ) : log = logging logger = log . getLogger ( name ) fmt = log . Formatter ( "%(asctime)s %(funcName)s " "%(levelname)s: %(message)s" ) handler = log . StreamHandler ( stream = sys . stdout ) handler . setLevel ( level ) handler . setFormatter ( fmt ) logger . addHandler ( handler ) logger . setLevel ( level ) return logger
Get a logger object that will log to stdout .
119
11
244,468
def _determine_branch_locations ( self , other_services ) : self . log . info ( 'OpenStackAmuletDeployment: determine branch locations' ) # Charms outside the ~openstack-charmers base_charms = { 'mysql' : [ 'trusty' ] , 'mongodb' : [ 'trusty' ] , 'nrpe' : [ 'trusty' , 'xenial' ] , } for svc in other_services : # If a location has been explicitly set, use it if svc . get ( 'location' ) : continue if svc [ 'name' ] in base_charms : # NOTE: not all charms have support for all series we # want/need to test against, so fix to most recent # that each base charm supports target_series = self . series if self . series not in base_charms [ svc [ 'name' ] ] : target_series = base_charms [ svc [ 'name' ] ] [ - 1 ] svc [ 'location' ] = 'cs:{}/{}' . format ( target_series , svc [ 'name' ] ) elif self . stable : svc [ 'location' ] = 'cs:{}/{}' . format ( self . series , svc [ 'name' ] ) else : svc [ 'location' ] = 'cs:~openstack-charmers-next/{}/{}' . format ( self . series , svc [ 'name' ] ) return other_services
Determine the branch locations for the other services .
340
11
244,469
def _auto_wait_for_status ( self , message = None , exclude_services = None , include_only = None , timeout = None ) : if not timeout : timeout = int ( os . environ . get ( 'AMULET_SETUP_TIMEOUT' , 1800 ) ) self . log . info ( 'Waiting for extended status on units for {}s...' '' . format ( timeout ) ) all_services = self . d . services . keys ( ) if exclude_services and include_only : raise ValueError ( 'exclude_services can not be used ' 'with include_only' ) if message : if isinstance ( message , re . _pattern_type ) : match = message . pattern else : match = message self . log . debug ( 'Custom extended status wait match: ' '{}' . format ( match ) ) else : self . log . debug ( 'Default extended status wait match: contains ' 'READY (case-insensitive)' ) message = re . compile ( '.*ready.*' , re . IGNORECASE ) if exclude_services : self . log . debug ( 'Excluding services from extended status match: ' '{}' . format ( exclude_services ) ) else : exclude_services = [ ] if include_only : services = include_only else : services = list ( set ( all_services ) - set ( exclude_services ) ) self . log . debug ( 'Waiting up to {}s for extended status on services: ' '{}' . format ( timeout , services ) ) service_messages = { service : message for service in services } # Check for idleness self . d . sentry . wait ( timeout = timeout ) # Check for error states and bail early self . d . sentry . wait_for_status ( self . d . juju_env , services , timeout = timeout ) # Check for ready messages self . d . sentry . wait_for_messages ( service_messages , timeout = timeout ) self . log . info ( 'OK' )
Wait for all units to have a specific extended status except for any defined as excluded . Unless specified via message any status containing any case of ready will be considered a match .
438
34
244,470
def _get_openstack_release ( self ) : # Must be ordered by OpenStack release (not by Ubuntu release): for i , os_pair in enumerate ( OPENSTACK_RELEASES_PAIRS ) : setattr ( self , os_pair , i ) releases = { ( 'trusty' , None ) : self . trusty_icehouse , ( 'trusty' , 'cloud:trusty-kilo' ) : self . trusty_kilo , ( 'trusty' , 'cloud:trusty-liberty' ) : self . trusty_liberty , ( 'trusty' , 'cloud:trusty-mitaka' ) : self . trusty_mitaka , ( 'xenial' , None ) : self . xenial_mitaka , ( 'xenial' , 'cloud:xenial-newton' ) : self . xenial_newton , ( 'xenial' , 'cloud:xenial-ocata' ) : self . xenial_ocata , ( 'xenial' , 'cloud:xenial-pike' ) : self . xenial_pike , ( 'xenial' , 'cloud:xenial-queens' ) : self . xenial_queens , ( 'yakkety' , None ) : self . yakkety_newton , ( 'zesty' , None ) : self . zesty_ocata , ( 'artful' , None ) : self . artful_pike , ( 'bionic' , None ) : self . bionic_queens , ( 'bionic' , 'cloud:bionic-rocky' ) : self . bionic_rocky , ( 'bionic' , 'cloud:bionic-stein' ) : self . bionic_stein , ( 'cosmic' , None ) : self . cosmic_rocky , ( 'disco' , None ) : self . disco_stein , } return releases [ ( self . series , self . openstack ) ]
Get openstack release .
447
5
244,471
def _get_openstack_release_string ( self ) : releases = OrderedDict ( [ ( 'trusty' , 'icehouse' ) , ( 'xenial' , 'mitaka' ) , ( 'yakkety' , 'newton' ) , ( 'zesty' , 'ocata' ) , ( 'artful' , 'pike' ) , ( 'bionic' , 'queens' ) , ( 'cosmic' , 'rocky' ) , ( 'disco' , 'stein' ) , ] ) if self . openstack : os_origin = self . openstack . split ( ':' ) [ 1 ] return os_origin . split ( '%s-' % self . series ) [ 1 ] . split ( '/' ) [ 0 ] else : return releases [ self . series ]
Get openstack release string .
182
6
244,472
def get_ceph_expected_pools ( self , radosgw = False ) : if self . _get_openstack_release ( ) == self . trusty_icehouse : # Icehouse pools = [ 'data' , 'metadata' , 'rbd' , 'cinder-ceph' , 'glance' ] elif ( self . trusty_kilo <= self . _get_openstack_release ( ) <= self . zesty_ocata ) : # Kilo through Ocata pools = [ 'rbd' , 'cinder-ceph' , 'glance' ] else : # Pike and later pools = [ 'cinder-ceph' , 'glance' ] if radosgw : pools . extend ( [ '.rgw.root' , '.rgw.control' , '.rgw' , '.rgw.gc' , '.users.uid' ] ) return pools
Return a list of expected ceph pools in a ceph + cinder + glance test scenario based on OpenStack release and whether ceph radosgw is flagged as present or not .
202
39
244,473
def get_platform ( ) : # linux_distribution is deprecated and will be removed in Python 3.7 # Warings *not* disabled, as we certainly need to fix this. tuple_platform = platform . linux_distribution ( ) current_platform = tuple_platform [ 0 ] if "Ubuntu" in current_platform : return "ubuntu" elif "CentOS" in current_platform : return "centos" elif "debian" in current_platform : # Stock Python does not detect Ubuntu and instead returns debian. # Or at least it does in some build environments like Travis CI return "ubuntu" else : raise RuntimeError ( "This module is not supported on {}." . format ( current_platform ) )
Return the current OS platform .
153
6
244,474
def current_version_string ( ) : return "{0}.{1}.{2}" . format ( sys . version_info . major , sys . version_info . minor , sys . version_info . micro )
Current system python version as string major . minor . micro
46
11
244,475
def get_audits ( ) : if subprocess . call ( [ 'which' , 'mysql' ] , stdout = subprocess . PIPE ) != 0 : log ( "MySQL does not appear to be installed on this node - " "skipping mysql hardening" , level = WARNING ) return [ ] settings = utils . get_settings ( 'mysql' ) hardening_settings = settings [ 'hardening' ] my_cnf = hardening_settings [ 'mysql-conf' ] audits = [ FilePermissionAudit ( paths = [ my_cnf ] , user = 'root' , group = 'root' , mode = 0o0600 ) , TemplatedFile ( hardening_settings [ 'hardening-conf' ] , MySQLConfContext ( ) , TEMPLATES_DIR , mode = 0o0750 , user = 'mysql' , group = 'root' , service_actions = [ { 'service' : 'mysql' , 'actions' : [ 'restart' ] } ] ) , # MySQL and Percona charms do not allow configuration of the # data directory, so use the default. DirectoryPermissionAudit ( '/var/lib/mysql' , user = 'mysql' , group = 'mysql' , recursive = False , mode = 0o755 ) , DirectoryPermissionAudit ( '/etc/mysql' , user = 'root' , group = 'root' , recursive = False , mode = 0o700 ) , ] return audits
Get MySQL hardening config audits .
330
7
244,476
def service_reload ( service_name , restart_on_failure = False , * * kwargs ) : service_result = service ( 'reload' , service_name , * * kwargs ) if not service_result and restart_on_failure : service_result = service ( 'restart' , service_name , * * kwargs ) return service_result
Reload a system service optionally falling back to restart if reload fails .
85
14
244,477
def service_pause ( service_name , init_dir = "/etc/init" , initd_dir = "/etc/init.d" , * * kwargs ) : stopped = True if service_running ( service_name , * * kwargs ) : stopped = service_stop ( service_name , * * kwargs ) upstart_file = os . path . join ( init_dir , "{}.conf" . format ( service_name ) ) sysv_file = os . path . join ( initd_dir , service_name ) if init_is_systemd ( ) : service ( 'disable' , service_name ) service ( 'mask' , service_name ) elif os . path . exists ( upstart_file ) : override_path = os . path . join ( init_dir , '{}.override' . format ( service_name ) ) with open ( override_path , 'w' ) as fh : fh . write ( "manual\n" ) elif os . path . exists ( sysv_file ) : subprocess . check_call ( [ "update-rc.d" , service_name , "disable" ] ) else : raise ValueError ( "Unable to detect {0} as SystemD, Upstart {1} or" " SysV {2}" . format ( service_name , upstart_file , sysv_file ) ) return stopped
Pause a system service .
311
5
244,478
def service_resume ( service_name , init_dir = "/etc/init" , initd_dir = "/etc/init.d" , * * kwargs ) : upstart_file = os . path . join ( init_dir , "{}.conf" . format ( service_name ) ) sysv_file = os . path . join ( initd_dir , service_name ) if init_is_systemd ( ) : service ( 'unmask' , service_name ) service ( 'enable' , service_name ) elif os . path . exists ( upstart_file ) : override_path = os . path . join ( init_dir , '{}.override' . format ( service_name ) ) if os . path . exists ( override_path ) : os . unlink ( override_path ) elif os . path . exists ( sysv_file ) : subprocess . check_call ( [ "update-rc.d" , service_name , "enable" ] ) else : raise ValueError ( "Unable to detect {0} as SystemD, Upstart {1} or" " SysV {2}" . format ( service_name , upstart_file , sysv_file ) ) started = service_running ( service_name , * * kwargs ) if not started : started = service_start ( service_name , * * kwargs ) return started
Resume a system service .
308
6
244,479
def service ( action , service_name , * * kwargs ) : if init_is_systemd ( ) : cmd = [ 'systemctl' , action , service_name ] else : cmd = [ 'service' , service_name , action ] for key , value in six . iteritems ( kwargs ) : parameter = '%s=%s' % ( key , value ) cmd . append ( parameter ) return subprocess . call ( cmd ) == 0
Control a system service .
101
5
244,480
def service_running ( service_name , * * kwargs ) : if init_is_systemd ( ) : return service ( 'is-active' , service_name ) else : if os . path . exists ( _UPSTART_CONF . format ( service_name ) ) : try : cmd = [ 'status' , service_name ] for key , value in six . iteritems ( kwargs ) : parameter = '%s=%s' % ( key , value ) cmd . append ( parameter ) output = subprocess . check_output ( cmd , stderr = subprocess . STDOUT ) . decode ( 'UTF-8' ) except subprocess . CalledProcessError : return False else : # This works for upstart scripts where the 'service' command # returns a consistent string to represent running # 'start/running' if ( "start/running" in output or "is running" in output or "up and running" in output ) : return True elif os . path . exists ( _INIT_D_CONF . format ( service_name ) ) : # Check System V scripts init script return codes return service ( 'status' , service_name ) return False
Determine whether a system service is running .
257
10
244,481
def adduser ( username , password = None , shell = '/bin/bash' , system_user = False , primary_group = None , secondary_groups = None , uid = None , home_dir = None ) : try : user_info = pwd . getpwnam ( username ) log ( 'user {0} already exists!' . format ( username ) ) if uid : user_info = pwd . getpwuid ( int ( uid ) ) log ( 'user with uid {0} already exists!' . format ( uid ) ) except KeyError : log ( 'creating user {0}' . format ( username ) ) cmd = [ 'useradd' ] if uid : cmd . extend ( [ '--uid' , str ( uid ) ] ) if home_dir : cmd . extend ( [ '--home' , str ( home_dir ) ] ) if system_user or password is None : cmd . append ( '--system' ) else : cmd . extend ( [ '--create-home' , '--shell' , shell , '--password' , password , ] ) if not primary_group : try : grp . getgrnam ( username ) primary_group = username # avoid "group exists" error except KeyError : pass if primary_group : cmd . extend ( [ '-g' , primary_group ] ) if secondary_groups : cmd . extend ( [ '-G' , ',' . join ( secondary_groups ) ] ) cmd . append ( username ) subprocess . check_call ( cmd ) user_info = pwd . getpwnam ( username ) return user_info
Add a user to the system .
355
7
244,482
def user_exists ( username ) : try : pwd . getpwnam ( username ) user_exists = True except KeyError : user_exists = False return user_exists
Check if a user exists
42
5
244,483
def uid_exists ( uid ) : try : pwd . getpwuid ( uid ) uid_exists = True except KeyError : uid_exists = False return uid_exists
Check if a uid exists
48
6
244,484
def group_exists ( groupname ) : try : grp . getgrnam ( groupname ) group_exists = True except KeyError : group_exists = False return group_exists
Check if a group exists
43
5
244,485
def gid_exists ( gid ) : try : grp . getgrgid ( gid ) gid_exists = True except KeyError : gid_exists = False return gid_exists
Check if a gid exists
48
6
244,486
def add_group ( group_name , system_group = False , gid = None ) : try : group_info = grp . getgrnam ( group_name ) log ( 'group {0} already exists!' . format ( group_name ) ) if gid : group_info = grp . getgrgid ( gid ) log ( 'group with gid {0} already exists!' . format ( gid ) ) except KeyError : log ( 'creating group {0}' . format ( group_name ) ) add_new_group ( group_name , system_group , gid ) group_info = grp . getgrnam ( group_name ) return group_info
Add a group to the system
153
6
244,487
def chage ( username , lastday = None , expiredate = None , inactive = None , mindays = None , maxdays = None , root = None , warndays = None ) : cmd = [ 'chage' ] if root : cmd . extend ( [ '--root' , root ] ) if lastday : cmd . extend ( [ '--lastday' , lastday ] ) if expiredate : cmd . extend ( [ '--expiredate' , expiredate ] ) if inactive : cmd . extend ( [ '--inactive' , inactive ] ) if mindays : cmd . extend ( [ '--mindays' , mindays ] ) if maxdays : cmd . extend ( [ '--maxdays' , maxdays ] ) if warndays : cmd . extend ( [ '--warndays' , warndays ] ) cmd . append ( username ) subprocess . check_call ( cmd )
Change user password expiry information
196
6
244,488
def rsync ( from_path , to_path , flags = '-r' , options = None , timeout = None ) : options = options or [ '--delete' , '--executability' ] cmd = [ '/usr/bin/rsync' , flags ] if timeout : cmd = [ 'timeout' , str ( timeout ) ] + cmd cmd . extend ( options ) cmd . append ( from_path ) cmd . append ( to_path ) log ( " " . join ( cmd ) ) return subprocess . check_output ( cmd , stderr = subprocess . STDOUT ) . decode ( 'UTF-8' ) . strip ( )
Replicate the contents of a path
141
7
244,489
def write_file ( path , content , owner = 'root' , group = 'root' , perms = 0o444 ) : uid = pwd . getpwnam ( owner ) . pw_uid gid = grp . getgrnam ( group ) . gr_gid # lets see if we can grab the file and compare the context, to avoid doing # a write. existing_content = None existing_uid , existing_gid , existing_perms = None , None , None try : with open ( path , 'rb' ) as target : existing_content = target . read ( ) stat = os . stat ( path ) existing_uid , existing_gid , existing_perms = ( stat . st_uid , stat . st_gid , stat . st_mode ) except Exception : pass if content != existing_content : log ( "Writing file {} {}:{} {:o}" . format ( path , owner , group , perms ) , level = DEBUG ) with open ( path , 'wb' ) as target : os . fchown ( target . fileno ( ) , uid , gid ) os . fchmod ( target . fileno ( ) , perms ) if six . PY3 and isinstance ( content , six . string_types ) : content = content . encode ( 'UTF-8' ) target . write ( content ) return # the contents were the same, but we might still need to change the # ownership or permissions. if existing_uid != uid : log ( "Changing uid on already existing content: {} -> {}" . format ( existing_uid , uid ) , level = DEBUG ) os . chown ( path , uid , - 1 ) if existing_gid != gid : log ( "Changing gid on already existing content: {} -> {}" . format ( existing_gid , gid ) , level = DEBUG ) os . chown ( path , - 1 , gid ) if existing_perms != perms : log ( "Changing permissions on existing content: {} -> {}" . format ( existing_perms , perms ) , level = DEBUG ) os . chmod ( path , perms )
Create or overwrite a file with the contents of a byte string .
471
13
244,490
def mount ( device , mountpoint , options = None , persist = False , filesystem = "ext3" ) : cmd_args = [ 'mount' ] if options is not None : cmd_args . extend ( [ '-o' , options ] ) cmd_args . extend ( [ device , mountpoint ] ) try : subprocess . check_output ( cmd_args ) except subprocess . CalledProcessError as e : log ( 'Error mounting {} at {}\n{}' . format ( device , mountpoint , e . output ) ) return False if persist : return fstab_add ( device , mountpoint , filesystem , options = options ) return True
Mount a filesystem at a particular mountpoint
140
8
244,491
def umount ( mountpoint , persist = False ) : cmd_args = [ 'umount' , mountpoint ] try : subprocess . check_output ( cmd_args ) except subprocess . CalledProcessError as e : log ( 'Error unmounting {}\n{}' . format ( mountpoint , e . output ) ) return False if persist : return fstab_remove ( mountpoint ) return True
Unmount a filesystem
88
4
244,492
def fstab_mount ( mountpoint ) : cmd_args = [ 'mount' , mountpoint ] try : subprocess . check_output ( cmd_args ) except subprocess . CalledProcessError as e : log ( 'Error unmounting {}\n{}' . format ( mountpoint , e . output ) ) return False return True
Mount filesystem using fstab
73
5
244,493
def file_hash ( path , hash_type = 'md5' ) : if os . path . exists ( path ) : h = getattr ( hashlib , hash_type ) ( ) with open ( path , 'rb' ) as source : h . update ( source . read ( ) ) return h . hexdigest ( ) else : return None
Generate a hash checksum of the contents of path or None if not found .
75
17
244,494
def check_hash ( path , checksum , hash_type = 'md5' ) : actual_checksum = file_hash ( path , hash_type ) if checksum != actual_checksum : raise ChecksumError ( "'%s' != '%s'" % ( checksum , actual_checksum ) )
Validate a file using a cryptographic checksum .
69
10
244,495
def restart_on_change ( restart_map , stopstart = False , restart_functions = None ) : def wrap ( f ) : @ functools . wraps ( f ) def wrapped_f ( * args , * * kwargs ) : return restart_on_change_helper ( ( lambda : f ( * args , * * kwargs ) ) , restart_map , stopstart , restart_functions ) return wrapped_f return wrap
Restart services based on configuration files changing
98
8
244,496
def restart_on_change_helper ( lambda_f , restart_map , stopstart = False , restart_functions = None ) : if restart_functions is None : restart_functions = { } checksums = { path : path_hash ( path ) for path in restart_map } r = lambda_f ( ) # create a list of lists of the services to restart restarts = [ restart_map [ path ] for path in restart_map if path_hash ( path ) != checksums [ path ] ] # create a flat list of ordered services without duplicates from lists services_list = list ( OrderedDict . fromkeys ( itertools . chain ( * restarts ) ) ) if services_list : actions = ( 'stop' , 'start' ) if stopstart else ( 'restart' , ) for service_name in services_list : if service_name in restart_functions : restart_functions [ service_name ] ( service_name ) else : for action in actions : service ( action , service_name ) return r
Helper function to perform the restart_on_change function .
229
12
244,497
def pwgen ( length = None ) : if length is None : # A random length is ok to use a weak PRNG length = random . choice ( range ( 35 , 45 ) ) alphanumeric_chars = [ l for l in ( string . ascii_letters + string . digits ) if l not in 'l0QD1vAEIOUaeiou' ] # Use a crypto-friendly PRNG (e.g. /dev/urandom) for making the # actual password random_generator = random . SystemRandom ( ) random_chars = [ random_generator . choice ( alphanumeric_chars ) for _ in range ( length ) ] return ( '' . join ( random_chars ) )
Generate a random pasword .
160
7
244,498
def is_phy_iface ( interface ) : if interface : sys_net = '/sys/class/net' if os . path . isdir ( sys_net ) : for iface in glob . glob ( os . path . join ( sys_net , '*' ) ) : if '/virtual/' in os . path . realpath ( iface ) : continue if interface == os . path . basename ( iface ) : return True return False
Returns True if interface is not virtual otherwise False .
98
10
244,499
def get_bond_master ( interface ) : if interface : iface_path = '/sys/class/net/%s' % ( interface ) if os . path . exists ( iface_path ) : if '/virtual/' in os . path . realpath ( iface_path ) : return None master = os . path . join ( iface_path , 'master' ) if os . path . exists ( master ) : master = os . path . realpath ( master ) # make sure it is a bond master if os . path . exists ( os . path . join ( master , 'bonding' ) ) : return os . path . basename ( master ) return None
Returns bond master if interface is bond slave otherwise None .
147
11